From 6a81817dc8ff3116afbf32fcb9deab5ac23892ef Mon Sep 17 00:00:00 2001 From: Wolfgang Bangerth Date: Sun, 10 May 1998 00:21:05 +0000 Subject: [PATCH] Doc reformatting. git-svn-id: https://svn.dealii.org/trunk@273 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/base/include/base/function.h | 58 +- deal.II/base/include/base/parameter_handler.h | 1276 ++++++++--------- deal.II/base/include/base/quadrature.h | 74 +- deal.II/base/include/base/quadrature_lib.h | 92 +- deal.II/deal.II/include/dofs/dof_accessor.h | 234 +-- .../deal.II/include/dofs/dof_constraints.h | 150 +- deal.II/deal.II/include/dofs/dof_handler.h | 564 ++++---- deal.II/deal.II/include/fe/fe.h | 142 +- deal.II/deal.II/include/fe/fe_lib.lagrange.h | 40 +- deal.II/deal.II/include/fe/fe_update_flags.h | 18 +- deal.II/deal.II/include/fe/fe_values.h | 634 ++++---- deal.II/deal.II/include/grid/geometry_info.h | 32 +- deal.II/deal.II/include/grid/point.h | 34 +- deal.II/deal.II/include/grid/tria_accessor.h | 113 +- deal.II/deal.II/include/grid/tria_boundary.h | 92 +- deal.II/deal.II/include/grid/tria_iterator.h | 402 +++--- deal.II/deal.II/include/grid/tria_line.h | 16 +- deal.II/deal.II/include/grid/tria_quad.h | 20 +- deal.II/deal.II/include/numerics/assembler.h | 20 +- deal.II/deal.II/include/numerics/base.h | 388 ++--- deal.II/deal.II/include/numerics/data_io.h | 392 ++--- .../include/numerics/error_estimator.h | 228 +-- 22 files changed, 2511 insertions(+), 2508 deletions(-) diff --git a/deal.II/base/include/base/function.h b/deal.II/base/include/base/function.h index fbdcd31891..c0eed3783a 100644 --- a/deal.II/base/include/base/function.h +++ b/deal.II/base/include/base/function.h @@ -14,24 +14,24 @@ /** - This class is a model for a continuous function. It returns the value - of the function at a given point through the #operator ()# member function, - which is virtual. It also has a function to return a whole list of function - values at different points to reduce the overhead of the virtual function - calls; this function is preset to successively call the function returning - one value at a time. - - There are other functions return the gradient of the function at one or - several points. You only have to overload those functions you need; the - functions returning several values at a time will call those returning - only one value, while those ones will throw an exception when called but - not overloaded. - - Unless only called a very small number of times, you should overload - both those functions returning only one value as well as those returning - a whole array, since the cost of evaluation of a point value is often - less than the virtual function call itself. -*/ + * This class is a model for a continuous function. It returns the value + * of the function at a given point through the #operator ()# member function, + * which is virtual. It also has a function to return a whole list of function + * values at different points to reduce the overhead of the virtual function + * calls; this function is preset to successively call the function returning + * one value at a time. + * + * There are other functions return the gradient of the function at one or + * several points. You only have to overload those functions you need; the + * functions returning several values at a time will call those returning + * only one value, while those ones will throw an exception when called but + * not overloaded. 
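+ *
+ * A minimal sketch of a derived class might look like this (#RightHandSide#
+ * is only an example name; it is assumed here that the virtual member has
+ * the form #double operator () (const Point<dim> &p) const# and that the
+ * components of a point can be read through #p(0)#, #p(1)#, ...):
+ * \begin{verbatim}
+ *   template <int dim>
+ *   class RightHandSide : public Function<dim> {
+ *     public:
+ *                        // point value; the functions returning whole
+ *                        // lists of values fall back to this one
+ *       virtual double operator () (const Point<dim> &p) const {
+ *         return p(0);
+ *       };
+ *   };
+ * \end{verbatim}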
+ * + * Unless only called a very small number of times, you should overload + * both those functions returning only one value as well as those returning + * a whole array, since the cost of evaluation of a point value is often + * less than the virtual function call itself. + */ template class Function { public: @@ -86,12 +86,12 @@ class Function { /** - Provide a function which always returns zero. Obviously, also the derivates - of this function are zero. - - This function is of use when you want to implement homogeneous boundary - conditions. -*/ + * Provide a function which always returns zero. Obviously, also the derivates + * of this function are zero. + * + * This function is of use when you want to implement homogeneous boundary + * conditions. + */ template class ZeroFunction : public Function { public: @@ -136,11 +136,11 @@ class ZeroFunction : public Function { /** - Provide a function which always returns a constant value, which is delivered - upon construction. Obviously, the derivates of this function are zerom which - is why we derive this class from #ZeroFunction#: we then only have to - overload th value functions, not all the derivatives. -*/ + * Provide a function which always returns a constant value, which is delivered + * upon construction. Obviously, the derivates of this function are zerom which + * is why we derive this class from #ZeroFunction#: we then only have to + * overload th value functions, not all the derivatives. + */ template class ConstantFunction : public ZeroFunction { public: diff --git a/deal.II/base/include/base/parameter_handler.h b/deal.II/base/include/base/parameter_handler.h index c290fae041..58790dbfcb 100644 --- a/deal.II/base/include/base/parameter_handler.h +++ b/deal.II/base/include/base/parameter_handler.h @@ -31,8 +31,8 @@ class ostream; /** - List of possible output formats. - */ + * List of possible output formats. + */ enum OutputStyle { Text, LaTeX, HTML }; @@ -42,423 +42,423 @@ enum OutputStyle { /** - The #ParameterHandler# class provides a standard interface to an input file - which provides at run-time for program parameters such as time step sizes, - geometries, right hand sides etc. The input for the program is given in files, - streams or strings in memory using text like - \begin{verbatim} - set Time step size = 0.3 - set Geometry = [0,1]x[0,3] - \end{verbatim} - Input may be sorted into subsection trees in order to give the input a logical - structure. - - - \subsection{Declaration of entries} - - In order to use the facilities of a #ParameterHandler# object, one first has - to make known the different entries the input file may or may not contain. This - is done in the following way: - \begin{verbatim} - ... - ParameterHandler prm; - prm.declare_entry ("Time step size", - "0.2", - ParameterHandler::RegularExpressions::Double); - prm.declare_entry ("Geometry", - "[0,1]x[0,1]", - ".*"); - ... - \end{verbatim} - Each entry is declared using the function #declare_entry#. The first parameter is - the name of the entry (in short: the entry). The second is the default answer to - be taken in case the entry is not specified in the input file. The third parameter - is a regular expression which the input (and the default answer) has to match. - Several such regular expressions are defined in #ParameterHandler::RegularExpressions#. - - Entries may be located in subsections which form a kind of input tree. 
For example - input parameters for linear solver routines should be classified in a subsection - named #Linear solver# or any other suitable name. This is accomplished in the - following way: - \begin{verbatim} - ... - LinEq eq; - eq.declare_parameters (prm); - ... - - void LinEq::declare_parameters (ParameterHandler &prm) { - prm.enter_subsection("Linear solver"); - prm.declare_entry ("Solver", - "CG", - "CG\\|GMRES\\|GaussElim"); - prm.declare_entry ("Maximum number of iterations", - "20", - ParameterHandler::RegularExpressions::Integer); - ... - prm.leave_subsection (); - }; - \end{verbatim} - - Subsections may be nested. For example a nonlinear solver may have a linear solver - as member object. Then the function call tree would be something like (if the class - #NonLinEq# has a member variables #eq# of type #LinEq#): - \begin{verbatim} - void NonLinEq::declare_parameters (ParameterHandler &prm) { - prm.enter_subsection ("Nonlinear solver"); - prm.declare_entry ("Nonlinear method", - "Newton-Raphson", - ParameterHandler::RegularExpressions::AlphaNum); - eq.declare_parameters (prm); - prm.leave_subsection (); - }; - \end{verbatim} - - For class member functions which declare the different entries we propose to use the - common name #declare_parameters#. In normal cases this method can be #static# since the - entries will not depend on any previous knowledge. Classes for which entries should - logically be grouped into subsections should declare these subsections themselves. If - a class has two or more member variables of the same type both of which should have - their own parameters, this parent class' method #declare_parameters# is responsible to - group them into different subsections: - \begin{verbatim} - void NonLinEq::declare_parameters (ParameterHandler &prm) { - prm.enter_subsection ("Nonlinear solver"); - prm.enter_subsection ("Linear solver 1"); - eq1.declare_parameters (prm); - prm.leave_subsection (); - - prm.enter_subsection ("Linear solver 2"); - eq2.declare_parameters (prm); - prm.leave_subsection (); - prm.leave_subsection (); - }; - \end{verbatim} - - - \subsection{Input files and special characters} - - For the first example above the input file would look like the following: - \begin{verbatim} - ... - subsection Nonlinear solver - set Nonlinear method = Gradient - subsection Linear solver - set Solver = CG - set Maxmimum number of iterations = 30 - end - end - ... # other stuff - \end{verbatim} - The words #subsection#, #set# and #end# may be either written in lowercase or uppercase - letters. Leading and trailing whitespace is removed, multiple whitespace is condensed into - only one. Since the latter applies also to the name of an entry, an entry name will not - be recognised if in the declaration multiple whitespace is used. - - In entry names and values the following characters are not allowed: \#, #{#, #}#, #|#. - Their use is reserved for the \ref{MultipleParameterLoop} class. - - Comments starting with \# are skipped. - - We propose to use the following - scheme to name entries: start the first word with a capital letter and use lowercase - letters further on. The same applies to the possible entry values to the right of the - #=# sign. - - - \subsection{Reading data from input sources} - - In order to read input you can use three possibilities: reading from an #istream# object, - reading from a file of which the name is given and reading from a string in memory in - which the lines are separated by #\n# characters. 
These possibilites are used as follows: - \begin{verbatim} - ParameterHandler prm; - ... - // declaration of entries - ... - prm.read_input (cin); // read input from standard in, - // or - prm.read_input ("simulation.in"); - // or - char *in = "set Time step size = 0.3 \n ..."; - prm.read_input (in); - ... - \end{verbatim} - You can use several sources of input successively. Entries which are changed more than - once will be overwritten everytime they are used. It is suggested to let the name of - parameter input end in #.prm#. - - You should not try to declare entries using #declare_entry# and #enter_subsection# with as - yet unknown subsection names after using #read_input#. The results in this case are - unspecified. - - If an error occurs upon reading the input, error messages are written to #cerr#. - - - \subsection{Getting entry values out of a #ParameterHandler# object} - - Each class gets its data out of a #ParameterHandler# object by calling the #get (...)# - member functions like this: - \begin{verbatim} - void NonLinEq::get_parameters (ParameterHandler &prm) { - prm.enter_subsection ("Nonlinear solver"); - String method = prm.get ("Nonlinear method"); - eq.get_parameters (prm); - prm.leave_subsection (); - }; - \end{verbatim} - #get()# returns the value of the given entry. If the entry was not specified in the input - source(s), the default value is returned. You have to enter and leave subsections - exactly as you did when declaring subsection. You may chose the order in which to - transverse the subsection tree. - - It is guaranteed that only entries matching the given regular expression are returned, - i.e. an input entry value which does not match the regular expression is not stored. - - You can use #get# to retrieve the parameter in text form, #get_integer# to get an integer - or #get_double# to get a double. It will cause an internal error if - the string could not be converted to an integer or a double. This should, though, not - happen if you correctly specified the regular expression for this entry; you should not - try to get out an integer or a double from an entry for which no according regular - expression was set. The internal error is raised through the #Assert()# macro family - which only works in debug mode. - - If you want to print out all user selectable features, use the - #print_parameters# function. It is generally a good idea to print all parameters - at the beginning of a log file, since this way input and output are together in - one file which makes matching at a later time easier. Additionally, the function - also print those entries which have not been modified in the input file und are - thus set to default values; since default values may change in the process of - program development, you cannot know the values of parameters not specified in the - input file. - - - \subsection{Style guide for data retrieval} - - We propose that every class which gets data out of a #ParameterHandler# object provides - a function named #get_parameters#. This should be declared #virtual#. #get_parameters# - functions in derived classes should call the #BaseClass::get_parameters# function. - - - \subsection{Possible future extensions} - - \begin{itemize} - \item Allow long input lines to be broken by appending a backslash character - (just like C macros and shell input). - \item Provide an #input filename# command for the input file to enable users to put the - most common parameters into separate files. 
- \end{itemize} - - - - \subsection{Worked Example} - - This is the code: - \begin{verbatim} - #include - #include "../include/parameter_handler.h" - - - class LinEq { - public: - static void declare_parameters (ParameterHandler &prm); - void get_parameters (ParameterHandler &prm); - private: - String Method; - int MaxIterations; - }; - - - class Problem { - private: - LinEq eq1, eq2; - String Matrix1, Matrix2; - String outfile; - public: - static void declare_parameters (ParameterHandler &prm); - void get_parameters (ParameterHandler &prm); - }; - - - - void LinEq::declare_parameters (ParameterHandler &prm) { - // declare parameters for the linear - // solver in a subsection - prm.enter_subsection ("Linear solver"); - prm.declare_entry ("Solver", - "CG", - "\\(CG\\|BiCGStab\\|GMRES\\)"); - prm.declare_entry ("Maximum number of iterations", - "20", - ParameterHandler::RegularExpressions::Integer); - prm.leave_subsection (); - }; - - - void LinEq::get_parameters (ParameterHandler &prm) { - prm.enter_subsection ("Linear solver"); - Method = prm.get ("Solver"); - MaxIterations = prm.get_integer ("Maximum number of iterations"); - prm.leave_subsection (); - cout << " LinEq: Method=" << Method << ", MaxIterations=" << MaxIterations << endl; - }; - - - - void Problem::declare_parameters (ParameterHandler &prm) { - // first some global parameter entries - prm.declare_entry ("Output file", - "out", - ".*"); - - prm.declare_entry ("Equation 1", - "Laplace", - ".*"); - prm.declare_entry ("Equation 2", - "Elasticity", - ".*"); - - // declare parameters for the - // first equation - prm.enter_subsection ("Equation 1"); - prm.declare_entry ("Matrix type", - "Sparse", - "\\(Full\\|Sparse\\|Diagonal\\)"); - LinEq::declare_parameters (prm); // for eq1 - prm.leave_subsection (); - - // declare parameters for the - // second equation - prm.enter_subsection ("Equation 2"); - prm.declare_entry ("Matrix type", - "Sparse", - "\\(Full\\|Sparse\\|Diagonal\\)"); - LinEq::declare_parameters (prm); // for eq2 - prm.leave_subsection (); - }; - - - void Problem::get_parameters (ParameterHandler &prm) { - // entries of the problem class - outfile = prm.get ("Output file"); - - String equation1 = prm.get ("Equation 1"), - equation2 = prm.get ("Equation 2"); - - // get parameters for the - // first equation - prm.enter_subsection ("Equation 1"); - Matrix1 = prm.get ("Matrix type"); - eq1.get_parameters (prm); // for eq1 - prm.leave_subsection (); - - // get parameters for the - // second equation - prm.enter_subsection ("Equation 2"); - Matrix2 = prm.get ("Matrix type"); - eq2.get_parameters (prm); // for eq2 - prm.leave_subsection (); - - cout << " Problem: outfile=" << outfile << endl - << " eq1=" << equation1 << ", eq2=" << equation2 << endl - << " Matrix1=" << Matrix1 << ", Matrix2=" << Matrix2 << endl; - }; - - - - - void main () { - ParameterHandler prm; - Problem p; - - p.declare_parameters (prm); - - // read input from "prmtest.prm"; giving - // argv[1] would also be a good idea - prm.read_input ("prmtest.prm"); - - // print parameters to cout as ASCII text - cout << endl << endl; - prm.print_parameters (cout, Text); - - // get parameters into the program - cout << endl << endl - << "Getting parameters:" << endl; - p.get_parameters (prm); - }; - \end{verbatim} - - - This is the input file (named "prmtest.prm"): - \begin{verbatim} - # first declare the types of equations - set Equation 1 = Poisson - set Equation 2 = Navier-Stokes - - subsection Equation 1 - set Matrix type = Sparse - subsection Linear solver # 
parameters for linear solver 1 - set Solver = Gauss-Seidel - set Maximum number of iterations = 40 - end - end - - subsection Equation 2 - set Matrix type = Full - subsection Linear solver - set Solver = CG - set Maximum number of iterations = 100 - end - end - \end{verbatim} - - And here is the ouput of the program: - \begin{verbatim} - Line 8: - The entry value - Gauss-Seidel - for the entry named - Solver - does not match the given regular expression - \(CG\|BiCGStab\|GMRES\) - - - Listing of Parameters - --------------------- - Equation 1 = Poisson - Equation 2 = Navier-Stokes - Output file= out - subsection Equation 1 - Matrix type = Sparse - subsection Linear solver - Maximum number of iterations = 40 <20> - Solver = CG - subsection Equation 2 - Matrix type = Full - subsection Linear solver - Maximum number of iterations = 100 <20> - Solver = CG - - - Getting parameters: - LinEq: Method=CG, MaxIterations=40 - LinEq: Method=CG, MaxIterations=100 - Problem: outfile=out - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - \end{verbatim} - - - \subsection{References} - - This class is inspired by the #MenuSystem# class of #DiffPack#. - - @memo This class provides a standard interface to an input file - which provides at run-time for program parameters such as time step sizes, - geometries, right hand sides etc. - - @author Wolfgang Bangerth, October 1997, revised February 1998 - @see MultipleParameterLoop - */ + * The #ParameterHandler# class provides a standard interface to an input file + * which provides at run-time for program parameters such as time step sizes, + * geometries, right hand sides etc. The input for the program is given in files, + * streams or strings in memory using text like + * \begin{verbatim} + * set Time step size = 0.3 + * set Geometry = [0,1]x[0,3] + * \end{verbatim} + * Input may be sorted into subsection trees in order to give the input a logical + * structure. + * + * + * \subsection{Declaration of entries} + * + * In order to use the facilities of a #ParameterHandler# object, one first has + * to make known the different entries the input file may or may not contain. This + * is done in the following way: + * \begin{verbatim} + * ... + * ParameterHandler prm; + * prm.declare_entry ("Time step size", + * "0.2", + * ParameterHandler::RegularExpressions::Double); + * prm.declare_entry ("Geometry", + * "[0,1]x[0,1]", + * ".*"); + * ... + * \end{verbatim} + * Each entry is declared using the function #declare_entry#. The first parameter is + * the name of the entry (in short: the entry). The second is the default answer to + * be taken in case the entry is not specified in the input file. The third parameter + * is a regular expression which the input (and the default answer) has to match. + * Several such regular expressions are defined in #ParameterHandler::RegularExpressions#. + * + * Entries may be located in subsections which form a kind of input tree. For example + * input parameters for linear solver routines should be classified in a subsection + * named #Linear solver# or any other suitable name. This is accomplished in the + * following way: + * \begin{verbatim} + * ... + * LinEq eq; + * eq.declare_parameters (prm); + * ... + * + * void LinEq::declare_parameters (ParameterHandler &prm) { + * prm.enter_subsection("Linear solver"); + *prm.declare_entry ("Solver", + * "CG", + * "CG\\|GMRES\\|GaussElim"); + *prm.declare_entry ("Maximum number of iterations", + * "20", + * ParameterHandler::RegularExpressions::Integer); + *... 
+ *prm.leave_subsection (); + * }; + * \end{verbatim} + * + * Subsections may be nested. For example a nonlinear solver may have a linear solver + * as member object. Then the function call tree would be something like (if the class + * #NonLinEq# has a member variables #eq# of type #LinEq#): + * \begin{verbatim} + * void NonLinEq::declare_parameters (ParameterHandler &prm) { + * prm.enter_subsection ("Nonlinear solver"); + * prm.declare_entry ("Nonlinear method", + * "Newton-Raphson", + * ParameterHandler::RegularExpressions::AlphaNum); + * eq.declare_parameters (prm); + * prm.leave_subsection (); + * }; + * \end{verbatim} + * + * For class member functions which declare the different entries we propose to use the + * common name #declare_parameters#. In normal cases this method can be #static# since the + * entries will not depend on any previous knowledge. Classes for which entries should + * logically be grouped into subsections should declare these subsections themselves. If + * a class has two or more member variables of the same type both of which should have + * their own parameters, this parent class' method #declare_parameters# is responsible to + * group them into different subsections: + * \begin{verbatim} + * void NonLinEq::declare_parameters (ParameterHandler &prm) { + * prm.enter_subsection ("Nonlinear solver"); + * prm.enter_subsection ("Linear solver 1"); + *eq1.declare_parameters (prm); + *prm.leave_subsection (); + * + *prm.enter_subsection ("Linear solver 2"); + *eq2.declare_parameters (prm); + *prm.leave_subsection (); + *prm.leave_subsection (); + * }; + * \end{verbatim} + * + * + * \subsection{Input files and special characters} + * + * For the first example above the input file would look like the following: + * \begin{verbatim} + * ... + * subsection Nonlinear solver + * set Nonlinear method = Gradient + *subsection Linear solver + * set Solver = CG + * set Maxmimum number of iterations = 30 + *end + * end + * ... # other stuff + * \end{verbatim} + * The words #subsection#, #set# and #end# may be either written in lowercase or uppercase + * letters. Leading and trailing whitespace is removed, multiple whitespace is condensed into + * only one. Since the latter applies also to the name of an entry, an entry name will not + * be recognised if in the declaration multiple whitespace is used. + * + * In entry names and values the following characters are not allowed: \#, #{#, #}#, #|#. + * Their use is reserved for the \ref{MultipleParameterLoop} class. + * + * Comments starting with \# are skipped. + * + * We propose to use the following + * scheme to name entries: start the first word with a capital letter and use lowercase + * letters further on. The same applies to the possible entry values to the right of the + * #=# sign. + * + * + * \subsection{Reading data from input sources} + * + * In order to read input you can use three possibilities: reading from an #istream# object, + * reading from a file of which the name is given and reading from a string in memory in + * which the lines are separated by #\n# characters. These possibilites are used as follows: + * \begin{verbatim} + * ParameterHandler prm; + * ... + * // declaration of entries + * ... + * prm.read_input (cin); // read input from standard in, + * // or + * prm.read_input ("simulation.in"); + * // or + * char *in = "set Time step size = 0.3 \n ..."; + * prm.read_input (in); + * ... + * \end{verbatim} + * You can use several sources of input successively. 
Entries which are changed more than + * once will be overwritten everytime they are used. It is suggested to let the name of + * parameter input end in #.prm#. + * + * You should not try to declare entries using #declare_entry# and #enter_subsection# with as + * yet unknown subsection names after using #read_input#. The results in this case are + * unspecified. + * + * If an error occurs upon reading the input, error messages are written to #cerr#. + * + * + * \subsection{Getting entry values out of a #ParameterHandler# object} + * + * Each class gets its data out of a #ParameterHandler# object by calling the #get (...)# + * member functions like this: + * \begin{verbatim} + * void NonLinEq::get_parameters (ParameterHandler &prm) { + * prm.enter_subsection ("Nonlinear solver"); + * String method = prm.get ("Nonlinear method"); + * eq.get_parameters (prm); + * prm.leave_subsection (); + * }; + * \end{verbatim} + * #get()# returns the value of the given entry. If the entry was not specified in the input + * source(s), the default value is returned. You have to enter and leave subsections + * exactly as you did when declaring subsection. You may chose the order in which to + * transverse the subsection tree. + * + * It is guaranteed that only entries matching the given regular expression are returned, + * i.e. an input entry value which does not match the regular expression is not stored. + * + * You can use #get# to retrieve the parameter in text form, #get_integer# to get an integer + * or #get_double# to get a double. It will cause an internal error if + * the string could not be converted to an integer or a double. This should, though, not + * happen if you correctly specified the regular expression for this entry; you should not + * try to get out an integer or a double from an entry for which no according regular + * expression was set. The internal error is raised through the #Assert()# macro family + * which only works in debug mode. + * + * If you want to print out all user selectable features, use the + * #print_parameters# function. It is generally a good idea to print all parameters + * at the beginning of a log file, since this way input and output are together in + * one file which makes matching at a later time easier. Additionally, the function + * also print those entries which have not been modified in the input file und are + * thus set to default values; since default values may change in the process of + * program development, you cannot know the values of parameters not specified in the + * input file. + * + * + * \subsection{Style guide for data retrieval} + * + * We propose that every class which gets data out of a #ParameterHandler# object provides + * a function named #get_parameters#. This should be declared #virtual#. #get_parameters# + * functions in derived classes should call the #BaseClass::get_parameters# function. + * + * + * \subsection{Possible future extensions} + * + * \begin{itemize} + * \item Allow long input lines to be broken by appending a backslash character + * (just like C macros and shell input). + * \item Provide an #input filename# command for the input file to enable users to put the + * most common parameters into separate files. 
+ * \end{itemize} + * + * + * + * \subsection{Worked Example} + * + * This is the code: + * \begin{verbatim} + * #include + * #include "../include/parameter_handler.h" + * + * + * class LinEq { + * public: + * static void declare_parameters (ParameterHandler &prm); + * void get_parameters (ParameterHandler &prm); + * private: + * String Method; + * int MaxIterations; + * }; + * + * + * class Problem { + * private: + * LinEq eq1, eq2; + * String Matrix1, Matrix2; + * String outfile; + * public: + * static void declare_parameters (ParameterHandler &prm); + * void get_parameters (ParameterHandler &prm); + * }; + * + * + * + * void LinEq::declare_parameters (ParameterHandler &prm) { + * // declare parameters for the linear + * // solver in a subsection + * prm.enter_subsection ("Linear solver"); + * prm.declare_entry ("Solver", + * "CG", + * "\\(CG\\|BiCGStab\\|GMRES\\)"); + * prm.declare_entry ("Maximum number of iterations", + * "20", + * ParameterHandler::RegularExpressions::Integer); + * prm.leave_subsection (); + * }; + * + * + * void LinEq::get_parameters (ParameterHandler &prm) { + * prm.enter_subsection ("Linear solver"); + * Method = prm.get ("Solver"); + * MaxIterations = prm.get_integer ("Maximum number of iterations"); + * prm.leave_subsection (); + * cout << " LinEq: Method=" << Method << ", MaxIterations=" << MaxIterations << endl; + * }; + * + * + * + * void Problem::declare_parameters (ParameterHandler &prm) { + * // first some global parameter entries + * prm.declare_entry ("Output file", + * "out", + * ".*"); + * + * prm.declare_entry ("Equation 1", + * "Laplace", + * ".*"); + * prm.declare_entry ("Equation 2", + * "Elasticity", + * ".*"); + * + * // declare parameters for the + * // first equation + * prm.enter_subsection ("Equation 1"); + * prm.declare_entry ("Matrix type", + * "Sparse", + * "\\(Full\\|Sparse\\|Diagonal\\)"); + * LinEq::declare_parameters (prm); // for eq1 + * prm.leave_subsection (); + * + * // declare parameters for the + * // second equation + * prm.enter_subsection ("Equation 2"); + * prm.declare_entry ("Matrix type", + * "Sparse", + * "\\(Full\\|Sparse\\|Diagonal\\)"); + * LinEq::declare_parameters (prm); // for eq2 + * prm.leave_subsection (); + * }; + * + * + * void Problem::get_parameters (ParameterHandler &prm) { + * // entries of the problem class + * outfile = prm.get ("Output file"); + * + * String equation1 = prm.get ("Equation 1"), + * equation2 = prm.get ("Equation 2"); + * + * // get parameters for the + * // first equation + * prm.enter_subsection ("Equation 1"); + * Matrix1 = prm.get ("Matrix type"); + * eq1.get_parameters (prm); // for eq1 + * prm.leave_subsection (); + * + * // get parameters for the + * // second equation + * prm.enter_subsection ("Equation 2"); + * Matrix2 = prm.get ("Matrix type"); + * eq2.get_parameters (prm); // for eq2 + * prm.leave_subsection (); + * + * cout << " Problem: outfile=" << outfile << endl + * << " eq1=" << equation1 << ", eq2=" << equation2 << endl + * << " Matrix1=" << Matrix1 << ", Matrix2=" << Matrix2 << endl; + * }; + * + * + * + * + * void main () { + * ParameterHandler prm; + * Problem p; + * + * p.declare_parameters (prm); + * + * // read input from "prmtest.prm"; giving + * // argv[1] would also be a good idea + * prm.read_input ("prmtest.prm"); + * + * // print parameters to cout as ASCII text + * cout << endl << endl; + * prm.print_parameters (cout, Text); + * + * // get parameters into the program + * cout << endl << endl + * << "Getting parameters:" << endl; + * p.get_parameters (prm); + * 
}; + * \end{verbatim} + * + * + * This is the input file (named "prmtest.prm"): + * \begin{verbatim} + * # first declare the types of equations + * set Equation 1 = Poisson + * set Equation 2 = Navier-Stokes + * + * subsection Equation 1 + * set Matrix type = Sparse + * subsection Linear solver # parameters for linear solver 1 + * set Solver = Gauss-Seidel + * set Maximum number of iterations = 40 + * end + * end + * + * subsection Equation 2 + * set Matrix type = Full + * subsection Linear solver + * set Solver = CG + * set Maximum number of iterations = 100 + * end + * end + * \end{verbatim} + * + * And here is the ouput of the program: + * \begin{verbatim} + * Line 8: + * The entry value + * Gauss-Seidel + * for the entry named + * Solver + * does not match the given regular expression + * \(CG\|BiCGStab\|GMRES\) + * + * + * Listing of Parameters + * --------------------- + * Equation 1 = Poisson + * Equation 2 = Navier-Stokes + * Output file= out + * subsection Equation 1 + * Matrix type = Sparse + * subsection Linear solver + * Maximum number of iterations = 40 <20> + * Solver = CG + * subsection Equation 2 + * Matrix type = Full + * subsection Linear solver + * Maximum number of iterations = 100 <20> + * Solver = CG + * + * + * Getting parameters: + * LinEq: Method=CG, MaxIterations=40 + * LinEq: Method=CG, MaxIterations=100 + * Problem: outfile=out + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * \end{verbatim} + * + * + * \subsection{References} + * + * This class is inspired by the #MenuSystem# class of #DiffPack#. + * + * @memo This class provides a standard interface to an input file + * which provides at run-time for program parameters such as time step sizes, + * geometries, right hand sides etc. + * + * @author Wolfgang Bangerth, October 1997, revised February 1998 + * @see MultipleParameterLoop + */ class ParameterHandler { public: /** @@ -738,225 +738,225 @@ class ParameterHandler { /** - The class #MultipleParameterLoop# offers an easy possibility to test several - parameter sets during one run of the program. For this it uses the - #ParameterHandler# class to read in data in a standardized form, searches for - variant entry values and performs a loop over all combinations of parameters. - - Variant entry values are given like this: - \begin{verbatim} - set Time step size = { 0.1 | 0.2 | 0.3 } - \end{verbatim} - The loop will then perform three runs of the program, one for each value - of #Time step size#, while all other parameters are as specified or with their - default value. If there are several variant entry values in the input a loop is - performed for each combination of variant values: - \begin{verbatim} - set Time step size = { 0.1 | 0.2 } - set Solver = { CG | GMRES } - \end{verbatim} - will result in four runs of the programs, with time step 0.1 and 0.2 for each - of the two solvers. - - Opposite to a variant entry, an array entry looks like this: - \begin{verabtim} - set Output file = ofile.{{ 1 | 2 | 3 | 4 }} - \end{verbatim} - This indicates that if there are variant entries producing a total of four - different runs will write their results to the files #ofile.1#, #ofile.2#, - #ofile.3# and #ofile.4#, respectively. Array entries do not generate multiple - runs of the main loop themselves, but if there are variant entries, then in - the #n#th run of the main loop, also the #n#th value of an array is returned. 
- - Since the different variants are constructed in the order of declaration, not in - the order in which the variat entries appear in the input file, it may be - difficult to guess the mapping between the different variants and the appropriate - entry in an array. You will have to check the order of declaration, or use - only one variant entry. - - It is guaranteed that only selections which match the regular expression given - upon declaration of an entry are given back to the program. If a variant value - does not match the regular expression, the default value is stored and an error - is issued. Before the first run of the loop, all possible values are checked - for their conformance, so that the error is issued at the very beginning of the - program. - - - \subsection{Usage} - - The usage of this class is similar to the #ParameterHandler# class. First the - entries and subsections have to be declared, then a loop is performed in which - the different parameter sets are set, a new instance of a user class is created - which is then called. Taking the classes of the example for the - #ParameterHandler# class, the extended program would look like this: - \begin{verbatim} - class HelperClass : public MultipleParameterLoop::UserClass { - public: - HelperClass (); - - virtual void create_new (unsigned int runNo); - virtual void declare_parameters (ParameterHandler &prm); - virtual void run (ParameterHandler &prm); - private: - Problem *p; - }; - - - HelperClass::HelperClass () : p(0) {}; - - - void HelperClass::create_new (unsigned int runNo) { - if (p) delete p; - p = new Problem; - }; - - - void HelperClass::declare_parameters (ParameterHandler &prm) { - // entries of the problem class - // note: must be static member! - Problem::declare_parameters (prm); - }; - - - void HelperClass::run (ParameterHandler &prm) { - p->get_parameters (prm); - }; - - - - void main () { - // don't know why we have to write - // "class" here, but it won't work - // otherwise - class MultipleParameterLoop prm; - HelperClass h; - - h.declare_parameters (prm); - prm.read_input ("prmtest.prm"); - prm.loop (h); - }; - \end{verbatim} - - As can be seen, first a new helper class has to be set up. This must contain - a virtual constructor for a problem class. You can also derive your problem - class from #MultipleParameterLoop::UserClass# and let #create_new# clear all - member variables. If you have access to all inherited member variables in - some way this is the recommended procedure. A third possibility is to use - multiple inheritance and derive a helper class from both the - #MultipleParameterLoop::UserClass# and the problem class. In any case, - #create_new# has to provide a clean problem object which is the problem in - the second and third possibility. However, if possible, the second way should - be chosen. - - The derived class also - has to provide for member functions which declare the entries and which run - the program. Running the program includes getting the parameters out of the - #ParameterHandler# object. - - After defining an object of this helper class and an object of the - #MultipleParameterLoop# class, the entries have to be declared in the same way - as for the #ParameterHandler# class. Then the input has to be read. Finally - the loop is called. 
This executes the following steps: - \begin{verbatim} - for each combination - { - UserObject.create_new (runNo); - - set parameters for this run - - UserObject.run (*this); - }; - \end{verbatim} - #UserObject# is the parameter to the #loop# function. #create_new# is given the number - of the run (starting from one) to enable naming output files differently for each - run. - - - \subsection{Syntax for variant and array entry values} - - Variant values are specified like #prefix{ v1 | v2 | v3 | ... }postfix#. Whitespace - to the right of the opening brace #{# is ignored as well as to the left of the - closing brace #}# while whitespace on the respectively other side is not ignored. - Whitespace around the mid symbols #|# is also ignored. The empty selection - #prefix{ v1 | }postfix# is also allowed and produces the strings #prefixv1postfix# and - #prefixpostfix#. - - The syntax for array values is equal, apart from the double braces: - #prefix{{ v1 | v2 | v3 }}postfix#. - - - \subsection{Worked example} - - Given the above extensions to the example program for the #ParameterHandler# and the - following input file - \begin{verbatim} - set Equation 1 = Poisson - set Equation 2 = Navier-Stokes - set Output file= results.{{ 1 | 2 | 3 | 4 | 5 | 6 }} - - subsection Equation 1 - set Matrix type = Sparse - subsection Linear solver - set Solver = CG - set Maximum number of iterations = { 10 | 20 | 30 } - end - end - - subsection Equation 2 - set Matrix type = Full - subsection Linear solver - set Solver = { BiCGStab | GMRES } - set Maximum number of iterations = 100 - end - end - \end{verbatim} - this is the output: - \begin{verbatim} - LinEq: Method=CG, MaxIterations=10 - LinEq: Method=BiCGStab, MaxIterations=100 - Problem: outfile=results.1 - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - LinEq: Method=CG, MaxIterations=20 - LinEq: Method=BiCGStab, MaxIterations=100 - Problem: outfile=results.2 - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - LinEq: Method=CG, MaxIterations=30 - LinEq: Method=BiCGStab, MaxIterations=100 - Problem: outfile=results.3 - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - LinEq: Method=CG, MaxIterations=10 - LinEq: Method=GMRES, MaxIterations=100 - Problem: outfile=results.4 - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - LinEq: Method=CG, MaxIterations=20 - LinEq: Method=GMRES, MaxIterations=100 - Problem: outfile=results.5 - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - LinEq: Method=CG, MaxIterations=30 - LinEq: Method=GMRES, MaxIterations=100 - Problem: outfile=results.6 - eq1=Poisson, eq2=Navier-Stokes - Matrix1=Sparse, Matrix2=Full - \end{verbatim} - Since #create_new# gets the number of the run it would also be possible to output - the number of the run. - - - \subsection{References} - This class is inspired by the #Multipleloop# class of #DiffPack#. - - @memo This class provides an interface to an input file which provides at - run-time for multiple program parameters sets. The class performs a loop over - all combinations of parameter sets. - - @author Wolfgang Bangerth, October 1997 - @version 1.0 - @see ParameterHandler + * The class #MultipleParameterLoop# offers an easy possibility to test several + * parameter sets during one run of the program. For this it uses the + * #ParameterHandler# class to read in data in a standardized form, searches for + * variant entry values and performs a loop over all combinations of parameters. 
+ * + * Variant entry values are given like this: + * \begin{verbatim} + * set Time step size = { 0.1 | 0.2 | 0.3 } + * \end{verbatim} + * The loop will then perform three runs of the program, one for each value + * of #Time step size#, while all other parameters are as specified or with their + * default value. If there are several variant entry values in the input a loop is + * performed for each combination of variant values: + * \begin{verbatim} + * set Time step size = { 0.1 | 0.2 } + * set Solver = { CG | GMRES } + * \end{verbatim} + * will result in four runs of the programs, with time step 0.1 and 0.2 for each + * of the two solvers. + * + * Opposite to a variant entry, an array entry looks like this: + * \begin{verabtim} + * set Output file = ofile.{{ 1 | 2 | 3 | 4 }} + * \end{verbatim} + * This indicates that if there are variant entries producing a total of four + * different runs will write their results to the files #ofile.1#, #ofile.2#, + * #ofile.3# and #ofile.4#, respectively. Array entries do not generate multiple + * runs of the main loop themselves, but if there are variant entries, then in + * the #n#th run of the main loop, also the #n#th value of an array is returned. + * + * Since the different variants are constructed in the order of declaration, not in + * the order in which the variat entries appear in the input file, it may be + * difficult to guess the mapping between the different variants and the appropriate + * entry in an array. You will have to check the order of declaration, or use + * only one variant entry. + * + * It is guaranteed that only selections which match the regular expression given + * upon declaration of an entry are given back to the program. If a variant value + * does not match the regular expression, the default value is stored and an error + * is issued. Before the first run of the loop, all possible values are checked + * for their conformance, so that the error is issued at the very beginning of the + * program. + * + * + * \subsection{Usage} + * + * The usage of this class is similar to the #ParameterHandler# class. First the + * entries and subsections have to be declared, then a loop is performed in which + * the different parameter sets are set, a new instance of a user class is created + * which is then called. Taking the classes of the example for the + * #ParameterHandler# class, the extended program would look like this: + * \begin{verbatim} + * class HelperClass : public MultipleParameterLoop::UserClass { + * public: + * HelperClass (); + * + * virtual void create_new (unsigned int runNo); + * virtual void declare_parameters (ParameterHandler &prm); + * virtual void run (ParameterHandler &prm); + * private: + * Problem *p; + * }; + * + * + * HelperClass::HelperClass () : p(0) {}; + * + * + * void HelperClass::create_new (unsigned int runNo) { + * if (p) delete p; + * p = new Problem; + * }; + * + * + * void HelperClass::declare_parameters (ParameterHandler &prm) { + * // entries of the problem class + * // note: must be static member! 
+ * Problem::declare_parameters (prm); + * }; + * + * + * void HelperClass::run (ParameterHandler &prm) { + * p->get_parameters (prm); + * }; + * + * + * + * void main () { + * // don't know why we have to write + * // "class" here, but it won't work + * // otherwise + * class MultipleParameterLoop prm; + * HelperClass h; + * + * h.declare_parameters (prm); + * prm.read_input ("prmtest.prm"); + * prm.loop (h); + * }; + * \end{verbatim} + * + * As can be seen, first a new helper class has to be set up. This must contain + * a virtual constructor for a problem class. You can also derive your problem + * class from #MultipleParameterLoop::UserClass# and let #create_new# clear all + * member variables. If you have access to all inherited member variables in + * some way this is the recommended procedure. A third possibility is to use + * multiple inheritance and derive a helper class from both the + * #MultipleParameterLoop::UserClass# and the problem class. In any case, + * #create_new# has to provide a clean problem object which is the problem in + * the second and third possibility. However, if possible, the second way should + * be chosen. + * + * The derived class also + * has to provide for member functions which declare the entries and which run + * the program. Running the program includes getting the parameters out of the + * #ParameterHandler# object. + * + * After defining an object of this helper class and an object of the + * #MultipleParameterLoop# class, the entries have to be declared in the same way + * as for the #ParameterHandler# class. Then the input has to be read. Finally + * the loop is called. This executes the following steps: + * \begin{verbatim} + * for each combination + * { + * UserObject.create_new (runNo); + * + * set parameters for this run + * + * UserObject.run (*this); + * }; + * \end{verbatim} + * #UserObject# is the parameter to the #loop# function. #create_new# is given the number + * of the run (starting from one) to enable naming output files differently for each + * run. + * + * + * \subsection{Syntax for variant and array entry values} + * + * Variant values are specified like #prefix{ v1 | v2 | v3 | ... }postfix#. Whitespace + * to the right of the opening brace #{# is ignored as well as to the left of the + * closing brace #}# while whitespace on the respectively other side is not ignored. + * Whitespace around the mid symbols #|# is also ignored. The empty selection + * #prefix{ v1 | }postfix# is also allowed and produces the strings #prefixv1postfix# and + * #prefixpostfix#. + * + * The syntax for array values is equal, apart from the double braces: + * #prefix{{ v1 | v2 | v3 }}postfix#. 
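+ *
+ * As a concrete illustration of the empty selection (the entry name is made
+ * up for this sketch), the variant value
+ * \begin{verbatim}
+ *   set Preconditioner = SSOR{ -Jacobi | }
+ * \end{verbatim}
+ * expands to the two values #SSOR-Jacobi# and #SSOR#, so the loop is run
+ * once for each of them.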
+ * + * + * \subsection{Worked example} + * + * Given the above extensions to the example program for the #ParameterHandler# and the + * following input file + * \begin{verbatim} + * set Equation 1 = Poisson + * set Equation 2 = Navier-Stokes + * set Output file= results.{{ 1 | 2 | 3 | 4 | 5 | 6 }} + * + * subsection Equation 1 + * set Matrix type = Sparse + * subsection Linear solver + * set Solver = CG + * set Maximum number of iterations = { 10 | 20 | 30 } + * end + * end + * + * subsection Equation 2 + * set Matrix type = Full + * subsection Linear solver + * set Solver = { BiCGStab | GMRES } + * set Maximum number of iterations = 100 + * end + * end + * \end{verbatim} + * this is the output: + * \begin{verbatim} + * LinEq: Method=CG, MaxIterations=10 + * LinEq: Method=BiCGStab, MaxIterations=100 + * Problem: outfile=results.1 + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * LinEq: Method=CG, MaxIterations=20 + * LinEq: Method=BiCGStab, MaxIterations=100 + * Problem: outfile=results.2 + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * LinEq: Method=CG, MaxIterations=30 + * LinEq: Method=BiCGStab, MaxIterations=100 + * Problem: outfile=results.3 + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * LinEq: Method=CG, MaxIterations=10 + * LinEq: Method=GMRES, MaxIterations=100 + * Problem: outfile=results.4 + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * LinEq: Method=CG, MaxIterations=20 + * LinEq: Method=GMRES, MaxIterations=100 + * Problem: outfile=results.5 + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * LinEq: Method=CG, MaxIterations=30 + * LinEq: Method=GMRES, MaxIterations=100 + * Problem: outfile=results.6 + * eq1=Poisson, eq2=Navier-Stokes + * Matrix1=Sparse, Matrix2=Full + * \end{verbatim} + * Since #create_new# gets the number of the run it would also be possible to output + * the number of the run. + * + * + * \subsection{References} + * This class is inspired by the #Multipleloop# class of #DiffPack#. + * + * @memo This class provides an interface to an input file which provides at + * run-time for multiple program parameters sets. The class performs a loop over + * all combinations of parameter sets. + * + * @author Wolfgang Bangerth, October 1997 + * @version 1.0 + * @see ParameterHandler */ class MultipleParameterLoop : public ParameterHandler { public: diff --git a/deal.II/base/include/base/quadrature.h b/deal.II/base/include/base/quadrature.h index 844ff81d06..efb29a5493 100644 --- a/deal.II/base/include/base/quadrature.h +++ b/deal.II/base/include/base/quadrature.h @@ -11,20 +11,20 @@ /** - Base class for quadrature formulae in arbitrary dimensions. This class - stores quadrature points and weights on the unit line [0,1], unit - square [0,1]x[0,1], etc. This information is used together with - objects of the \Ref{FiniteElement} class to compute the values stored - in the \Ref{FEValues} objects. - - There are a number of derived classes, denoting concrete integration - formulae. These are named by a prefixed #Q#, the name of the formula - (e.g. #Gauss#) and finally the order of integration. For example, - #QGauss2# denotes a second order Gauss integration formula in - any dimension. Second order means that it integrates polynomials of - third order exact. In general, a formula of order #n# exactly - integrates polynomials of order #2n-1#. -*/ + * Base class for quadrature formulae in arbitrary dimensions. 
This class + * stores quadrature points and weights on the unit line [0,1], unit + * square [0,1]x[0,1], etc. This information is used together with + * objects of the \Ref{FiniteElement} class to compute the values stored + * in the \Ref{FEValues} objects. + * + * There are a number of derived classes, denoting concrete integration + * formulae. These are named by a prefixed #Q#, the name of the formula + * (e.g. #Gauss#) and finally the order of integration. For example, + * #QGauss2# denotes a second order Gauss integration formula in + * any dimension. Second order means that it integrates polynomials of + * third order exact. In general, a formula of order #n# exactly + * integrates polynomials of order #2n-1#. + */ template class Quadrature { public: @@ -96,29 +96,29 @@ class Quadrature { /** - This class is a helper class to facilitate the usage of quadrature formulae - on faces or subfaces of cells. It computes the locations of quadrature - points on the unit cell from a quadrature object for a mannifold of - one dimension less than that of the cell and the number of the face. - For example, giving the Simpson rule in one dimension and using the - #project_to_face# function with face number 1, the returned points will - be $(1,0)$, $(1,0.5)$ and $(1,1)$. Note that faces have an orientation, - so when projecting to face 3, you will get $(0,0)$, $(0,0.5)$ and $(0,1)$, - which is in clockwise sense, while for face 1 the points were in - counterclockwise sense. - - For the projection to subfaces (i.e. to the children of a face of the - unit cell), the same applies as above. Note the order in which the - children of a face are numbered, which in two dimensions coincides - with the orientation of the face. - - The different functions are grouped into a common class to avoid putting - them into global namespace (and to make documentation easier, since - presently the documentation tool can only handle classes, not global - functions). However, since they have no local data, all functions are - declared #static# and can be called without creating an object of this - class. -*/ + * This class is a helper class to facilitate the usage of quadrature formulae + * on faces or subfaces of cells. It computes the locations of quadrature + * points on the unit cell from a quadrature object for a mannifold of + * one dimension less than that of the cell and the number of the face. + * For example, giving the Simpson rule in one dimension and using the + * #project_to_face# function with face number 1, the returned points will + * be $(1,0)$, $(1,0.5)$ and $(1,1)$. Note that faces have an orientation, + * so when projecting to face 3, you will get $(0,0)$, $(0,0.5)$ and $(0,1)$, + * which is in clockwise sense, while for face 1 the points were in + * counterclockwise sense. + * + * For the projection to subfaces (i.e. to the children of a face of the + * unit cell), the same applies as above. Note the order in which the + * children of a face are numbered, which in two dimensions coincides + * with the orientation of the face. + * + * The different functions are grouped into a common class to avoid putting + * them into global namespace (and to make documentation easier, since + * presently the documentation tool can only handle classes, not global + * functions). However, since they have no local data, all functions are + * declared #static# and can be called without creating an object of this + * class. 
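+ *
+ * A call might thus look like the following sketch (the exact parameter
+ * list is an assumption made for illustration; it presumes that the
+ * projected points are returned through a #vector<Point<dim> ># argument):
+ * \begin{verbatim}
+ *   QSimpson<1>        face_quadrature;
+ *   vector<Point<2> >  q_points;
+ *                        // project the 1d Simpson points onto face 1 of
+ *                        // the unit square: (1,0), (1,0.5), (1,1)
+ *   QProjector<2>::project_to_face (face_quadrature, 1, q_points);
+ * \end{verbatim}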
+ */ template class QProjector { public: diff --git a/deal.II/base/include/base/quadrature_lib.h b/deal.II/base/include/base/quadrature_lib.h index f8064099c6..8f8ba2dbf8 100644 --- a/deal.II/base/include/base/quadrature_lib.h +++ b/deal.II/base/include/base/quadrature_lib.h @@ -9,10 +9,10 @@ /** - Second order Gauss quadrature formula. - - Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. -*/ + * Second order Gauss quadrature formula. + * + * Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. + */ template class QGauss2 : public Quadrature { public: @@ -22,10 +22,10 @@ class QGauss2 : public Quadrature { /** - Third order Gauss quadrature formula. - - Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. -*/ + * Third order Gauss quadrature formula. + * + * Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. + */ template class QGauss3 : public Quadrature { public: @@ -35,10 +35,10 @@ class QGauss3 : public Quadrature { /** - Fourth order Gauss quadrature formula. - - Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. -*/ + * Fourth order Gauss quadrature formula. + * + * Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. + */ template class QGauss4 : public Quadrature { public: @@ -49,10 +49,10 @@ class QGauss4 : public Quadrature { /** - Fifth order Gauss quadrature formula. - - Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. -*/ + * Fifth order Gauss quadrature formula. + * + * Reference: Ward Cheney, David Kincaid: Numerical Mathematics and Computing. + */ template class QGauss5 : public Quadrature { public: @@ -62,14 +62,14 @@ class QGauss5 : public Quadrature { /** - Sixth order Gauss quadrature formula. I have not found explicite - representations of the zeros of the Legendre functions of sixth - and higher degree. If anyone finds them, please replace the existing - numbers by these expressions. - - Reference: J. E. Akin: Application and Implementation of Finite - Element Methods -*/ + * Sixth order Gauss quadrature formula. I have not found explicite + * representations of the zeros of the Legendre functions of sixth + * and higher degree. If anyone finds them, please replace the existing + * numbers by these expressions. + * + * Reference: J. E. Akin: Application and Implementation of Finite + * Element Methods + */ template class QGauss6 : public Quadrature { public: @@ -79,14 +79,14 @@ class QGauss6 : public Quadrature { /** - Seventh order Gauss quadrature formula. I have not found explicite - representations of the zeros of the Legendre functions of sixth - and higher degree. If anyone finds them, please replace the existing - numbers by these expressions. - - Reference: J. E. Akin: Application and Implementation of Finite - Element Methods -*/ + * Seventh order Gauss quadrature formula. I have not found explicite + * representations of the zeros of the Legendre functions of sixth + * and higher degree. If anyone finds them, please replace the existing + * numbers by these expressions. + * + * Reference: J. E. Akin: Application and Implementation of Finite + * Element Methods + */ template class QGauss7 : public Quadrature { public: @@ -96,14 +96,14 @@ class QGauss7 : public Quadrature { /** - Eighth order Gauss quadrature formula. I have not found explicite - representations of the zeros of the Legendre functions of sixth - and higher degree. 
If anyone finds them, please replace the existing - numbers by these expressions. - - Reference: J. E. Akin: Application and Implementation of Finite - Element Methods -*/ + * Eighth order Gauss quadrature formula. I have not found explicite + * representations of the zeros of the Legendre functions of sixth + * and higher degree. If anyone finds them, please replace the existing + * numbers by these expressions. + * + * Reference: J. E. Akin: Application and Implementation of Finite + * Element Methods + */ template class QGauss8 : public Quadrature { public: @@ -115,8 +115,8 @@ class QGauss8 : public Quadrature { /** - First order midpoint quadrature rule. -*/ + * First order midpoint quadrature rule. + */ template class QMidpoint : public Quadrature { public: @@ -126,8 +126,8 @@ class QMidpoint : public Quadrature { /** - Simpson quadrature rule. -*/ + * Simpson quadrature rule. + */ template class QSimpson : public Quadrature { public: @@ -137,8 +137,8 @@ class QSimpson : public Quadrature { /** - Trapezoidal quadrature rule. -*/ + * Trapezoidal quadrature rule. + */ template class QTrapez : public Quadrature { public: diff --git a/deal.II/deal.II/include/dofs/dof_accessor.h b/deal.II/deal.II/include/dofs/dof_accessor.h index 585bcb1b8b..09fa115881 100644 --- a/deal.II/deal.II/include/dofs/dof_accessor.h +++ b/deal.II/deal.II/include/dofs/dof_accessor.h @@ -16,27 +16,27 @@ class dVector; /** - Define the basis for accessors to the degrees of freedom. - - Note that it is allowed to construct an object of which the - #dof_handler# pointer is a Null pointer. Such an object would - result in a strange kind of behaviour, though every reasonable - operating system should disallow access through that pointer. - The reason we do not check for the null pointer in the - constructor which gets passed the #DoFHandler# pointer is that - if we did we could not make dof iterators member of other classes - (like in the #FEValues# class) if we did not know about the - #DoFHandler# object to be used upon construction of that object. - Through the way this class is implemented here, we allow the - creation of a kind of virgin object which only gets useful if - assigned to from another object before first usage. - - Opposite to construction, it is not possible to copy an object - which has an invalid dof handler pointer. This is to guarantee - that every iterator which is once assigned to is a valid - object. However, this assertion only holds in debug mode, when - the #Assert# macro is switched on. - */ + * Define the basis for accessors to the degrees of freedom. + * + * Note that it is allowed to construct an object of which the + * #dof_handler# pointer is a Null pointer. Such an object would + * result in a strange kind of behaviour, though every reasonable + * operating system should disallow access through that pointer. + * The reason we do not check for the null pointer in the + * constructor which gets passed the #DoFHandler# pointer is that + * if we did we could not make dof iterators member of other classes + * (like in the #FEValues# class) if we did not know about the + * #DoFHandler# object to be used upon construction of that object. + * Through the way this class is implemented here, we allow the + * creation of a kind of virgin object which only gets useful if + * assigned to from another object before first usage. + * + * Opposite to construction, it is not possible to copy an object + * which has an invalid dof handler pointer. 
This is to guarantee + * that every iterator which is once assigned to is a valid + * object. However, this assertion only holds in debug mode, when + * the #Assert# macro is switched on. + */ template class DoFAccessor { public: @@ -102,52 +102,52 @@ class DoFAccessor { /** - Grant access to the degrees of freedom located on lines. - This class follows mainly the route laid out by the accessor library - declared in the triangulation library (\Ref{TriaAccessor}). It enables - the user to access the degrees of freedom on the lines (there are similar - versions for the DoFs on quads, etc), where the dimension of the underlying - triangulation does not really matter (i.e. this accessor works with the - lines in 1D-, 2D-, etc dimensions). - - - \subsection{Usage} - - The \Ref{DoFDimensionInfo} classes inherited by the \Ref{DoFHandler} classes - declare typedefs to iterators using the accessors declared in this class - hierarchy tree. Usage is best to happens through these typedefs, since they - are more secure to changes in the class naming and template interface as well - as they provide easier typing (much less complicated names!). - - - \subsection{Notes about the class hierarchy structure} - - The class hierarchy seems to be a bit confused here. The reason for this is - that we would really like to derive a #DoFLineAccessor# from a #LineAccessor#. - Unfortunately, we would run into problems, if we wanted a #DoFLineAccessor# - in one spatial dimension, in which case a line is also a cell. The traditional - solution would be to declare a #DoFCellAccessor<1># which is derived from - #DoFLineAccessor<1># and #CellAccessor<1># (the #DoFLineAccessor# cannot - itself be derived from #CellAccessor# since a line is not a cell - unless in one space dimension), but since a #DoFLineAccessor# and a - #CellAccessor# are both derived from #TriaAccessor#, we would have to make - the last derivation virtual. - - Since we want to avoid virtual inheritance since this involves another - indirection in every member variable access, we chose another way: we - pass a second template parameter to a #DoFLineAccessor# which tells it - which class to be derived from: if we are in one spatial dimension, the - base class is to be #CellAccessor<1>#, in two or more dimensions it - is a #LineAccessor#, i.e. am accessor to lines without the missing - functionality needed for cells (neighbors, etc.). - - This way we can declare a #DoFCellAccessor# in one dimension by deriving - from #DoFLineAccessor<1,CellAccessor<1> >#, thus getting the cell - functionality through the #DoFLineAccessor# instead of through a virtual - multiple inheritance of #DoFLineAccessor# and #CellAccessor<1>#. - - The same concept is used with #DoFQuadAccessor# classes etc. - */ + * Grant access to the degrees of freedom located on lines. + * This class follows mainly the route laid out by the accessor library + * declared in the triangulation library (\Ref{TriaAccessor}). It enables + * the user to access the degrees of freedom on the lines (there are similar + * versions for the DoFs on quads, etc), where the dimension of the underlying + * triangulation does not really matter (i.e. this accessor works with the + * lines in 1D-, 2D-, etc dimensions). + * + * + * \subsection{Usage} + * + * The \Ref{DoFDimensionInfo} classes inherited by the \Ref{DoFHandler} classes + * declare typedefs to iterators using the accessors declared in this class + * hierarchy tree. 
Usage is best to happens through these typedefs, since they + * are more secure to changes in the class naming and template interface as well + * as they provide easier typing (much less complicated names!). + * + * + * \subsection{Notes about the class hierarchy structure} + * + * The class hierarchy seems to be a bit confused here. The reason for this is + * that we would really like to derive a #DoFLineAccessor# from a #LineAccessor#. + * Unfortunately, we would run into problems, if we wanted a #DoFLineAccessor# + * in one spatial dimension, in which case a line is also a cell. The traditional + * solution would be to declare a #DoFCellAccessor<1># which is derived from + * #DoFLineAccessor<1># and #CellAccessor<1># (the #DoFLineAccessor# cannot + * itself be derived from #CellAccessor# since a line is not a cell + * unless in one space dimension), but since a #DoFLineAccessor# and a + * #CellAccessor# are both derived from #TriaAccessor#, we would have to make + * the last derivation virtual. + * + * Since we want to avoid virtual inheritance since this involves another + * indirection in every member variable access, we chose another way: we + * pass a second template parameter to a #DoFLineAccessor# which tells it + * which class to be derived from: if we are in one spatial dimension, the + * base class is to be #CellAccessor<1>#, in two or more dimensions it + * is a #LineAccessor#, i.e. am accessor to lines without the missing + * functionality needed for cells (neighbors, etc.). + * + * This way we can declare a #DoFCellAccessor# in one dimension by deriving + * from #DoFLineAccessor<1,CellAccessor<1> >#, thus getting the cell + * functionality through the #DoFLineAccessor# instead of through a virtual + * multiple inheritance of #DoFLineAccessor# and #CellAccessor<1>#. + * + * The same concept is used with #DoFQuadAccessor# classes etc. + */ template class DoFLineAccessor : public DoFAccessor, public BaseClass { public: @@ -230,9 +230,10 @@ class DoFLineAccessor : public DoFAccessor, public BaseClass { /** - Grant access to the degrees of freedom located on quads. - @see DoFLineAccessor - */ + * Grant access to the degrees of freedom located on quads. + * + * @see DoFLineAccessor + */ template class DoFQuadAccessor : public DoFAccessor, public BaseClass { public: @@ -325,19 +326,19 @@ class DoFQuadAccessor : public DoFAccessor, public BaseClass { /** - Intermediate, "typedef"-class, not for public use. - - Rationale for the declaration of members for this class: gcc 2.8 has a bug - when deriving from explicitely specialized classes which materializes in - the calculation of wrong addresses of member variables. By declaring the - general template of #DoFSubstructAccessor# to have the same object layout as - the specialized versions (using the same base classes), we fool the compiler, - which still looks in the wrong place for the addresses but finds the - right information. This way, at least ot works. - - Insert a guard, however, in the constructor to avoid that anyone (including - the compiler) happens to use this class. - */ + * Intermediate, "typedef"-class, not for public use. + * + * Rationale for the declaration of members for this class: gcc 2.8 has a bug + * when deriving from explicitely specialized classes which materializes in + * the calculation of wrong addresses of member variables. 
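
The base-class-as-template-parameter trick described above can be shown in isolation; all names in the following sketch are placeholders, not the actual library classes:
\begin{verbatim}
// Sketch of the inheritance trick: instead of virtual multiple inheritance,
// the accessor receives its base class as a template parameter, so the 1d
// variant can use the cell accessor as its base directly.
#include <iostream>

struct LineAccessorSketch {           // plays the role of a plain line accessor
  const char *kind () const { return "plain line"; }
};

struct CellAccessorSketch {           // plays the role of CellAccessor<1>
  const char *kind () const { return "line that is also a cell"; }
};

template <int dim, class BaseClass>
struct DoFLineAccessorSketch : public BaseClass {
  // DoF related functionality would be added here; the line or cell
  // functionality comes from BaseClass without virtual inheritance.
};

int main ()
{
  DoFLineAccessorSketch<2, LineAccessorSketch> line_in_2d;
  DoFLineAccessorSketch<1, CellAccessorSketch> cell_in_1d;
  std::cout << line_in_2d.kind() << '\n'    // "plain line"
            << cell_in_1d.kind() << '\n';   // "line that is also a cell"
}
\end{verbatim}
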
By declaring the + * general template of #DoFSubstructAccessor# to have the same object layout as + * the specialized versions (using the same base classes), we fool the compiler, + * which still looks in the wrong place for the addresses but finds the + * right information. This way, at least ot works. + * + * Insert a guard, however, in the constructor to avoid that anyone (including + * the compiler) happens to use this class. + */ template class DoFSubstructAccessor : public DoFAccessor, public TriaAccessor { @@ -353,26 +354,26 @@ class DoFSubstructAccessor : public DoFAccessor, /** - Intermediate, "typedef"-class, not for public use. - - \subsection{Rationale} - - This class is only a wrapper class used to do kind of a typedef - with template parameters. This class and #DoFSubstructAccessor<2># - wrap the following names: - \begin{verbatim} - DoFSubstructAccessor<1> := DoFLineAccessor<1,CellAccessor<1> >; - DoFSubstructAccessor<2> := DoFQuadAccessor<2,CellAccessor<2> >; - \end{verbatim} - We do this rather complex (and needless, provided C++ the needed constructs!) - class hierarchy manipulation, since this way we can declare and implement - the \Ref{DoFCellAccessor} dimension independent as an inheritance from - #DoFSubstructAccessor#. If we had not declared these - types, we would have to write two class declarations, one for - #DoFCellAccessor<1>#, derived from #DoFLineAccessor<1,CellAccessor<1> ># - and one for #DoFCellAccessor<2>#, derived from - #DoFQuadAccessor<2,CellAccessor<2> >#. - */ + * Intermediate, "typedef"-class, not for public use. + * + * \subsection{Rationale} + * + * This class is only a wrapper class used to do kind of a typedef + * with template parameters. This class and #DoFSubstructAccessor<2># + * wrap the following names: + * \begin{verbatim} + * DoFSubstructAccessor<1> := DoFLineAccessor<1,CellAccessor<1> >; + * DoFSubstructAccessor<2> := DoFQuadAccessor<2,CellAccessor<2> >; + * \end{verbatim} + * We do this rather complex (and needless, provided C++ the needed constructs!) + * class hierarchy manipulation, since this way we can declare and implement + * the \Ref{DoFCellAccessor} dimension independent as an inheritance from + * #DoFSubstructAccessor#. If we had not declared these + * types, we would have to write two class declarations, one for + * #DoFCellAccessor<1>#, derived from #DoFLineAccessor<1,CellAccessor<1> ># + * and one for #DoFCellAccessor<2>#, derived from + * #DoFQuadAccessor<2,CellAccessor<2> >#. + */ class DoFSubstructAccessor<1> : public DoFLineAccessor<1,CellAccessor<1> > { public: /** @@ -395,9 +396,10 @@ class DoFSubstructAccessor<1> : public DoFLineAccessor<1,CellAccessor<1> > { /** - Intermediate, "typedef"-class, not for public use. - @see DoFSubstructAccessor<1> - */ + * Intermediate, "typedef"-class, not for public use. + * + * @see DoFSubstructAccessor<1> + */ class DoFSubstructAccessor<2> : public DoFQuadAccessor<2,CellAccessor<2> > { public: /** @@ -423,16 +425,16 @@ class DoFSubstructAccessor<2> : public DoFQuadAccessor<2,CellAccessor<2> > { /** - Grant access to the degrees of freedom on a cell. 
In fact, since all - access to the degrees of freedom has been enabled by the classes - #DoFLineAccessor<1># and #DoFQuadAccessor<2># for the space dimension - one and two, respectively, this class only collects the pieces - together by deriving from the appropriate #DoF*Accessor# and the - right #CellAccessor# and finally adding two functions which give - access to the neighbors and children as #DoFCellAccessor# objects - rather than #CellAccessor# objects (the latter function was inherited - from the #CellAccessor# class). - */ + * Grant access to the degrees of freedom on a cell. In fact, since all + * access to the degrees of freedom has been enabled by the classes + * #DoFLineAccessor<1># and #DoFQuadAccessor<2># for the space dimension + * one and two, respectively, this class only collects the pieces + * together by deriving from the appropriate #DoF*Accessor# and the + * right #CellAccessor# and finally adding two functions which give + * access to the neighbors and children as #DoFCellAccessor# objects + * rather than #CellAccessor# objects (the latter function was inherited + * from the #CellAccessor# class). + */ template class DoFCellAccessor : public DoFSubstructAccessor { public: diff --git a/deal.II/deal.II/include/dofs/dof_constraints.h b/deal.II/deal.II/include/dofs/dof_constraints.h index 1545ad4c57..91c32b1515 100644 --- a/deal.II/deal.II/include/dofs/dof_constraints.h +++ b/deal.II/deal.II/include/dofs/dof_constraints.h @@ -18,81 +18,81 @@ class dVector; /** - This class represents the matrix denoting the distribution of the degrees - of freedom of hanging nodes. - - The matrix is organized in lines (rows), but only those lines are stored - where constraints are present. Lines where only one entry (identity) is - present are not stored if not explicitely inserted. - - Constraint matrices are used to handle hanging nodes and other constrained - degrees of freedom. When building the global system matrix and the right - hand sides, you normally build them without taking care of the constraints, - purely on a topological base, i.e. by a loop over cells. In order to do - actual calculations, you have to 'condense' these matrices: eliminate - constrained degrees of freedom and distribute the appropriate values to - the unconstrained dofs. This changes the sparsity pattern of the sparse - matrices used in finite element calculations und is thus a quite expensive - operation. - - Condensation is done in four steps: first the large matrix sparsity pattern - is created (e.g. using #DoFHandler::create_sparsity_pattern#), then the - sparsity pattern of the condensed matrix is made out of the large sparsity - pattern and the constraints. After that the global matrix is assembled and - finally condensed. To do these steps, you have (at least) two possibilities: - \begin{itemize} - \item Use two different sparsity patterns and two different matrices: you - may eliminate the lines and rows connected with a constraint and create - a totally new sparsity pattern and a new system matrix. This has the - advantage that the resulting system of equations is free from artifacts - of the condensation process and is therefore faster in the solution process - since no unnecessary multiplications occur (see below). However, there are - two major drawbacks: keeping two matrices at the same time can be quite - unacceptable in many cases, since these matrices may be several 10 or even - 100 MB large. 
Secondly, the condensation process is quite expensive, since - {\it all} entries of the matrix have to be copied, not only those which are - subject to constraints. - - \item Use only one sparsity pattern and one matrix: doing it this way, the - condense functions add nonzero entries to the sparsity pattern of the - large matrix (with constrained nodes in it) where the condensation process - of the matrix will create additional nonzero elements. In the condensation - process itself, lines and rows subject to constraints are distributed to - the lines and rows of unconstrained nodes. The constrained lines remain in - place, however, unlike in the first possibility described above. In order - not to disturb the solution process, these lines and rows are filled with - zeros and identity on the main diagonal; the appropriate value in the right - hand sides is set to zero. This way, the constrained node will always get - the value zero upon solution of the equation system and will not couple to - other nodes any more. - - This method has the advantage that only one matrix and sparsity pattern is - needed thus using less memory. Additionally, the condensation process is - less expensive, since not all but only constrained values in the matrix - have to be copied. On the other hand, the solution process will take a bit - longer, since matrix vector multiplications will incur multiplications - with zeroes in the lines subject to constraints. Additionally, the vector - size is larger than in the first possibility, resulting in more memory - consumption for those iterative solution methods using a larger number of - auxiliary vectors (e.g. methods using explicite orthogonalization - procedures). - \end{verbatim} - - Usually, the second way is chosen since memory consumption upon construction - of a second matrix rules out the first possibility. - - This class provides two sets of #condense# functions: those taking two - arguments refer to the first possibility above, those taking only one do - their job in-place and refer to the second possibility. - - Condensing vectors works exactly as described above for matrices. - - After solving the condensed system of equations, the solution vector has to - be redistributed. This is done by the two #distribute# function, one working - with two vectors, one working in-place. The operation of distribution undoes - the condensation process in some sense, but it should be noted that it is not - the inverse operation. - */ + * This class represents the matrix denoting the distribution of the degrees + * of freedom of hanging nodes. + * + * The matrix is organized in lines (rows), but only those lines are stored + * where constraints are present. Lines where only one entry (identity) is + * present are not stored if not explicitely inserted. + * + * Constraint matrices are used to handle hanging nodes and other constrained + * degrees of freedom. When building the global system matrix and the right + * hand sides, you normally build them without taking care of the constraints, + * purely on a topological base, i.e. by a loop over cells. In order to do + * actual calculations, you have to 'condense' these matrices: eliminate + * constrained degrees of freedom and distribute the appropriate values to + * the unconstrained dofs. This changes the sparsity pattern of the sparse + * matrices used in finite element calculations und is thus a quite expensive + * operation. 
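
As a small worked example of what condensation does, consider three dofs of which dof 2 is a hanging node constrained to the mean of dofs 0 and 1. The following stand-alone snippet performs the in-place variant described in this documentation (distribute the constrained row and column, clear them, put a one on the diagonal and a zero into the right hand side, and finally redistribute the solution); the elimination formula used here is the standard one and not copied from the library:
\begin{verbatim}
#include <cstdio>

int main ()
{
  // a small symmetric system A u = b with dofs 0,1,2; u2 = 0.5 u0 + 0.5 u1
  double A[3][3] = { { 4, 1, 1 },
                     { 1, 4, 1 },
                     { 1, 1, 4 } };
  double b[3]    = { 1, 2, 0 };

  const double c[2] = { 0.5, 0.5 };   // weights of the constraint

  // distribute row 2 and column 2 to the unconstrained dofs 0 and 1
  for (int i = 0; i < 2; ++i)
    {
      for (int j = 0; j < 2; ++j)
        A[i][j] += c[j]*A[i][2] + c[i]*A[2][j] + c[i]*c[j]*A[2][2];
      b[i] += c[i]*b[2];
    }

  // clear the constrained row and column, identity on the diagonal, zero rhs
  for (int k = 0; k < 2; ++k)
    A[2][k] = A[k][2] = 0;
  A[2][2] = 1;
  b[2]    = 0;

  // solve the remaining 2x2 block by hand
  const double det = A[0][0]*A[1][1] - A[0][1]*A[1][0];
  double u[3];
  u[0] = ( A[1][1]*b[0] - A[0][1]*b[1]) / det;
  u[1] = (-A[1][0]*b[0] + A[0][0]*b[1]) / det;

  // 'distribute' the solution back to the constrained dof
  u[2] = c[0]*u[0] + c[1]*u[1];

  std::printf ("u = (%f, %f, %f)\n", u[0], u[1], u[2]);
}
\end{verbatim}
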
+ * + * Condensation is done in four steps: first the large matrix sparsity pattern + * is created (e.g. using #DoFHandler::create_sparsity_pattern#), then the + * sparsity pattern of the condensed matrix is made out of the large sparsity + * pattern and the constraints. After that the global matrix is assembled and + * finally condensed. To do these steps, you have (at least) two possibilities: + * \begin{itemize} + * \item Use two different sparsity patterns and two different matrices: you + * may eliminate the lines and rows connected with a constraint and create + * a totally new sparsity pattern and a new system matrix. This has the + * advantage that the resulting system of equations is free from artifacts + * of the condensation process and is therefore faster in the solution process + * since no unnecessary multiplications occur (see below). However, there are + * two major drawbacks: keeping two matrices at the same time can be quite + * unacceptable in many cases, since these matrices may be several 10 or even + * 100 MB large. Secondly, the condensation process is quite expensive, since + * {\it all} entries of the matrix have to be copied, not only those which are + * subject to constraints. + * + * \item Use only one sparsity pattern and one matrix: doing it this way, the + * condense functions add nonzero entries to the sparsity pattern of the + * large matrix (with constrained nodes in it) where the condensation process + * of the matrix will create additional nonzero elements. In the condensation + * process itself, lines and rows subject to constraints are distributed to + * the lines and rows of unconstrained nodes. The constrained lines remain in + * place, however, unlike in the first possibility described above. In order + * not to disturb the solution process, these lines and rows are filled with + * zeros and identity on the main diagonal; the appropriate value in the right + * hand sides is set to zero. This way, the constrained node will always get + * the value zero upon solution of the equation system and will not couple to + * other nodes any more. + * + * This method has the advantage that only one matrix and sparsity pattern is + * needed thus using less memory. Additionally, the condensation process is + * less expensive, since not all but only constrained values in the matrix + * have to be copied. On the other hand, the solution process will take a bit + * longer, since matrix vector multiplications will incur multiplications + * with zeroes in the lines subject to constraints. Additionally, the vector + * size is larger than in the first possibility, resulting in more memory + * consumption for those iterative solution methods using a larger number of + * auxiliary vectors (e.g. methods using explicite orthogonalization + * procedures). + * \end{verbatim} + * + * Usually, the second way is chosen since memory consumption upon construction + * of a second matrix rules out the first possibility. + * + * This class provides two sets of #condense# functions: those taking two + * arguments refer to the first possibility above, those taking only one do + * their job in-place and refer to the second possibility. + * + * Condensing vectors works exactly as described above for matrices. + * + * After solving the condensed system of equations, the solution vector has to + * be redistributed. This is done by the two #distribute# function, one working + * with two vectors, one working in-place. 
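
A sketch of the corresponding call sequence for the one-matrix variant; the signatures, constructor arguments and the assembly and solver parts are assumptions or omitted, only the names (#create_sparsity_pattern#, #make_constraint_matrix#, #condense#, #distribute#, #dSMatrixStruct#, #dSMatrix#) are the ones referred to in this documentation:
\begin{verbatim}
// Hedged sketch only: signatures and types are assumptions.
void solve_with_hanging_nodes (DoFHandler<2> &dof_handler,
                               dVector       &right_hand_side,
                               dVector       &solution)
{
  dSMatrixStruct sparsity (dof_handler.n_dofs(),
                           /* max entries per row, argument assumed */ 20);
  dof_handler.create_sparsity_pattern (sparsity);     // large pattern

  ConstraintMatrix constraints = dof_handler.make_constraint_matrix ();
  constraints.condense (sparsity);                    // condensed pattern

  dSMatrix system_matrix (sparsity);
  // ... assemble system_matrix and right_hand_side cell by cell ...

  constraints.condense (system_matrix);               // condense in place
  constraints.condense (right_hand_side);

  // ... solve the condensed system into 'solution' ...

  constraints.distribute (solution);                  // undo the condensation
}
\end{verbatim}
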
The operation of distribution undoes + * the condensation process in some sense, but it should be noted that it is not + * the inverse operation. + */ class ConstraintMatrix { public: /** diff --git a/deal.II/deal.II/include/dofs/dof_handler.h b/deal.II/deal.II/include/dofs/dof_handler.h index 4e3793c943..7d3cd74743 100644 --- a/deal.II/deal.II/include/dofs/dof_handler.h +++ b/deal.II/deal.II/include/dofs/dof_handler.h @@ -37,10 +37,10 @@ class ConstraintMatrix; /** - Store the indices of the degrees of freedom which are located on the lines. - Declare it to have a template parameter, but do not actually declare - other types than those explicitely instantiated. - */ + * Store the indices of the degrees of freedom which are located on the lines. + * Declare it to have a template parameter, but do not actually declare + * other types than those explicitely instantiated. + */ template class DoFLevel; @@ -49,47 +49,47 @@ class DoFLevel; /** - Store the indices of the degrees of freedom which are located on the lines. - - \subsection{Information for all #DoFLevel# classes} - - The #DoFLevel# classes - store the global indices of the degrees of freedom for each cell on a - certain level. The index or number of a degree of freedom is the zero-based - index of the according value in the solution vector and the row and column - index in the global matrix or the multigrid matrix for this level. These - indices refer to the unconstrained vectors and matrices, where we have not - taken account of the constraints introduced by hanging nodes. If more than - one value corresponds to one basis function, for example for vector equations - where the solution is vector valued and thus has several degrees of freedom - for each basis function, we nonetheless store only one index. This can then - be viewed as the index into a block vector, where each block contains the - different values according to a degree of freedom. It is left to the derived - classes, whether the values in a block are stored consecutively or distributed - (e.g. if the solution function is $u=(u_1, u_2)$, we could store the values - in the solution vector like - $\ldots, u_1^m, u_2^m, u_1^{m+1}, u_2^{m+1},\ldots$ with $m$ denoting the - $m$th basis function, or $\ldots, u_1^m, u_1^{m+1}, u_1^{m+2}, \ldots, - u_2^m, u_2^{m+1}, u_2^{m+2}, \ldots$, respectively). Likewise, the - constraint matrix returned by #DoFHandler::make_constraint_matrix ()# is then - to be understood as a block matrix. - - The storage format of the degrees of freedom indices (short: DoF indices) is - somewhat like a mirror of the data structures of the triangulation classes. - There is a hierarchy of #DoFLevel# classes for the different dimensions - which have objects named #line_dofs#, #quad_dofs# and so on, in which the - indices of DoFs located on lines and quads, respectively, are stored. The - indices are stored levelwise. The layout in - these arrays is as follows: if for a selected finite element (use - #DoFHandler::distribute_dofs()# to select a finite element) the number of - DoFs on each line (without those in the vertices) is #N#, then the length - of the #line_dofs# array is #N# times the number of lines on this level. The - DoF indices for the #i#th line are at the positions #N*i...(N+1)*i-1#. - - The DoF indices for vertices are not stored this way, since they need - different treatment in multigrid environments. If no multigrid is used, the - indices are stored in the #vertex_dofs# array of the #DoFHandler# class. 
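
Read as index arithmetic, the levelwise layout described above gives each line a block of #N# consecutive slots in the #line_dofs# array of its level (illustration only):
\begin{verbatim}
#include <cstdio>

// With N dofs per line (excluding vertices, N > 0 assumed), line i of a
// level owns N consecutive slots of that level's line_dofs array.
void line_dof_range (const unsigned int i,     // line number on this level
                     const unsigned int N,     // dofs per line
                     unsigned int &first, unsigned int &last)
{
  first = N * i;                // first slot of line i
  last  = N * (i+1) - 1;        // last slot of line i
}

int main ()
{
  unsigned int first, last;
  line_dof_range (3, 2, first, last);   // line 3, two dofs per line
  std::printf ("line 3 uses slots %u..%u of line_dofs\n", first, last);  // 6..7
}
\end{verbatim}
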
- */ + * Store the indices of the degrees of freedom which are located on the lines. + * + * \subsection{Information for all #DoFLevel# classes} + * + * The #DoFLevel# classes + * store the global indices of the degrees of freedom for each cell on a + * certain level. The index or number of a degree of freedom is the zero-based + * index of the according value in the solution vector and the row and column + * index in the global matrix or the multigrid matrix for this level. These + * indices refer to the unconstrained vectors and matrices, where we have not + * taken account of the constraints introduced by hanging nodes. If more than + * one value corresponds to one basis function, for example for vector equations + * where the solution is vector valued and thus has several degrees of freedom + * for each basis function, we nonetheless store only one index. This can then + * be viewed as the index into a block vector, where each block contains the + * different values according to a degree of freedom. It is left to the derived + * classes, whether the values in a block are stored consecutively or distributed + * (e.g. if the solution function is $u=(u_1, u_2)$, we could store the values + * in the solution vector like + * $\ldots, u_1^m, u_2^m, u_1^{m+1}, u_2^{m+1},\ldots$ with $m$ denoting the + * $m$th basis function, or $\ldots, u_1^m, u_1^{m+1}, u_1^{m+2}, \ldots, + * u_2^m, u_2^{m+1}, u_2^{m+2}, \ldots$, respectively). Likewise, the + * constraint matrix returned by #DoFHandler::make_constraint_matrix ()# is then + * to be understood as a block matrix. + * + * The storage format of the degrees of freedom indices (short: DoF indices) is + * somewhat like a mirror of the data structures of the triangulation classes. + * There is a hierarchy of #DoFLevel# classes for the different dimensions + * which have objects named #line_dofs#, #quad_dofs# and so on, in which the + * indices of DoFs located on lines and quads, respectively, are stored. The + * indices are stored levelwise. The layout in + * these arrays is as follows: if for a selected finite element (use + * #DoFHandler::distribute_dofs()# to select a finite element) the number of + * DoFs on each line (without those in the vertices) is #N#, then the length + * of the #line_dofs# array is #N# times the number of lines on this level. The + * DoF indices for the #i#th line are at the positions #N*i...(N+1)*i-1#. + * + * The DoF indices for vertices are not stored this way, since they need + * different treatment in multigrid environments. If no multigrid is used, the + * indices are stored in the #vertex_dofs# array of the #DoFHandler# class. + */ class DoFLevel<1> { public: /** @@ -104,9 +104,9 @@ class DoFLevel<1> { /** - Store the indices of the degrees of freedom which are located on quads. - See \Ref{DoFLevel<1>} for more information. - */ + * Store the indices of the degrees of freedom which are located on quads. + * See \Ref{DoFLevel<1>} for more information. + */ class DoFLevel<2> : public DoFLevel<1> { public: /** @@ -123,12 +123,12 @@ class DoFLevel<2> : public DoFLevel<1> { /** - Define some types which differ between the dimensions. This class - is analogous to the \Ref{TriaDimensionInfo} class hierarchy. - - @see DoFDimensionInfo<1> - @see DoFDimensionInfo<2> - */ + * Define some types which differ between the dimensions. This class + * is analogous to the \Ref{TriaDimensionInfo} class hierarchy. 
+ * + * @see DoFDimensionInfo<1> + * @see DoFDimensionInfo<2> + */ template class DoFDimensionInfo; @@ -137,10 +137,10 @@ class DoFDimensionInfo; /** - Define some types for the DoF handling in one dimension. - - The types have the same meaning as those declared in \Ref{TriaDimensionInfo<2>}. - */ + * Define some types for the DoF handling in one dimension. + * + * The types have the same meaning as those declared in \Ref{TriaDimensionInfo<2>}. + */ class DoFDimensionInfo<1> { public: typedef TriaRawIterator<1,DoFCellAccessor<1> > raw_line_iterator; @@ -166,10 +166,10 @@ class DoFDimensionInfo<1> { /** - Define some types for the DoF handling in two dimensions. - - The types have the same meaning as those declared in \Ref{TriaDimensionInfo<2>}. - */ + * Define some types for the DoF handling in two dimensions. + * + * The types have the same meaning as those declared in \Ref{TriaDimensionInfo<2>}. + */ class DoFDimensionInfo<2> { public: typedef TriaRawIterator<2,DoFLineAccessor<2,LineAccessor<2> > > raw_line_iterator; @@ -195,20 +195,20 @@ class DoFDimensionInfo<2> { /** - Give names to the different possibilities of renumbering the degrees - of freedom. - - \begin{itemize} - \item #Cuthill_McKee# and #reverse_Cuthill_McKee# traverse the triangulation - in a diagonal, advancing front like method and produce matrices with an - almost minimal bandwidth. - \item #reverse_Cuthill_McKey# does the same thing, but numbers the dofs in - the reverse order. - \end{itemize} - - For a description of the algorithms see the book of Schwarz (H.R.Scharz: - Methode der finiten Elemente). - */ + * Give names to the different possibilities of renumbering the degrees + * of freedom. + * + * \begin{itemize} + * \item #Cuthill_McKee# and #reverse_Cuthill_McKee# traverse the triangulation + * in a diagonal, advancing front like method and produce matrices with an + * almost minimal bandwidth. + * \item #reverse_Cuthill_McKey# does the same thing, but numbers the dofs in + * the reverse order. + * \end{itemize} + * + * For a description of the algorithms see the book of Schwarz (H.R.Scharz: + * Methode der finiten Elemente). + */ enum RenumberingMethod { Cuthill_McKee, reverse_Cuthill_McKee @@ -219,212 +219,212 @@ enum RenumberingMethod { /** - Manage the distribution and numbering of the degrees of freedom for - non-multigrid algorithms. - - We store a list of numbers for each cells - denoting the mapping between the degrees of freedom on this cell - and the global number of this degree of freedom; the number of a - degree of freedom lying on the interface of two cells is thus stored - twice, but is the same. The numbers refer to the unconstrained - matrices and vectors. The layout of storage of these indices is - described in the \Ref{DoFLevel} class documentation. - - Additionally, the DoFHandler is able to generate the condensation - matrix which connects constrained and unconstrained matrices and - vectors. - - Finally it offers a starting point for the assemblage of the matrices - by offering #begin()# and #end()# functions which return iterators - to walk on the DoF structures as well as the triangulation data. - These iterators work much like those described in the documentation - of the #Triangulation# class and of the iterator classes themselved, - but offer more functionality than pure triangulation iterators. The - order in which dof iterators are presented by the #++# and #--# operators - is the same as that for the alike triangulation iterators. 
- - - \subsection{Distribution of degrees of freedom} - - The degrees of freedom (`dofs') are distributed on the given triangulation - by the function #distribute_dofs()#. It gets passed a finite element object - describing how many degrees of freedom are located on vertices, lines, etc. - It traverses the triangulation cell by cell and numbers the dofs of that - cell if not yet numbered. For non-multigrid algorithms, only active cells - are considered. - - Since the triangulation is traversed starting with the cells of the coarsest - active level and going to more refined levels, the lowest numbers for dofs - are given to the largest cells as well as their bounding lines and vertices, - with the dofs of more refined cells getting higher numbers. - - This numbering implies very large bandwiths of the resulting matrices and - is thus vastly suboptimal for some solution algorithms. For this reason, - the #DoFHandler# class offers the function #renumber_dofs# which reorders - the dof numbering according to some scheme. Presently available are the - Cuthill-McKey (CM) and the Reverse Cuthill-McKey algorithm. These algorithms - have one major drawback: they require a good starting point, i.e. the degree - of freedom index afterwards to be numbered zero. This can thus be given by - the user, e.g. by exploiting knowledge of the actual topology of the - domain. It is also possible to given several starting indices, which may - be used to simulate a simple upstream numbering (by giving the inflow - dofs as starting values) or to make preconditioning faster (by letting - the dirichlet boundary indices be starting points). - - If no starting index is given, one is chosen by the program, namely one - with the smallest coordination number (the coordination number is the - number of other dofs this dof couples with). This dof is usually located - on the boundary of the domain. There is, however, large ambiguity in this - when using the hierarchical meshes used in this library, since in most - cases the computational domain is not approximated by tilting and deforming - elements and by plugging together variable numbers of elements at vertices, - but rather by hierarchical refinement. There is therefore a large number - of dofs with equal coordination numbers. The renumbering algorithms will - therefore not give optimal results. - - In the book of Schwarz (H.R.Schwarz: Methode der finiten Elemente), it is - advised to test many starting points, if possible all with the smallest - coordination number and also those with slightly higher numbers. However, - this seems only possible for meshes with at most several dozen or a few - hundred elements found in small engineering problems of the early 1980s - (the second edition was published in 1984), but certainly not with those - used in this library, featuring several 10,000 to a few 100,000 elements. - - On the other hand, the need to reduce the bandwidth has decreased since - with the mentioned number of cells, only iterative solution methods are - able to solve the resulting matrix systems. These, however, are not so - demanding with respect to the bandwidth as direct solvers used for - smaller problems. Things like upstream numbering become much more important - in recent times, so the suboptimality of the renumbering algorithms is - not that important any more. - - - \subsection{Implementation of renumbering schemes} - - The renumbering algorithms need quite a lot of memory, since they have - to store for each dof with which other dofs it couples. 
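
The coordination number used to pick a starting dof is simply the number of other dofs a given dof couples with, i.e. the number of off-diagonal nonzeros in its row of the sparsity pattern; a minimal stand-alone illustration, not library code:
\begin{verbatim}
#include <cstdio>
#include <vector>
#include <set>

int main ()
{
  // sparsity pattern of a tiny 1d mesh with 4 dofs: each dof couples
  // with itself and its immediate neighbours
  std::vector<std::set<int> > couplings (4);
  for (int i = 0; i < 4; ++i)
    {
      couplings[i].insert (i);
      if (i > 0) couplings[i].insert (i-1);
      if (i < 3) couplings[i].insert (i+1);
    }

  for (int i = 0; i < 4; ++i)
    std::printf ("dof %d: coordination number %d\n",
                 i, static_cast<int>(couplings[i].size()) - 1);
  // dofs 0 and 3 (the boundary ones) have the smallest coordination
  // number, which is why the default starting point usually ends up
  // on the boundary of the domain
}
\end{verbatim}
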
This is done - using a #dSMatrixStruct# object used to store the sparsity pattern. It - is not useful for the user to do anything between distributing the dofs - and renumbering, i.e. the calls to #DoFHandler::distribute_dofs# and - #DoFHandler::renumber_dofs# should follow each other immediately. If - you try to create a sparsity pattern or anything else in between, these - will be invalid afterwards. - - The renumbering may take care of dof-to-dof couplings only induced by - eliminating constraints. In addition to the memory consumption mentioned - above, this also takes quite some computational time, but it may be - switched of upon calling the #renumber_dofs# function. This will then - give inferior results, since knots in the graph (representing dofs) - are not found to be neighbors even if they would be after condensation. - - The renumbering algorithms work on a purely algebraic basis, due to the - isomorphism between the graph theoretical groundwork underlying the - algorithms and binary matrices (matrices of which the entries are binary - values) represented by the sparsity patterns. In special, the algorithms - do not try to exploit topological knowledge (e.g. corner detection) to - find appropriate starting points. This way, however, they work in - arbitrary space dimension. - - If you want to give starting points, you may give a list of dof indices - which will form the first step of the renumbering. The dofs of the list - will be consecutively numbered starting with zero, i.e. this list is not - renumbered according to the coordination number of the nodes. Indices not - in the allowed range are deleted. If no index is allowed, the algorithm - will search for its own starting point. - - - \subsection{Results of renumbering} - - The renumbering schemes mentioned above do not lead to optimal results. - However, after all there is no algorithm that accomplishes this within - reasonable time. There are situations where the lack of optimality even - leads to worse results than with the original, crude, levelwise numering - scheme; one of these examples is a mesh of four cells of which always - those cells are refined which are neighbors to the center (you may call - this mesh a `zoom in' mesh). In one such example the bandwidth was - increased by about 50 per cent. - - In most other cases, the bandwith is reduced significantly. The reduction - is the better the less structured the grid is. With one grid where the - cells were refined according to a random driven algorithm, the bandwidth - was reduced by a factor of six. - - Using the constraint information usually leads to reductions in bandwidth - of 10 or 20 per cent, but may for some very unstructured grids also lead - to an increase. You have to weigh the decrease in your case with the time - spent to use the constraint information, which usually is several times - longer than the `pure' renumbering algorithm. - - In almost all cases, the renumbering scheme finds a corner to start with. - Since there is more than one corner in most grids and since even an - interior degree of freedom may be a better starting point, giving the - starting point by the user may be a viable way if you have a simple - scheme to derive a suitable point (e.g. by successively taking the - third child of the cell top left of the coarsest level, taking its - third vertex and the dof index thereof, if you want the top left corner - vertex). If you do not know beforehand what your grid will look like - (e.g. 
when using adaptive algorithms), searching a best starting point - may be difficult, however, and in many cases will not justify the effort. - - \subsection{Data transfer between grids} - - The #DoFHandler# class offers two functions #make_transfer_matrix# which create - a matrix to transform the data of one grid to another. The functions assumes the - coarsest mesh of the two grids to be the same. However there are few ways to - check this (only the number of cells on the coarsest grid is compared). Also, - the selected finite element type of the two degree of freedom handler objects - must be the same. - - The algorithm goes recursively from the coarse mesh cells to their children - until the grids differ at this level. It then tries to prolong or restrict the - old cell(s) to the new cell(s) and makes up a matrix of these prolongations and - restrictions. This matrix multiplied with a vector on the old grid yields an - approximation of the projection of the function on the old grid to the new one. - - Building and using the transfer matrix is usually a quite expensive operation, - since we have to perform two runs over all cells (one for building the sparsity - structure, one to build the entries) and because of the memory consumption. - It may, however, pay if you have many - equations, since then the entries in the matrix can be considered as block - entries which are then applied to all function values at a given degree of - freedom. - - To build the matrix, you have to call first - #make_transfer_matrix (old_dof_object, sparsity_pattern);#, then create a - sparse matrix out of this pattern, e.g. by #dSMatrix m(sparsity_pattern);# - and finally give this to the second run: - #make_transfer_matrix (old_dof_object, m);#. The spasity pattern created - by the first run is automatically compressed. - - When creating the #dSMatrixStruct# sparsity pattern, you have to give the - dimension and the maximum number of entries per row. Obviously the image - dimension is the number of dofs on the new grid (you can get this using the - #n_dofs()# function), while the range dimension is the number of dofs on the - old grid. The maximum number of entries per row is determined by the maximum - number of levels $d$ which we have to cross upon transferring from one cell to - another (presently, transfer of one cell is only possible for #d=0,1#, i.e. - the two cells match or one is refined once more than the other, the - number of degrees of freedom per per vertex $d_v$, those on lines $d_l$, those - on quads $d_q$ and the number of subcells a cell is - refined to, which is $2**dim$. The maximum number of entries per row in one - dimension is then given by $(2*d_l+d_v)*2+1$ if $d=1$. For example, a one - dimensional linear element would need two entries per row. - In two dimensions, the maxmimum number is $(4*d_q+12*d_l+5*d_v)*4+1$ if $d=1$. - You can get these numbers by drawing little pictures and counting, there is - no mystique behind this. You can also get the right number by calling the - #max_transfer_entries (max_level_difference)# function. The actual number - depends on the finite element selected and may be much less, especially in - higher dimensions. - - If you do not have multiple equations and do not really use the matrix but still - have to transfer an arbitrary number of vectors to transfer, you can use the - #transfer()# function, which is able to transfer any number of vectors in only - one loop over all cells and without the memory consumption of the matrix. 
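
Written out as a sketch, the two-pass recipe just described looks as follows; the constructor arguments and the object the functions are called on are assumptions, only the names (#make_transfer_matrix#, #dSMatrixStruct#, #dSMatrix#, #n_dofs#, #max_transfer_entries#) are taken from this documentation:
\begin{verbatim}
  // Hedged sketch of the two-pass transfer; signatures are assumptions.
  dSMatrixStruct transfer_pattern (new_dof_handler.n_dofs(),   // image: new grid
                                   old_dof_handler.n_dofs(),   // range: old grid
                                   new_dof_handler.max_transfer_entries (1));

  // first run: build the sparsity structure (compressed automatically)
  new_dof_handler.make_transfer_matrix (old_dof_handler, transfer_pattern);

  // second run: fill in the entries
  dSMatrix transfer (transfer_pattern);
  new_dof_handler.make_transfer_matrix (old_dof_handler, transfer);

  // applying 'transfer' to a vector on the old grid now yields an
  // approximation of that function on the new grid
\end{verbatim}
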
The - matrix seems only useful when trying to transfer whole matrices instead of - rebuilding them on the new grid. - - @author Wolfgang Bangerth, February 1998 - */ + * Manage the distribution and numbering of the degrees of freedom for + * non-multigrid algorithms. + * + * We store a list of numbers for each cells + * denoting the mapping between the degrees of freedom on this cell + * and the global number of this degree of freedom; the number of a + * degree of freedom lying on the interface of two cells is thus stored + * twice, but is the same. The numbers refer to the unconstrained + * matrices and vectors. The layout of storage of these indices is + * described in the \Ref{DoFLevel} class documentation. + * + * Additionally, the DoFHandler is able to generate the condensation + * matrix which connects constrained and unconstrained matrices and + * vectors. + * + * Finally it offers a starting point for the assemblage of the matrices + * by offering #begin()# and #end()# functions which return iterators + * to walk on the DoF structures as well as the triangulation data. + * These iterators work much like those described in the documentation + * of the #Triangulation# class and of the iterator classes themselved, + * but offer more functionality than pure triangulation iterators. The + * order in which dof iterators are presented by the #++# and #--# operators + * is the same as that for the alike triangulation iterators. + * + * + * \subsection{Distribution of degrees of freedom} + * + * The degrees of freedom (`dofs') are distributed on the given triangulation + * by the function #distribute_dofs()#. It gets passed a finite element object + * describing how many degrees of freedom are located on vertices, lines, etc. + * It traverses the triangulation cell by cell and numbers the dofs of that + * cell if not yet numbered. For non-multigrid algorithms, only active cells + * are considered. + * + * Since the triangulation is traversed starting with the cells of the coarsest + * active level and going to more refined levels, the lowest numbers for dofs + * are given to the largest cells as well as their bounding lines and vertices, + * with the dofs of more refined cells getting higher numbers. + * + * This numbering implies very large bandwiths of the resulting matrices and + * is thus vastly suboptimal for some solution algorithms. For this reason, + * the #DoFHandler# class offers the function #renumber_dofs# which reorders + * the dof numbering according to some scheme. Presently available are the + * Cuthill-McKey (CM) and the Reverse Cuthill-McKey algorithm. These algorithms + * have one major drawback: they require a good starting point, i.e. the degree + * of freedom index afterwards to be numbered zero. This can thus be given by + * the user, e.g. by exploiting knowledge of the actual topology of the + * domain. It is also possible to given several starting indices, which may + * be used to simulate a simple upstream numbering (by giving the inflow + * dofs as starting values) or to make preconditioning faster (by letting + * the dirichlet boundary indices be starting points). + * + * If no starting index is given, one is chosen by the program, namely one + * with the smallest coordination number (the coordination number is the + * number of other dofs this dof couples with). This dof is usually located + * on the boundary of the domain. 
There is, however, large ambiguity in this + * when using the hierarchical meshes used in this library, since in most + * cases the computational domain is not approximated by tilting and deforming + * elements and by plugging together variable numbers of elements at vertices, + * but rather by hierarchical refinement. There is therefore a large number + * of dofs with equal coordination numbers. The renumbering algorithms will + * therefore not give optimal results. + * + * In the book of Schwarz (H.R.Schwarz: Methode der finiten Elemente), it is + * advised to test many starting points, if possible all with the smallest + * coordination number and also those with slightly higher numbers. However, + * this seems only possible for meshes with at most several dozen or a few + * hundred elements found in small engineering problems of the early 1980s + * (the second edition was published in 1984), but certainly not with those + * used in this library, featuring several 10,000 to a few 100,000 elements. + * + * On the other hand, the need to reduce the bandwidth has decreased since + * with the mentioned number of cells, only iterative solution methods are + * able to solve the resulting matrix systems. These, however, are not so + * demanding with respect to the bandwidth as direct solvers used for + * smaller problems. Things like upstream numbering become much more important + * in recent times, so the suboptimality of the renumbering algorithms is + * not that important any more. + * + * + * \subsection{Implementation of renumbering schemes} + * + * The renumbering algorithms need quite a lot of memory, since they have + * to store for each dof with which other dofs it couples. This is done + * using a #dSMatrixStruct# object used to store the sparsity pattern. It + * is not useful for the user to do anything between distributing the dofs + * and renumbering, i.e. the calls to #DoFHandler::distribute_dofs# and + * #DoFHandler::renumber_dofs# should follow each other immediately. If + * you try to create a sparsity pattern or anything else in between, these + * will be invalid afterwards. + * + * The renumbering may take care of dof-to-dof couplings only induced by + * eliminating constraints. In addition to the memory consumption mentioned + * above, this also takes quite some computational time, but it may be + * switched of upon calling the #renumber_dofs# function. This will then + * give inferior results, since knots in the graph (representing dofs) + * are not found to be neighbors even if they would be after condensation. + * + * The renumbering algorithms work on a purely algebraic basis, due to the + * isomorphism between the graph theoretical groundwork underlying the + * algorithms and binary matrices (matrices of which the entries are binary + * values) represented by the sparsity patterns. In special, the algorithms + * do not try to exploit topological knowledge (e.g. corner detection) to + * find appropriate starting points. This way, however, they work in + * arbitrary space dimension. + * + * If you want to give starting points, you may give a list of dof indices + * which will form the first step of the renumbering. The dofs of the list + * will be consecutively numbered starting with zero, i.e. this list is not + * renumbered according to the coordination number of the nodes. Indices not + * in the allowed range are deleted. If no index is allowed, the algorithm + * will search for its own starting point. 
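
A sketch of the intended call order; the constructor and argument lists are assumptions, the point is only that #renumber_dofs# is called immediately after #distribute_dofs#:
\begin{verbatim}
  // Hedged sketch: constructor and argument lists are assumptions; only
  // the call order matters, as described above.
  DoFHandler<2> dof_handler (&triangulation);

  dof_handler.distribute_dofs (finite_element);   // number the dofs

  // renumber immediately afterwards; a sparsity pattern or anything else
  // built in between would be invalidated by the renumbering
  dof_handler.renumber_dofs (Cuthill_McKee);
\end{verbatim}
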
+ * + * + * \subsection{Results of renumbering} + * + * The renumbering schemes mentioned above do not lead to optimal results. + * However, after all there is no algorithm that accomplishes this within + * reasonable time. There are situations where the lack of optimality even + * leads to worse results than with the original, crude, levelwise numering + * scheme; one of these examples is a mesh of four cells of which always + * those cells are refined which are neighbors to the center (you may call + * this mesh a `zoom in' mesh). In one such example the bandwidth was + * increased by about 50 per cent. + * + * In most other cases, the bandwith is reduced significantly. The reduction + * is the better the less structured the grid is. With one grid where the + * cells were refined according to a random driven algorithm, the bandwidth + * was reduced by a factor of six. + * + * Using the constraint information usually leads to reductions in bandwidth + * of 10 or 20 per cent, but may for some very unstructured grids also lead + * to an increase. You have to weigh the decrease in your case with the time + * spent to use the constraint information, which usually is several times + * longer than the `pure' renumbering algorithm. + * + * In almost all cases, the renumbering scheme finds a corner to start with. + * Since there is more than one corner in most grids and since even an + * interior degree of freedom may be a better starting point, giving the + * starting point by the user may be a viable way if you have a simple + * scheme to derive a suitable point (e.g. by successively taking the + * third child of the cell top left of the coarsest level, taking its + * third vertex and the dof index thereof, if you want the top left corner + * vertex). If you do not know beforehand what your grid will look like + * (e.g. when using adaptive algorithms), searching a best starting point + * may be difficult, however, and in many cases will not justify the effort. + * + * \subsection{Data transfer between grids} + * + * The #DoFHandler# class offers two functions #make_transfer_matrix# which create + * a matrix to transform the data of one grid to another. The functions assumes the + * coarsest mesh of the two grids to be the same. However there are few ways to + * check this (only the number of cells on the coarsest grid is compared). Also, + * the selected finite element type of the two degree of freedom handler objects + * must be the same. + * + * The algorithm goes recursively from the coarse mesh cells to their children + * until the grids differ at this level. It then tries to prolong or restrict the + * old cell(s) to the new cell(s) and makes up a matrix of these prolongations and + * restrictions. This matrix multiplied with a vector on the old grid yields an + * approximation of the projection of the function on the old grid to the new one. + * + * Building and using the transfer matrix is usually a quite expensive operation, + * since we have to perform two runs over all cells (one for building the sparsity + * structure, one to build the entries) and because of the memory consumption. + * It may, however, pay if you have many + * equations, since then the entries in the matrix can be considered as block + * entries which are then applied to all function values at a given degree of + * freedom. + * + * To build the matrix, you have to call first + * #make_transfer_matrix (old_dof_object, sparsity_pattern);#, then create a + * sparse matrix out of this pattern, e.g. 
by #dSMatrix m(sparsity_pattern);# + * and finally give this to the second run: + * #make_transfer_matrix (old_dof_object, m);#. The spasity pattern created + * by the first run is automatically compressed. + * + * When creating the #dSMatrixStruct# sparsity pattern, you have to give the + * dimension and the maximum number of entries per row. Obviously the image + * dimension is the number of dofs on the new grid (you can get this using the + * #n_dofs()# function), while the range dimension is the number of dofs on the + * old grid. The maximum number of entries per row is determined by the maximum + * number of levels $d$ which we have to cross upon transferring from one cell to + * another (presently, transfer of one cell is only possible for #d=0,1#, i.e. + * the two cells match or one is refined once more than the other, the + * number of degrees of freedom per per vertex $d_v$, those on lines $d_l$, those + * on quads $d_q$ and the number of subcells a cell is + * refined to, which is $2**dim$. The maximum number of entries per row in one + * dimension is then given by $(2*d_l+d_v)*2+1$ if $d=1$. For example, a one + * dimensional linear element would need two entries per row. + * In two dimensions, the maxmimum number is $(4*d_q+12*d_l+5*d_v)*4+1$ if $d=1$. + * You can get these numbers by drawing little pictures and counting, there is + * no mystique behind this. You can also get the right number by calling the + * #max_transfer_entries (max_level_difference)# function. The actual number + * depends on the finite element selected and may be much less, especially in + * higher dimensions. + * + * If you do not have multiple equations and do not really use the matrix but still + * have to transfer an arbitrary number of vectors to transfer, you can use the + * #transfer()# function, which is able to transfer any number of vectors in only + * one loop over all cells and without the memory consumption of the matrix. The + * matrix seems only useful when trying to transfer whole matrices instead of + * rebuilding them on the new grid. + * + * @author Wolfgang Bangerth, February 1998 + */ template class DoFHandler : public DoFDimensionInfo { public: diff --git a/deal.II/deal.II/include/fe/fe.h b/deal.II/deal.II/include/fe/fe.h index 0e9f5bf18d..66e1029db4 100644 --- a/deal.II/deal.II/include/fe/fe.h +++ b/deal.II/deal.II/include/fe/fe.h @@ -17,9 +17,9 @@ template struct FiniteElementData; /** - Dimension dependent data for finite elements. See the #FiniteElementBase# - class for more information. - */ + * Dimension dependent data for finite elements. See the #FiniteElementBase# + * class for more information. + */ struct FiniteElementData<1> { /** * Number of degrees of freedom on @@ -87,9 +87,9 @@ struct FiniteElementData<1> { /** - Dimension dependent data for finite elements. See the #FiniteElementBase# - class for more information. - */ + * Dimension dependent data for finite elements. See the #FiniteElementBase# + * class for more information. + */ struct FiniteElementData<2> { /** * Number of degrees of freedom on @@ -169,23 +169,23 @@ struct FiniteElementData<2> { /** - Base class for finite elements in arbitrary dimensions. This class provides - several fields which describe a specific finite element and which are filled - by derived classes. It more or less only offers the fields and access - functions which makes it possible to copy finite elements without knowledge - of the actual type (linear, quadratic, etc). 
- - The implementation of this base class is split into two parts: those fields - which are not common to all dimensions (#dofs_per_quad# for example are only - useful for #dim>=2#) are put into the #FiniteElementData# class which - is explicitely specialized for all used dimensions, while those fields which - may be formulated in a dimension-independent way are put into the present - class. - - The different matrices are initialized with the correct size, such that in - the derived (concrete) finite element classes, their entries must only be - filled in; no resizing is needed. - */ + * Base class for finite elements in arbitrary dimensions. This class provides + * several fields which describe a specific finite element and which are filled + * by derived classes. It more or less only offers the fields and access + * functions which makes it possible to copy finite elements without knowledge + * of the actual type (linear, quadratic, etc). + * + * The implementation of this base class is split into two parts: those fields + * which are not common to all dimensions (#dofs_per_quad# for example are only + * useful for #dim>=2#) are put into the #FiniteElementData# class which + * is explicitely specialized for all used dimensions, while those fields which + * may be formulated in a dimension-independent way are put into the present + * class. + * + * The different matrices are initialized with the correct size, such that in + * the derived (concrete) finite element classes, their entries must only be + * filled in; no resizing is needed. + */ template struct FiniteElementBase : public FiniteElementData { public: @@ -333,54 +333,54 @@ struct FiniteElementBase : public FiniteElementData { /** - Finite Element in any dimension. This class declares the functionality - to fill the fields of the #FiniteElementBase# class. Since this is - something that depends on the actual finite element, the functions are - declared virtual if it is not possible to provide a reasonable standard - implementation. - - - \subsection{Finite Elements in one dimension} - - Finite elements in one dimension need only set the #restriction# and - #prolongation# matrices in #FiniteElementBase<1>#. The constructor of - this class in one dimension presets the #interface_constraints# matrix - by the unit matrix with dimension one. Changing this behaviour in - derived classes is generally not a reasonable idea and you risk getting - in terrible trouble. - - - \subsection{Finite elements in two dimensions} - - In addition to the fields already present in 1D, a constraint matrix - is needed in case two quads meet at a common line of which one is refined - once more than the other one. Then there are constraints referring to the - hanging nodes on that side of the line which is refined. These constraints - are represented by a $n\times m$-matrix #line_constraints#, where $n$ is the - number of degrees of freedom on the refined side (those dofs on the middle - vertex plus those on the two lines), and $m$ is that of the unrefined side - (those dofs on the two vertices plus those on the line). The matrix is thus - a rectangular one. - - The mapping of the dofs onto the indices of the matrix is as follows: - let $d_v$ be the number of dofs on a vertex, $d_l$ that on a line, then - $m=0...d_v-1$ refers to the dofs on vertex zero of the unrefined line, - $m=d_v...2d_v-1$ to those on vertex one, - $m=2d_v...2d_v+d_l-1$ to those on the line. 
- - Similarly, $n=0...d_v-1$ refers to the dofs on the middle vertex - (vertex one of child line zero, vertex zero of child line one), - $n=d_v...d_v+d_l-1$ refers to the dofs on child line zero, - $n=d_v+d_l...d_v+2d_l-1$ refers to the dofs on child line one. - Please note that we do not need to reserve space for the dofs on the - end vertices of the refined lines, since these must be mapped one-to-one - to the appropriate dofs of the vertices of the unrefined line. - - It should be noted that it is not possible to distribute a constrained - degree of freedom to other degrees of freedom which are themselves - constrained. Only one level of indirection is allowed. It is not known - at the time of this writing whether this is a constraint itself. - */ + * Finite Element in any dimension. This class declares the functionality + * to fill the fields of the #FiniteElementBase# class. Since this is + * something that depends on the actual finite element, the functions are + * declared virtual if it is not possible to provide a reasonable standard + * implementation. + * + * + * \subsection{Finite Elements in one dimension} + * + * Finite elements in one dimension need only set the #restriction# and + * #prolongation# matrices in #FiniteElementBase<1>#. The constructor of + * this class in one dimension presets the #interface_constraints# matrix + * by the unit matrix with dimension one. Changing this behaviour in + * derived classes is generally not a reasonable idea and you risk getting + * in terrible trouble. + * + * + * \subsection{Finite elements in two dimensions} + * + * In addition to the fields already present in 1D, a constraint matrix + * is needed in case two quads meet at a common line of which one is refined + * once more than the other one. Then there are constraints referring to the + * hanging nodes on that side of the line which is refined. These constraints + * are represented by a $n\times m$-matrix #line_constraints#, where $n$ is the + * number of degrees of freedom on the refined side (those dofs on the middle + * vertex plus those on the two lines), and $m$ is that of the unrefined side + * (those dofs on the two vertices plus those on the line). The matrix is thus + * a rectangular one. + * + * The mapping of the dofs onto the indices of the matrix is as follows: + * let $d_v$ be the number of dofs on a vertex, $d_l$ that on a line, then + * $m=0...d_v-1$ refers to the dofs on vertex zero of the unrefined line, + * $m=d_v...2d_v-1$ to those on vertex one, + * $m=2d_v...2d_v+d_l-1$ to those on the line. + * + * Similarly, $n=0...d_v-1$ refers to the dofs on the middle vertex + * (vertex one of child line zero, vertex zero of child line one), + * $n=d_v...d_v+d_l-1$ refers to the dofs on child line zero, + * $n=d_v+d_l...d_v+2d_l-1$ refers to the dofs on child line one. + * Please note that we do not need to reserve space for the dofs on the + * end vertices of the refined lines, since these must be mapped one-to-one + * to the appropriate dofs of the vertices of the unrefined line. + * + * It should be noted that it is not possible to distribute a constrained + * degree of freedom to other degrees of freedom which are themselves + * constrained. Only one level of indirection is allowed. It is not known + * at the time of this writing whether this is a constraint itself. 
+ */ template class FiniteElement : public FiniteElementBase { public: diff --git a/deal.II/deal.II/include/fe/fe_lib.lagrange.h b/deal.II/deal.II/include/fe/fe_lib.lagrange.h index 1ffbf129b9..adccc94626 100644 --- a/deal.II/deal.II/include/fe/fe_lib.lagrange.h +++ b/deal.II/deal.II/include/fe/fe_lib.lagrange.h @@ -10,18 +10,18 @@ /** - Define a (bi-, tri-, etc)linear finite element in #dim# space dimensions, - along with (bi-, tri-)linear (therefore isoparametric) transforms from the - unit cell to the real cell. - - The linear, isoparametric mapping from a point $\vec \xi$ on the unit cell - to a point $\vec x$ on the real cell is defined as - $$ \vec x(\vec \xi) = \sum_j {\vec p_j} N_j(\xi) $$ - where $\vec p_j$ is the vector to the $j$th corner point of the cell in - real space and $N_j(\vec \xi)$ is the value of the basis function associated - with the $j$th corner point, on the unit cell at point $\vec \xi$. The sum - over $j$ runs over all corner points. - */ + * Define a (bi-, tri-, etc)linear finite element in #dim# space dimensions, + * along with (bi-, tri-)linear (therefore isoparametric) transforms from the + * unit cell to the real cell. + * + * The linear, isoparametric mapping from a point $\vec \xi$ on the unit cell + * to a point $\vec x$ on the real cell is defined as + * $$ \vec x(\vec \xi) = \sum_j {\vec p_j} N_j(\xi) $$ + * where $\vec p_j$ is the vector to the $j$th corner point of the cell in + * real space and $N_j(\vec \xi)$ is the value of the basis function associated + * with the $j$th corner point, on the unit cell at point $\vec \xi$. The sum + * over $j$ runs over all corner points. + */ template class FELinear : public FiniteElement { public: @@ -146,10 +146,10 @@ class FELinear : public FiniteElement { /** - Define a (bi-, tri-, etc)quadratic finite element in #dim# space dimensions. - In one space dimension, a linear (subparametric) mapping from the unit cell - to the real cell is implemented. - */ + * Define a (bi-, tri-, etc)quadratic finite element in #dim# space dimensions. + * In one space dimension, a linear (subparametric) mapping from the unit cell + * to the real cell is implemented. + */ template class FEQuadratic : public FiniteElement { public: @@ -241,10 +241,10 @@ class FEQuadratic : public FiniteElement { /** - Define a (bi-, tri-, etc)cubic finite element in #dim# space dimensions. - In one space dimension, a linear (subparametric) mapping from the unit cell - to the real cell is implemented. - */ + * Define a (bi-, tri-, etc)cubic finite element in #dim# space dimensions. + * In one space dimension, a linear (subparametric) mapping from the unit cell + * to the real cell is implemented. + */ template class FECubic : public FiniteElement { public: diff --git a/deal.II/deal.II/include/fe/fe_update_flags.h b/deal.II/deal.II/include/fe/fe_update_flags.h index f333d9d532..dfc8a66319 100644 --- a/deal.II/deal.II/include/fe/fe_update_flags.h +++ b/deal.II/deal.II/include/fe/fe_update_flags.h @@ -6,15 +6,15 @@ /** - Provide a set of flags which tells the #FEValues<>::reinit# function, which - fields are to be updated for each cell. E.g. if you do not need the - gradients since you want to assemble the mass matrix, you can switch that - off. By default, all flags are off, i.e. no reinitialization will be done. - - A variable of this type has to be passed to the constructor of the - #FEValues# object. You can select more than one flag by concatenation - using the #|# (bitwise #or#) operator. 
- */ + * Provide a set of flags which tells the #FEValues<>::reinit# function, which + * fields are to be updated for each cell. E.g. if you do not need the + * gradients since you want to assemble the mass matrix, you can switch that + * off. By default, all flags are off, i.e. no reinitialization will be done. + * + * A variable of this type has to be passed to the constructor of the + * #FEValues# object. You can select more than one flag by concatenation + * using the #|# (bitwise #or#) operator. + */ enum UpdateFlags { /** * Default: update nothing. diff --git a/deal.II/deal.II/include/fe/fe_values.h b/deal.II/deal.II/include/fe/fe_values.h index ba7364ccf4..baf6930aba 100644 --- a/deal.II/deal.II/include/fe/fe_values.h +++ b/deal.II/deal.II/include/fe/fe_values.h @@ -20,121 +20,121 @@ template class Quadrature; /** - This class offers a multitude of arrays and other fields which are used by - the derived classes #FEValues#, #FEFaceValues# and #FESubfaceValues#. - In principle, it is the - back end of the front end for the unification of a certain finite element - and a quadrature formula which evaluates certain aspects of the finite - element at quadrature points. - - This class is an optimization which avoids evaluating the shape functions - at the quadrature points each time a quadrature takes place. Rather, the - values and gradients (and possibly higher order derivatives in future - versions of this library) are evaluated once and for all on the unit - cell or face before doing the quadrature itself. Only the Jacobian matrix of - the transformation from the unit cell or face to the real cell or face and - the integration points in real space are calculated each time we move on - to a new face. - - Actually, this class does none of the evaluations at startup itself; this is - all done by the derived classes. It only offers the basic functionality, - like providing those fields that are common to the derived classes and - access to these fields. Any computations are in the derived classes. See there - for more information. - - It has support for the restriction of finite elements to faces of cells or - even to subfaces (i.e. refined faces). For this purpose, it offers an array - of matrices of ansatz function values, rather than one. Since the value of - a function at a quadrature point is an invariant under the transformation - from the unit cell to the real cell, it is only evaluated once upon startup. - However, when considering the restriction of a finite element to a face of - a cell (using a given quadrature rule), we may be tempted to compute the - restriction to all faces at startup (thus ending in four array of ansatz - function values in two dimensions, one per face, and even more in higher - dimensions) and let the respective #reinit# function of the derived classes - set a number which of the fields is to be taken when the user requests the - function values. This is done through the #selected_dataset# variable. See - the derived classes and the #get_values# function for the exact usage of - this variable. - - - \subsection{Definitions} - - The Jacobian matrix is defined to be - $$ J_{ij} = {d\xi_i \over dx_j} $$ - where the $\xi_i$ are the coordinates on the unit cell and the $x_i$ are - the coordinates on the real cell. - This is the form needed to compute the gradient on the real cell from - the gradient on the unit cell. 
If we want to transform the area element - $dx dy$ from the real to the unit cell, we have to take the determinant of - the inverse matrix, which is the reciprocal value of the determinant of the - matrix defined above. - - The Jacobi matrix is always that of the transformation of unit to real cell. - This applies also to the case where the derived class handles faces or - subfaces, in which case also the transformation of unit to real cell is - needed. However, the Jacobi matrix of the full transformation is always - needed if we want to get the values of the gradients, which need to be - transformed with the full Jacobi matrix, while we only need the - transformation from unit to real face to compute the determinant of the - Jacobi matrix to get the scaling of the surface element $do$. - - - \subsection{Member functions} - - The functions of this class fall into different cathegories: - \begin{itemize} - \item #shape_value#, #shape_grad#, etc: return one of the values - of this object at a time. In many cases you will want to get - a whole bunch at a time for performance or convenience reasons, - then use the #get_*# functions. - - \item #get_shape_values#, #get_shape_grads#, etc: these return - a reference to a whole field. Usually these fields contain - the values of all ansatz functions at all quadrature points. - - \item #get_function_values#, #get_function_gradients#: these - two functions offer a simple way to avoid the detour of the - ansatz functions, if you have a finite solution (resp. the - vector of values associated with the different ansatz functions.) - Then you may want to get information from the restriction of - the finite element function to a certain cell, e.g. the values - of the function at the quadrature points or the values of its - gradient. These two functions provide the information needed: - you pass it a vector holding the finite element solution and the - functions return the values or gradients of the finite element - function restricted to the cell which was given last time the - #reinit# function was given. - - Though possible in principle, these functions do not call the - #reinit# function, you have to do so yourself beforehand. On the - other hand, a copy of the cell iterator is stored which was used - last time the #reinit# function was called. This frees us from - the need to pass the cell iterator again to these two functions, - which guarantees that the cell used here is in sync with that used - for the #reinit# function. You should, however, make sure that - nothing substantial happens to the #DoFHandler# object or any - other involved instance between the #reinit# and the #get_function_*# - functions are called. - - \item #reinit#: initialize the #FEValues# object for a certain cell. - This function is not in the present class but only in the derived - classes and has a variable call syntax. - See the docs for the derived classes for more information. - \end{itemize} - - - \subsection{Implementational issues} - - The #FEValues# object keeps track of those fields which really need to - be computed, since the computation of the gradients of the ansatz functions - and of other values on each real cell can be quite an expensive thing - if it is not needed. The - object knows about which fields are needed by the #UpdateFlags# object - passed through the constructor. In debug mode, the accessor functions, which - return values from the different fields, check whether the required field - was initialized, thus avoiding use of unitialized data. 
-
+ * This class offers a multitude of arrays and other fields which are used by
+ * the derived classes #FEValues#, #FEFaceValues# and #FESubfaceValues#.
+ * In principle, it is the
+ * back end of the front end classes which unite a certain finite element
+ * and a quadrature formula and which evaluate certain aspects of the finite
+ * element at quadrature points.
+ *
+ * This class is an optimization which avoids evaluating the shape functions
+ * at the quadrature points each time a quadrature takes place. Rather, the
+ * values and gradients (and possibly higher order derivatives in future
+ * versions of this library) are evaluated once and for all on the unit
+ * cell or face before doing the quadrature itself. Only the Jacobian matrix of
+ * the transformation from the unit cell or face to the real cell or face and
+ * the integration points in real space are calculated each time we move on
+ * to a new face.
+ *
+ * Actually, this class does none of the evaluations at startup itself; this is
+ * all done by the derived classes. It only offers the basic functionality,
+ * like providing those fields that are common to the derived classes and
+ * access to these fields. Any computations are in the derived classes. See there
+ * for more information.
+ *
+ * It has support for the restriction of finite elements to faces of cells or
+ * even to subfaces (i.e. refined faces). For this purpose, it offers an array
+ * of matrices of ansatz function values, rather than one. Since the value of
+ * a function at a quadrature point is an invariant under the transformation
+ * from the unit cell to the real cell, it is only evaluated once upon startup.
+ * However, when considering the restriction of a finite element to a face of
+ * a cell (using a given quadrature rule), we may be tempted to compute the
+ * restriction to all faces at startup (thus ending up with four arrays of ansatz
+ * function values in two dimensions, one per face, and even more in higher
+ * dimensions) and let the respective #reinit# function of the derived classes
+ * set a number indicating which of the fields is to be taken when the user
+ * requests the function values. This is done through the #selected_dataset#
+ * variable. See the derived classes and the #get_values# function for the
+ * exact usage of this variable.
+ *
+ *
+ * \subsection{Definitions}
+ *
+ * The Jacobian matrix is defined to be
+ * $$ J_{ij} = {d\xi_i \over dx_j} $$
+ * where the $\xi_i$ are the coordinates on the unit cell and the $x_i$ are
+ * the coordinates on the real cell.
+ * This is the form needed to compute the gradient on the real cell from
+ * the gradient on the unit cell. If we want to transform the area element
+ * $dx dy$ from the real to the unit cell, we have to take the determinant of
+ * the inverse matrix, which is the reciprocal value of the determinant of the
+ * matrix defined above.
+ *
+ * The Jacobi matrix is always that of the transformation of unit to real cell.
+ * This also applies to the case where the derived class handles faces or
+ * subfaces, in which case the transformation from unit to real cell is needed
+ * as well. However, the Jacobi matrix of the full transformation is always
+ * needed if we want to get the values of the gradients, which need to be
+ * transformed with the full Jacobi matrix, while we only need the
+ * transformation from unit to real face to compute the determinant of the
+ * Jacobi matrix to get the scaling of the surface element $do$.
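Written out with the definition of $J$ given above, the two transformations used here are
$$ {d\phi \over dx_j} = \sum_i J_{ij} {d\phi \over d\xi_i},
   \qquad
   dx\, dy = {1 \over |\det J|} \, d\xi\, d\eta, $$
i.e. gradients are transformed with the full Jacobi matrix, while the area element only needs its determinant.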
+ *
+ *
+ * \subsection{Member functions}
+ *
+ * The functions of this class fall into different categories:
+ * \begin{itemize}
+ * \item #shape_value#, #shape_grad#, etc: return one of the values
+ * of this object at a time. In many cases you will want to get
+ * a whole bunch at a time for performance or convenience reasons;
+ * in that case use the #get_*# functions.
+ *
+ * \item #get_shape_values#, #get_shape_grads#, etc: these return
+ * a reference to a whole field. Usually these fields contain
+ * the values of all ansatz functions at all quadrature points.
+ *
+ * \item #get_function_values#, #get_function_gradients#: these
+ * two functions offer a simple way to avoid the detour of the
+ * ansatz functions, if you have a finite element solution (resp. the
+ * vector of values associated with the different ansatz functions).
+ * Then you may want to get information from the restriction of
+ * the finite element function to a certain cell, e.g. the values
+ * of the function at the quadrature points or the values of its
+ * gradient. These two functions provide the information needed:
+ * you pass them a vector holding the finite element solution and the
+ * functions return the values or gradients of the finite element
+ * function restricted to the cell which was given last time the
+ * #reinit# function was called.
+ *
+ * Though possible in principle, these functions do not call the
+ * #reinit# function; you have to do so yourself beforehand. On the
+ * other hand, a copy of the cell iterator is stored which was used
+ * last time the #reinit# function was called. This frees us from
+ * the need to pass the cell iterator again to these two functions,
+ * which guarantees that the cell used here is in sync with that used
+ * for the #reinit# function. You should, however, make sure that
+ * nothing substantial happens to the #DoFHandler# object or any
+ * other involved instance between the call to #reinit# and the calls
+ * to the #get_function_*# functions.
+ *
+ * \item #reinit#: initialize the #FEValues# object for a certain cell.
+ * This function is not in the present class but only in the derived
+ * classes and has a variable call syntax.
+ * See the docs for the derived classes for more information.
+ * \end{itemize}
+ *
+ *
+ * \subsection{Implementational issues}
+ *
+ * The #FEValues# object keeps track of those fields which really need to
+ * be computed, since the computation of the gradients of the ansatz functions
+ * and of other values on each real cell can be quite expensive
+ * if it is not needed. The
+ * object knows which fields are needed through the #UpdateFlags# object
+ * passed to the constructor. In debug mode, the accessor functions, which
+ * return values from the different fields, check whether the required field
+ * was initialized, thus avoiding use of uninitialized data.
+ *
@author Wolfgang Bangerth, 1998
*/
template <int dim>
class FEValuesBase {
@@ -487,24 +487,24 @@ class FEValuesBase {
/**
- Represent a finite element evaluated with a specific quadrature rule on
- a cell.
-
- The unit cell is defined to be the tensor product of the interval $[0,1]$
- in the present number of dimensions. In part of the literature, the convention
- is used that the unit cell be the tensor product of the interval $[-1,1]$,
- which is to distinguished properly.
-
- Objects of this class store a multitude of different values needed to
- do the assemblage steps on real cells rather than on the unit cell.
Among - these values are the values and gradients of the shape functions at the - quadrature points on the real and the unit cell, the location of the - quadrature points on the real and on the unit cell, the weights of the - quadrature points, the Jacobian matrices of the mapping from the unit to - the real cell at the quadrature points and so on. - - @author Wolfgang Bangerth, 1998 - */ + * Represent a finite element evaluated with a specific quadrature rule on + * a cell. + * + * The unit cell is defined to be the tensor product of the interval $[0,1]$ + * in the present number of dimensions. In part of the literature, the convention + * is used that the unit cell be the tensor product of the interval $[-1,1]$, + * which is to distinguished properly. + * + * Objects of this class store a multitude of different values needed to + * do the assemblage steps on real cells rather than on the unit cell. Among + * these values are the values and gradients of the shape functions at the + * quadrature points on the real and the unit cell, the location of the + * quadrature points on the real and on the unit cell, the weights of the + * quadrature points, the Jacobian matrices of the mapping from the unit to + * the real cell at the quadrature points and so on. + * + * @author Wolfgang Bangerth, 1998 + */ template class FEValues : public FEValuesBase { public: @@ -577,71 +577,71 @@ class FEValues : public FEValuesBase { /** - This class provides for the data elements needed for the restriction of - finite elements to faces or subfaces. It does no real computations, apart - from initialization of the fields with the right size. It more or - less is only a base class to the #FEFaceValues# and #FESubfaceValues# - classes which do the real computations. See there for descriptions of - what is really going on. - - Since many of the concepts are the same whether we restrict a finite element - to a face or a subface (i.e. the child of the face of a cell), we describe - those common concepts here, rather than in the derived classes. - - - \subsection{Technical issues} - - The unit face is defined to be the tensor product of the interval $[0,1]$ - in the present number of dimensions minus one. In part of the literature, - the convention is used that the unit cell/face be the tensor product of the - interval $[-1,1]$, which is to distinguished properly. A subface is the - child of a face; they are numbered in the way laid down in the - #Triangulation# class. - - Just like in the #FEValues# class, function values and gradients on the unit - face or subface are evaluated at the quadrature points only once, and stored - by the common base class. Being a tensor of rank zero, the function values - remain the same when we want them at the quadrature points on the real cell, - while we get the gradients (a tensor of rank one) by multiplication with the - Jacobi matrix of the transformation, which we need to compute for each cell - and each quadrature point. - - However, while in the #FEValues# class the quadrature points are always the - same, here we deal with more than one (sub)face. We therefore store the values - and gradients of the ansatz functions on the unit cell in an array with as - many elements as there are (sub)faces on a cell. The same applies for the - quadrature points on the (sub)faces: for each (sub)face we store the position - on the cell. 
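To illustrate how the pieces described above (the #UpdateFlags# passed to the constructor, the #reinit# function and the #get_function_*# functions) fit together, here is a rough usage sketch. The quadrature class, the flag names, the iterator spelling and the additional #reinit# arguments are assumptions made only for this example and have to be looked up in the actual declarations.

\begin{verbatim}
// Sketch only: evaluate a finite element function, given by its vector
// of nodal values `solution', at the quadrature points of every cell.
FELinear<2>  fe;
QGauss2<2>   quadrature;                      // assumed quadrature class name
FEValues<2>  fe_values (fe, quadrature,
                        UpdateFlags (update_q_points |
                                     update_gradients));  // assumed flag names
vector<double> values (quadrature.n_quadrature_points);   // assumed member name
DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active(),
                                    endc = dof_handler.end();
for (; cell!=endc; ++cell)
  {
    fe_values.reinit (cell /*, further arguments as required */);
    fe_values.get_function_values (solution, values);
    // ... use values[q], fe_values.shape_value(i,q), etc. here ...
  };
\end{verbatim}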
This way we still need to evaluate unit gradients and function - values only once and only recompute the gradients on the real (sub)face by - multiplication of the unit gradients on the presently selected (sub)face - with the Jacobi matrix. - - - When the #reinit# function of a derived class is called, only those - gradients, quadrature points etc are transformed to the real cell which - belong to the selected face or subface. The number of the selected face - or subface is stored in the #selected_dataset# variable of the base class - such that the #shape_value# function can return the shape function's - values on the (sub)face which was last selected by a call to the #reinit# - function. - - In addition to the complications described above, we need two different - Jacobi matrices and determinants in this context: one for the transformation - of the unit cell to the real cell (this Jacobi matrix is needed to - compute the restriction of the real gradient to the given face) and one - for the transformation of the unit face to the real face or subface - (needed to compute the weight factors for integration along faces). These two - concepts have to be carefully separated. - - Finally, we will often need the outward normal to a cell at the quadrature - points. While this could in principle be easily done using the Jacobi - matrices at the quadrature points and the normal vectors to the unit cell - (also easily derived, since they have an appealingly simple form for the unit - cell ;-), it is more efficiently done by the finite element class itself. - For example for (bi-, tri-)linear mappings the normal vector is readily - available without complicated matrix-vector-multiplications. - - @author Wolfgang Bangerth, 1998 + * This class provides for the data elements needed for the restriction of + * finite elements to faces or subfaces. It does no real computations, apart + * from initialization of the fields with the right size. It more or + * less is only a base class to the #FEFaceValues# and #FESubfaceValues# + * classes which do the real computations. See there for descriptions of + * what is really going on. + * + * Since many of the concepts are the same whether we restrict a finite element + * to a face or a subface (i.e. the child of the face of a cell), we describe + * those common concepts here, rather than in the derived classes. + * + * + * \subsection{Technical issues} + * + * The unit face is defined to be the tensor product of the interval $[0,1]$ + * in the present number of dimensions minus one. In part of the literature, + * the convention is used that the unit cell/face be the tensor product of the + * interval $[-1,1]$, which is to distinguished properly. A subface is the + * child of a face; they are numbered in the way laid down in the + * #Triangulation# class. + * + * Just like in the #FEValues# class, function values and gradients on the unit + * face or subface are evaluated at the quadrature points only once, and stored + * by the common base class. Being a tensor of rank zero, the function values + * remain the same when we want them at the quadrature points on the real cell, + * while we get the gradients (a tensor of rank one) by multiplication with the + * Jacobi matrix of the transformation, which we need to compute for each cell + * and each quadrature point. + * + * However, while in the #FEValues# class the quadrature points are always the + * same, here we deal with more than one (sub)face. 
We therefore store the values + * and gradients of the ansatz functions on the unit cell in an array with as + * many elements as there are (sub)faces on a cell. The same applies for the + * quadrature points on the (sub)faces: for each (sub)face we store the position + * on the cell. This way we still need to evaluate unit gradients and function + * values only once and only recompute the gradients on the real (sub)face by + * multiplication of the unit gradients on the presently selected (sub)face + * with the Jacobi matrix. + * + * + * When the #reinit# function of a derived class is called, only those + * gradients, quadrature points etc are transformed to the real cell which + * belong to the selected face or subface. The number of the selected face + * or subface is stored in the #selected_dataset# variable of the base class + * such that the #shape_value# function can return the shape function's + * values on the (sub)face which was last selected by a call to the #reinit# + * function. + * + * In addition to the complications described above, we need two different + * Jacobi matrices and determinants in this context: one for the transformation + * of the unit cell to the real cell (this Jacobi matrix is needed to + * compute the restriction of the real gradient to the given face) and one + * for the transformation of the unit face to the real face or subface + * (needed to compute the weight factors for integration along faces). These two + * concepts have to be carefully separated. + * + * Finally, we will often need the outward normal to a cell at the quadrature + * points. While this could in principle be easily done using the Jacobi + * matrices at the quadrature points and the normal vectors to the unit cell + * (also easily derived, since they have an appealingly simple form for the unit + * cell ;-), it is more efficiently done by the finite element class itself. + * For example for (bi-, tri-)linear mappings the normal vector is readily + * available without complicated matrix-vector-multiplications. + * + * @author Wolfgang Bangerth, 1998 */ template class FEFaceValuesBase : public FEValuesBase { @@ -740,26 +740,26 @@ class FEFaceValuesBase : public FEValuesBase { /** - Represent a finite element evaluated with a specific quadrature rule on - the face of a cell. - - This class is very similar to the #FEValues# class; see there for more - documentation. It is, however, a bit more involved: since we want to - compute the restriction of finite element functions (here: the basis - functions, but a finite element function is obtained by multiplication - with the nodal values and summation) to the face of a cell and since - finite element functions and especially their gradients need not be - continuous at faces, we can not compute the wanted information from - the face and a finite element class on the unit cell alone, but we - need the real cell as well. In addition, we need to know what number - the face is in the set of faces of the cell we want to restrict. - Finally, since we may want to use higher order elements with unit cell - to real cell mappings of higher than first order, thus applying curved - boundaries, we need to know an object describing the boundary of the - domain. - - @author Wolfgang Bangerth, 1998 - */ + * Represent a finite element evaluated with a specific quadrature rule on + * the face of a cell. + * + * This class is very similar to the #FEValues# class; see there for more + * documentation. 
It is, however, a bit more involved: since we want to + * compute the restriction of finite element functions (here: the basis + * functions, but a finite element function is obtained by multiplication + * with the nodal values and summation) to the face of a cell and since + * finite element functions and especially their gradients need not be + * continuous at faces, we can not compute the wanted information from + * the face and a finite element class on the unit cell alone, but we + * need the real cell as well. In addition, we need to know what number + * the face is in the set of faces of the cell we want to restrict. + * Finally, since we may want to use higher order elements with unit cell + * to real cell mappings of higher than first order, thus applying curved + * boundaries, we need to know an object describing the boundary of the + * domain. + * + * @author Wolfgang Bangerth, 1998 + */ template class FEFaceValues : public FEFaceValuesBase { public: @@ -812,105 +812,105 @@ class FEFaceValues : public FEFaceValuesBase { /** - Represent a finite element evaluated with a specific quadrature rule on - the child of the face of a cell. - - This class is very similar to the #FEFaceValues# class; see there for - more documentation. It serves the computation of interface integrals - where the cells on both sides of the face have different refinement - levels. This is useful for example when we want to integrate the jump - of the gradient of the finite element solution along the boundary of - a cell to estimate the error. Now, this is not so much of a problem - if all neighbors of the cell have the same refinement level, then we - will use the #FEFaceValues# class, but it gets trickier if one of the - cells is more refined than the other. - - To this end, there seem to be two ways which may be applicable: - \begin{itemize} - \item Prolong the coarser cell to the finer refinement level: we could - compute the prolongation of the finite element functions to the - child cells and consider the subface a face of one of the child cells. - This approach seems clear and rather simple to implement, however it - has two major drawbacks: first, the finite element space on the - refined (child) cells may not be included in the space of the unrefined - cell, in which case the prolongation would alter information and thus - make computations worthless in the worst case. The second reason is - a practical one, namely that by refining the cell virtually, we would - end up with child cells which do not exist in real and can thus not be - represented in terms of iterators. This would mean that we had to change - the whole interface to the #FE*Values# classes to accept cell corner - points by value, etc, instead of relying on appropriate iterators. This - seems to be clumsy and not very suitable to maintain an orthogonal - programming style. Apart from that, we already have iterators, why - shouldn't we use them? - - \item Use 'different' quadrature formulae: this second approach is the - way we chose here. The idea is to evaluate the finite element ansatz - functions on the two cells restricted to the face in question separately, - by restricting the ansatz functions on the less refined cell to its - face and the functions on the more refined cell to its face as well, - the second face being a child to the first one. 
Now, if we would use - the same quadrature formula for both restrictions, we would end up with - the same number of quadrature points, but at different locations since - they were evaluated on faces of different size. We therefore use the - original quadrature formula for the refined cell and a modified one for - the coarse cell, the latter being modified in such a way that the - locations of the quadrature points match each other. - - An example may shed more light onto this: assume we are in two dimension, - we have a cell of which we want to evaluate a finite element function on - face zero, and neighbor zero is refined (then so is face zero). The - quadrature formula shall be the Simpson rule with quadrature points - $0$, $0.5$ and $1$. The present cell shall be the unit cell, without - loss of generality. Then the face in question is the line $(0,0)$ to - $(1,0)$, subdivided into two subfaces. We will then compute the - restriction of the present cell to the common subface $(0,0)$ to - $(0.5,5)$ by using a modified quadrature formulae with quadrature - points $(0,0)$, $(0.25,0)$ and $(0.5,0)$ (coordinates on the cell) - which is not symmetric as was the original quadrature rule for a line. - This modified quadrature rule is computed by projection onto the subface - using the #QProjector::project_to_subface()# function. The neighboring - cell, being refined once more than the present is evaluated with the - quadrature formula projected to the common face, but using the original - quadrature formula. This way, the locations of the quadrature points - on both sides of the common face match each other. - \end{itemize} - - For a use of this mechanism, take a look of the code in the error - estimation hierarchy, since there often the jump of a finite element - function's gradient across cell boundaries is computed. - - - \subsection{Other implementational subjects} - - It does not seem useful to ask for the off-points of the ansatz functions - (name #ansatz_points# in the #FEValuesBase# class) for subfaces. These are - therefore not supported for this class and should throw an error if - accessed. Specifying #update_ansatz_points# for the #UpdateFlags# in the - constructor is disallowed. - - The values of the ansatz functions on the subfaces are stored as an array - of matrices, each matrix representing the values of the ansatz functions at - the quadrature points at one subface. The ordering is as follows: the values - of the ansatz functions at face #face#, subface #subface# are stored in - #shape_values[face*(1<<(dim-1))+subface]#. The same order applies for the - quadrature points on the unit cell, which are stored in the - #unit_quadrature_points# array. Note that #1<<(dim-1)# is the number of - subfaces per face. - - One subtle problem is that if a face is at the boundary, then computation - of subfaces may be a bit tricky, since we do not know whether the user - intends to better approximate the boundary by the subfaces or only wants - to have the subfaces be one part of the mother face. However, it is hardly - conceivable what someone wants when using this class for faces at the - boundary, in the end this class was invented to facilitate integration - along faces with cells of different refinement levels on both sides, - integration along the boundary of the domain is better done through - the #FEFaceValues# class. For this reason, calling #reinit# with a - boundary face will result in an error. 
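In the example just given, the projection onto the first subface is nothing but the affine map $\xi \mapsto \xi/2$ of the one dimensional quadrature points, so the Simpson points transform as
$$ \{0,\ 0.5,\ 1\} \;\longmapsto\; \{0,\ 0.25,\ 0.5\}, $$
which, written as points on the cell, are exactly the points $(0,0)$, $(0.25,0)$ and $(0.5,0)$ mentioned above, while the neighboring, refined cell uses the unmodified rule on its own, smaller face.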
- - @author Wolfgang Bangerth, 1998 - */ + * Represent a finite element evaluated with a specific quadrature rule on + * the child of the face of a cell. + * + * This class is very similar to the #FEFaceValues# class; see there for + * more documentation. It serves the computation of interface integrals + * where the cells on both sides of the face have different refinement + * levels. This is useful for example when we want to integrate the jump + * of the gradient of the finite element solution along the boundary of + * a cell to estimate the error. Now, this is not so much of a problem + * if all neighbors of the cell have the same refinement level, then we + * will use the #FEFaceValues# class, but it gets trickier if one of the + * cells is more refined than the other. + * + * To this end, there seem to be two ways which may be applicable: + * \begin{itemize} + * \item Prolong the coarser cell to the finer refinement level: we could + * compute the prolongation of the finite element functions to the + * child cells and consider the subface a face of one of the child cells. + * This approach seems clear and rather simple to implement, however it + * has two major drawbacks: first, the finite element space on the + * refined (child) cells may not be included in the space of the unrefined + * cell, in which case the prolongation would alter information and thus + * make computations worthless in the worst case. The second reason is + * a practical one, namely that by refining the cell virtually, we would + * end up with child cells which do not exist in real and can thus not be + * represented in terms of iterators. This would mean that we had to change + * the whole interface to the #FE*Values# classes to accept cell corner + * points by value, etc, instead of relying on appropriate iterators. This + * seems to be clumsy and not very suitable to maintain an orthogonal + * programming style. Apart from that, we already have iterators, why + * shouldn't we use them? + * + * \item Use 'different' quadrature formulae: this second approach is the + * way we chose here. The idea is to evaluate the finite element ansatz + * functions on the two cells restricted to the face in question separately, + * by restricting the ansatz functions on the less refined cell to its + * face and the functions on the more refined cell to its face as well, + * the second face being a child to the first one. Now, if we would use + * the same quadrature formula for both restrictions, we would end up with + * the same number of quadrature points, but at different locations since + * they were evaluated on faces of different size. We therefore use the + * original quadrature formula for the refined cell and a modified one for + * the coarse cell, the latter being modified in such a way that the + * locations of the quadrature points match each other. + * + * An example may shed more light onto this: assume we are in two dimension, + * we have a cell of which we want to evaluate a finite element function on + * face zero, and neighbor zero is refined (then so is face zero). The + * quadrature formula shall be the Simpson rule with quadrature points + * $0$, $0.5$ and $1$. The present cell shall be the unit cell, without + * loss of generality. Then the face in question is the line $(0,0)$ to + * $(1,0)$, subdivided into two subfaces. 
We will then compute the
+ * restriction of the present cell to the common subface $(0,0)$ to
+ * $(0.5,0)$ by using a modified quadrature formula with quadrature
+ * points $(0,0)$, $(0.25,0)$ and $(0.5,0)$ (coordinates on the cell), which,
+ * unlike the original quadrature rule for a line, is not symmetric.
+ * This modified quadrature rule is computed by projection onto the subface
+ * using the #QProjector::project_to_subface()# function. The neighboring
+ * cell, being refined once more than the present one, is evaluated with the
+ * original quadrature formula projected to the common face.
+ * This way, the locations of the quadrature points
+ * on both sides of the common face match each other.
+ * \end{itemize}
+ *
+ * For a use of this mechanism, take a look at the code in the error
+ * estimation hierarchy, where the jump of a finite element
+ * function's gradient across cell boundaries is frequently computed.
+ *
+ *
+ * \subsection{Other implementational subjects}
+ *
+ * It does not seem useful to ask for the off-points of the ansatz functions
+ * (named #ansatz_points# in the #FEValuesBase# class) for subfaces. These are
+ * therefore not supported for this class and should throw an error if
+ * accessed. Specifying #update_ansatz_points# for the #UpdateFlags# in the
+ * constructor is disallowed.
+ *
+ * The values of the ansatz functions on the subfaces are stored as an array
+ * of matrices, each matrix representing the values of the ansatz functions at
+ * the quadrature points at one subface. The ordering is as follows: the values
+ * of the ansatz functions at face #face#, subface #subface# are stored in
+ * #shape_values[face*(1<<(dim-1))+subface]#. The same order applies for the
+ * quadrature points on the unit cell, which are stored in the
+ * #unit_quadrature_points# array. Note that #1<<(dim-1)# is the number of
+ * subfaces per face.
+ *
+ * One subtle problem is that if a face is at the boundary, then computation
+ * of subfaces may be a bit tricky, since we do not know whether the user
+ * intends to better approximate the boundary by the subfaces or only wants
+ * to have the subfaces be one part of the mother face. However, it is hardly
+ * conceivable what someone wants when using this class for faces at the
+ * boundary; after all, this class was invented to facilitate integration
+ * along faces with cells of different refinement levels on both sides,
+ * while integration along the boundary of the domain is better done through
+ * the #FEFaceValues# class. For this reason, calling #reinit# with a
+ * boundary face will result in an error.
+ *
+ * @author Wolfgang Bangerth, 1998
+ */
template <int dim>
class FESubfaceValues : public FEFaceValuesBase<dim> {
  public:
diff --git a/deal.II/deal.II/include/grid/geometry_info.h b/deal.II/deal.II/include/grid/geometry_info.h
index 81ae9574f4..3665626d77 100644
--- a/deal.II/deal.II/include/grid/geometry_info.h
+++ b/deal.II/deal.II/include/grid/geometry_info.h
@@ -12,14 +12,14 @@ template <int dim> struct GeometryInfo;
/**
- Publish some information about geometrical interconnections to the
- outside world, for one spacial dimension in this case. These are,
- for example the numbers of children per cell, faces per cell, etc,
- but also neighborship information, It is especially useful if you
- want to loop over all faces in any space dimension, but don't want
- to think about their number in a dimension independent expression.
- This not only reduces thinking effort but also error possibilities.
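The dimension independent loop over all faces of a cell mentioned in the #GeometryInfo# description above typically takes the following form; the member name #faces_per_cell# is an assumption for this sketch and has to be checked against the actual class declaration.

\begin{verbatim}
// Sketch only: visit every face of a cell without hard-coding their number.
for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
  {
    // ... work on face number `face' of the current cell ...
  };
\end{verbatim}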
-*/ + * Publish some information about geometrical interconnections to the + * outside world, for one spacial dimension in this case. These are, + * for example the numbers of children per cell, faces per cell, etc, + * but also neighborship information, It is especially useful if you + * want to loop over all faces in any space dimension, but don't want + * to think about their number in a dimension independent expression. + * This not only reduces thinking effort but also error possibilities. + */ struct GeometryInfo<1> { public: /** @@ -58,14 +58,14 @@ struct GeometryInfo<1> { /** - Publish some information about geometrical interconnections to the - outside world, for two spacial dimensions in this case. These are, - for example the numbers of children per cell, faces per cell, etc, - but also neighborship information, It is especially useful if you - want to loop over all faces in any space dimension, but don't want - to think about their number in a dimension independent expression. - This not only reduces thinking effort but also error possibilities. -*/ + * Publish some information about geometrical interconnections to the + * outside world, for two spacial dimensions in this case. These are, + * for example the numbers of children per cell, faces per cell, etc, + * but also neighborship information, It is especially useful if you + * want to loop over all faces in any space dimension, but don't want + * to think about their number in a dimension independent expression. + * This not only reduces thinking effort but also error possibilities. + */ struct GeometryInfo<2> { public: /** diff --git a/deal.II/deal.II/include/grid/point.h b/deal.II/deal.II/include/grid/point.h index a99b50be6b..eaaf99b11e 100644 --- a/deal.II/deal.II/include/grid/point.h +++ b/deal.II/deal.II/include/grid/point.h @@ -10,23 +10,23 @@ /** - The #Point# class provides for a point or vector in a space with arbitrary - dimension #dim#. - - It is the preferred object to be passed to functions which - operate on points in spaces of a priori unknown dimension: rather than - using functions like #double f(double x)# and #double f(double x, double y)#, - you use double #f(Point &p)#. - - #Point# also serves as a starting point for the implementation of the - geometrical primitives like #Polyhedron#, #Triangle#, etc. - - #Point#s can also be thought of as vectors, i.e. points in a vector space - without an obvious meaning. For instance, it may be suitable to let the - gradient of a function be a #point# vector: - #Point gradient_of_f (const Point &x)#. #Point#s have all - functionality for this, e.g. scalar products, addition etc. - */ + * The #Point# class provides for a point or vector in a space with arbitrary + * dimension #dim#. + * + * It is the preferred object to be passed to functions which + * operate on points in spaces of a priori unknown dimension: rather than + * using functions like #double f(double x)# and #double f(double x, double y)#, + * you use double #f(Point &p)#. + * + * #Point# also serves as a starting point for the implementation of the + * geometrical primitives like #Polyhedron#, #Triangle#, etc. + * + * #Point#s can also be thought of as vectors, i.e. points in a vector space + * without an obvious meaning. For instance, it may be suitable to let the + * gradient of a function be a #point# vector: + * #Point gradient_of_f (const Point &x)#. #Point#s have all + * functionality for this, e.g. scalar products, addition etc. 
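A small sketch of #Point# used both as a point and as a vector, along the lines of the description above; the constructor taking coordinates and the operator spelling of scalar product and addition are assumptions for this example.

\begin{verbatim}
// Sketch only: Point<dim> as a point and as a vector.
Point<2> x (1., 2.);          // assumed coordinate constructor
Point<2> y (3., 4.);
Point<2> sum  = x + y;        // vector addition
double   prod = x * y;        // scalar product
\end{verbatim}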
+ */ template class Point { public: diff --git a/deal.II/deal.II/include/grid/tria_accessor.h b/deal.II/deal.II/include/grid/tria_accessor.h index b8be69f2f5..27560268f8 100644 --- a/deal.II/deal.II/include/grid/tria_accessor.h +++ b/deal.II/deal.II/include/grid/tria_accessor.h @@ -35,9 +35,9 @@ template class Triangulation; /** - The three states an iterator can be in: valid, past-the-end and - invalid. - */ + * The three states an iterator can be in: valid, past-the-end and + * invalid. + */ enum IteratorState { valid, past_the_end, invalid }; @@ -45,13 +45,13 @@ enum IteratorState { valid, past_the_end, invalid }; /** - Implements the accessor class descibed in the documentation of - the iterator classes (see \Ref{TriaRawIterator}. - - This class offers only the basic functionality (stores the necessary - data members, offers comparison operators and the like), but has no - functionality to actually dereference data. This is done in the derived - classes. + * Implements the accessor class descibed in the documentation of + * the iterator classes (see \Ref{TriaRawIterator}. + * + * This class offers only the basic functionality (stores the necessary + * data members, offers comparison operators and the like), but has no + * functionality to actually dereference data. This is done in the derived + * classes. */ template class TriaAccessor { @@ -265,11 +265,11 @@ class TriaAccessor { /** - Accessor to dereference the data of lines. This accessor is used to - point to lines in #dim# space dimensions. There is a derived class - for lines in one space dimension, in which case a line is also a cell - and thus has much more functionality than in lower dimensions. - */ + * Accessor to dereference the data of lines. This accessor is used to + * point to lines in #dim# space dimensions. There is a derived class + * for lines in one space dimension, in which case a line is also a cell + * and thus has much more functionality than in lower dimensions. + */ template class LineAccessor : public TriaAccessor { public: @@ -481,12 +481,12 @@ class LineAccessor : public TriaAccessor { /** - Accessor to dereference the data of quads. This accessor is used to - point to quads in #dim# space dimensions (only #dim>=2# seems reasonable - to me). There is a derived class - for quads in two space dimension, in which case a quad is also a cell - and thus has much more functionality than in lower dimensions. - */ + * Accessor to dereference the data of quads. This accessor is used to + * point to quads in #dim# space dimensions (only #dim>=2# seems reasonable + * to me). There is a derived class + * for quads in two space dimension, in which case a quad is also a cell + * and thus has much more functionality than in lower dimensions. + */ template class QuadAccessor : public TriaAccessor { public: @@ -722,34 +722,34 @@ class QuadAccessor : public TriaAccessor { /** - Intermediate, "typedef"-class, not for public use. - */ + * Intermediate, "typedef"-class, not for public use. + */ template class TriaSubstructAccessor; /** - Intermediate, "typedef"-class, not for public use. - - \subsection{Rationale} - - This class is only a wrapper class used to do kind of a typedef - with template parameters. This class and #TriaSubstructAccessor<2># - wrap the following names: - \begin{verbatim} - TriaSubstructAccessor<1> := LineAccessor<1>; - TriaSubstructAccessor<2> := QuadAccessor<2>; - \end{verbatim} - We do this rather complex (and needless, provided C++ the needed constructs!) 
- class hierarchy manipulation, since this way we can declare and implement - the \Ref{CellAccessor} dimension independent as an inheritance from - #TriaSubstructAccessor#. If we had not declared these - types, we would have to write two class declarations, one for - #CellAccessor<1>#, derived from #LineAccessor<1># - and one for #CellAccessor<2>#, derived from - #QuadAccessor<2>#. - */ + * Intermediate, "typedef"-class, not for public use. + * + * \subsection{Rationale} + * + * This class is only a wrapper class used to do kind of a typedef + * with template parameters. This class and #TriaSubstructAccessor<2># + * wrap the following names: + * \begin{verbatim} + * TriaSubstructAccessor<1> := LineAccessor<1>; + * TriaSubstructAccessor<2> := QuadAccessor<2>; + * \end{verbatim} + * We do this rather complex (and needless, provided C++ the needed constructs!) + * class hierarchy manipulation, since this way we can declare and implement + * the \Ref{CellAccessor} dimension independent as an inheritance from + * #TriaSubstructAccessor#. If we had not declared these + * types, we would have to write two class declarations, one for + * #CellAccessor<1>#, derived from #LineAccessor<1># + * and one for #CellAccessor<2>#, derived from + * #QuadAccessor<2>#. + */ class TriaSubstructAccessor<1> : public LineAccessor<1> { public: /** @@ -773,9 +773,10 @@ class TriaSubstructAccessor<1> : public LineAccessor<1> { /** - Intermediate, "typedef"-class, not for public use. - @see TriaSubstructAccessor<1> - */ + * Intermediate, "typedef"-class, not for public use. + * + * @see TriaSubstructAccessor<1> + */ class TriaSubstructAccessor<2> : public QuadAccessor<2> { public: /** @@ -803,16 +804,16 @@ class TriaSubstructAccessor<2> : public QuadAccessor<2> { /** - This class allows access to a cell: a line in one dimension, a quad - in two dimension, etc. - - The following refers to any space dimension: - - This class allows access to a {\bf cell}, which is a line in 1D and a quad in - 2D. Cells have more functionality than lines or quads by themselves, for - example they can be flagged for refinement, they have neighbors, they have - the possibility to check whether they are at the boundary etc. This class - offers access to all this data. + * This class allows access to a cell: a line in one dimension, a quad + * in two dimension, etc. + * + * The following refers to any space dimension: + * + * This class allows access to a {\bf cell}, which is a line in 1D and a quad in + * 2D. Cells have more functionality than lines or quads by themselves, for + * example they can be flagged for refinement, they have neighbors, they have + * the possibility to check whether they are at the boundary etc. This class + * offers access to all this data. */ template class CellAccessor : public TriaSubstructAccessor { diff --git a/deal.II/deal.II/include/grid/tria_boundary.h b/deal.II/deal.II/include/grid/tria_boundary.h index f194505374..17e57aba2d 100644 --- a/deal.II/deal.II/include/grid/tria_boundary.h +++ b/deal.II/deal.II/include/grid/tria_boundary.h @@ -7,31 +7,31 @@ #include /** - This class is used to represent a boundary to a triangulation. - When a triangulation creates a new vertex on the boundary of the - domain, it determines the new vertex' coordinates through the - following code (here in two dimensions): - \begin{verbatim} - ... - const Point<2> *neighbors[2] = {&neighbor1, &neighbor2}; - Point<2> new_vertex = boundary.in_between (neighbors); - ... 
- \end{verbatim} - #neighbor1# and #neighbor2# are the two vertices bounding the old - line on the boundary, which is to be subdivided. #boundary# is an - object of type #Boundary#. - - In 3D, a new vertex may be placed on the middle of a line or on - the middle of a side. In the both cases, an array with four points - has to be passed to #in_between#; in the latter case the two end - points of the line have to be given consecutively twice, as - elements 0 and 1, and 2 and 3, respectively. - - There are specialisations, #StraightBoundary#, which places - the new point right into the middle of the given points, and - #HyperBallBoundary# creating a hyperball with given radius - around a given center point. - */ + * This class is used to represent a boundary to a triangulation. + * When a triangulation creates a new vertex on the boundary of the + * domain, it determines the new vertex' coordinates through the + * following code (here in two dimensions): + * \begin{verbatim} + * ... + * const Point<2> *neighbors[2] = {&neighbor1, &neighbor2}; + * Point<2> new_vertex = boundary.in_between (neighbors); + * ... + * \end{verbatim} + * #neighbor1# and #neighbor2# are the two vertices bounding the old + * line on the boundary, which is to be subdivided. #boundary# is an + * object of type #Boundary#. + * + * In 3D, a new vertex may be placed on the middle of a line or on + * the middle of a side. In the both cases, an array with four points + * has to be passed to #in_between#; in the latter case the two end + * points of the line have to be given consecutively twice, as + * elements 0 and 1, and 2 and 3, respectively. + * + * There are specialisations, #StraightBoundary#, which places + * the new point right into the middle of the given points, and + * #HyperBallBoundary# creating a hyperball with given radius + * around a given center point. + */ template class Boundary { public: @@ -53,15 +53,15 @@ class Boundary { /** - Specialisation of \Ref{Boundary}, which places the new point right - into the middle of the given points. The middle is defined as the - arithmetic mean of the points. - - This class does not really describe a boundary in the usual sense. By - placing new points in teh middle of old ones, it rather assumes that the - boundary of the domain is given by the polygon/polyhedron defined by the - boundary of the initial coarse triangulation. - */ + * Specialisation of \Ref{Boundary}, which places the new point right + * into the middle of the given points. The middle is defined as the + * arithmetic mean of the points. + * + * This class does not really describe a boundary in the usual sense. By + * placing new points in teh middle of old ones, it rather assumes that the + * boundary of the domain is given by the polygon/polyhedron defined by the + * boundary of the initial coarse triangulation. + */ template class StraightBoundary : public Boundary { public: @@ -82,18 +82,18 @@ class StraightBoundary : public Boundary { /** - Specialisation of \Ref{Boundary}, which places the new point on - the boundary of a ball in arbitrary dimension. It works by projecting - the point in the middle of the old points onto the ball. The middle is - defined as the arithmetic mean of the points. - - The center of the ball and its radius may be given upon construction of - an object of this type. They default to the origin and a radius of 1.0. 
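As an aside, the projection rule described for #HyperBallBoundary# is easy to write down in code. The following sketch is deliberately independent of the library (it uses a tiny stand-in for #Point<2># and makes no claim about the actual #in_between# signature): it forms the arithmetic mean of the two old vertices and scales it onto the circle of the given center and radius.
\begin{verbatim}
  #include <cmath>

  struct P2 { double x, y; };        // illustrative stand-in for Point<2>

                                     // Mean of the two old vertices,
                                     // projected onto the circle around
                                     // `center' with radius `radius'; the
                                     // degenerate case of the mean lying
                                     // on the center is not handled.
  P2 new_vertex_on_circle (const P2 &a, const P2 &b,
                           const P2 &center, const double radius)
  {
    const P2 mid = { (a.x+b.x)/2, (a.y+b.y)/2 };
    const double dx = mid.x - center.x,
                 dy = mid.y - center.y;
    const double r  = std::sqrt (dx*dx + dy*dy);
    const P2 p = { center.x + radius*dx/r,
                   center.y + radius*dy/r };
    return p;
  }
\end{verbatim}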
- - This class is derived from #StraightBoundary# rather than from - #Boundary#, which would seem natural, since this way we can use the - #StraightBoundary::in_between(neighbors)# function. - */ + * Specialisation of \Ref{Boundary}, which places the new point on + * the boundary of a ball in arbitrary dimension. It works by projecting + * the point in the middle of the old points onto the ball. The middle is + * defined as the arithmetic mean of the points. + * + * The center of the ball and its radius may be given upon construction of + * an object of this type. They default to the origin and a radius of 1.0. + * + * This class is derived from #StraightBoundary# rather than from + * #Boundary#, which would seem natural, since this way we can use the + * #StraightBoundary::in_between(neighbors)# function. + */ template class HyperBallBoundary : public StraightBoundary { public: diff --git a/deal.II/deal.II/include/grid/tria_iterator.h b/deal.II/deal.II/include/grid/tria_iterator.h index 56b2e80105..55ea217d15 100644 --- a/deal.II/deal.II/include/grid/tria_iterator.h +++ b/deal.II/deal.II/include/grid/tria_iterator.h @@ -27,200 +27,200 @@ template class Triangulation; /** - This class implements an iterator, analogous to those of the standard - template library (STL). It fulfills the requirements of a bidirectional iterator. - See the C++ documentation for further details of iterator specification and - usage. In addition to the STL - iterators an iterator of this class provides a #-># operator, i.e. you can - write statements like #i->set_refine_flag ();#. - - {\bf Note:} Please read the documentation about the prefix and the - postfix #++# operators in this and the derived classes! - - \subsection{Purpose} - - #iterators# are used whenever a loop over all lines, quads, cells etc. - is to be performed. These loops can then be coded like this: - \begin{verbatim} - cell_iterator i = tria.begin(); - cell_iterator end = tria.end(); - for (; i!=end; ++i) - if (cell->at_boundary()) - cell->set_refine_flag(); - \end{verbatim} - Note the usage of #++i# instead of #i++# since this does not involve - temporaries and copying. You should also really use a fixed value - #end# rather than coding #for (; i!=tria.end(); ++i)#, since - the creation and copying of these iterators is rather expensive - compared to normal pointers. - - The objects pointed to by iterators are #TriangulationLevel<1>::LinesData#, - #TriangulationLevel<2>::LinesData# - and #TriangulationLevel<2>::QuadsData#. To chose which of those, the - template parameter #Pointee# is used. - - Since the names as is are quite unhandy, the #Triangulation<># class which - uses these iterators declares typedef'd versions. See there for more - information. - - The objects pointed to are, as mentioned, #LinesData# etc. To be - more exact, when dereferencing an iterator, you do not get a #LineData# - object (or the like, but we will assume that you have a #line_iterator# - in the following), but a {\it virtual} object (called {\it accessor}) which - behaves as if it stored the data of a line. It does not contain any data - itself, but provides functions to manipulate the data of the line it - stands for. 
- - Since the data of one line is splitted to - several arrays (#lines#, #children# and #used#) for performance reasons - rather than keeping all information in a #Line# struct, access through - an accessor is usually much simpler than handling the exact data structure - and also less error prone since the data structure itself can be changed - in an arbitrary way while the only pieces of code which access these - data structures are the accessors. - - On the other hand, iterators are not much slower than operating directly - on the data structures, since they perform the loops that you had - to handcode yourself anyway. Most iterator and accessor functions are - inlined. - - The main functionality of iterators, however, resides in the #++# and - #--# operators. These move the iterator forward or backward just as if - it were a pointer into an array. Here, this operation is not so easy, - since it may include skipping some elements and the transition between - the triangulation levels. This is completely hidden from the user, though - you can still create an iterator pointing to an arbitrary element. - Actually, the operation of moving iterators back and forth is not done in - the iterator classes, but rather in the accessor classes. Since these are - passed as template arguments, you can write your own versions here to add - more functionality. - - Furthermore, the iterators decribed here satisfy the requirement of - input and bidirectional iterators as stated by the C++ standard and - the STL documentation. It is therefore possible to use the functions - from the {\it algorithm section} of the C++ standard, e.g. #count_if# - (see the documentation for \Ref{Triangulation} for an example) and - several others. Unfortunately, with some of them (e.g. #distance#), - g++2.7 has some problems and we will have to wait for g++2.8. - - - \subsection{Differences between the classes in this inheritance tree} - - #TriaRawIterator# objects point to lines, cells, etc in - the lists whether they are used or not (in the vectors, also {\it dead} - objects are stored, since deletion in vectors is expensive and we - also do not want to destroy the ordering induced by the numbering - in the vectors). Therefore not all raw iterators point to valid objects. - - There are two derived versions of this class: \Ref{TriaIterator} - objects, which only loop over used (valid) cells and - #TriaActiveIterator# objects - which only loop over active cells (not refined). - - - \subsection{Implementation} - - In principle, the Iterator class does not have much functionality. It - only becomes useful when assigned an #Accessor# (the second template - parameter), which really does the access to data. An #Accessor# has to - fulfil some requirements: - \begin{itemize} - \item It must have two members named #present_level# and #present_index# - storing the address of the element in the triangulation presently - pointed to. Furthermore, the three #Tria{Raw| |Active}Iterator# classes - have to be friends to the accessor or these data members must be public. - \item It must have a constructor which takes 1. a #Triangulation*#, - 2. and 3. and integer, denoting the initial level and index. - \item For the #TriaIterator# and the #TriaActiveIterator# class, it must - have a member function #bool used()#, for the latter a member function - #bool active()#. 
- \item It should not modify the #present_level# and #present_index# fields, - since this is what the iterator classes do, but it should use them to - dereference the data it points to. - \item It must have void operators #++# and #--#. - \end{itemize} - Then the iterator is able to do what it is supposed to. All of the necessary - functions are implemented in the #Accessor# base class, but you may write - your own version (non-virtual, since we use templates) to add functionality. - - There is a standard implementation, using classes which are derived from - \Ref{TriaAccessor}. These classes point to #Line#s, #Quad#s and the like. - For advanced use of the iterator classes, derive classes from - #{Line|Quad|Cell}Accessor# which also dereference data structures in other - objects, e.g. in a finite element context. An iterator with such an accessor - then simultaneously points to (for example) a cell in the triangulation and - the data stored on it in the finite element class. - - Derived accessor classes may need additional data (e.g. the #DoFAccessor# - needs a pointer to the #DoFHandler# to work on). This data can be - set upon construction through the last argument of the constructors. - Ideally, its type is a local type to the accessor and must have the name - #Accessor::LocalData#. In the standard implementation, this type is - declared to be a void pointer. The iterator constructors take their - last argument carrying the additional data by default as zero, so unless - #Accessor::LocalData# is a number or a pointer you may not construct - such an iterator without giving the last argument. If you want to use - the additional data, you also have to overload the #TriaAccessor::copy_data# - function. - - Unfortunately, the skeched way does not work, since gcc is not able to - recognize the type defined local to the template argument (it does not - suport the #typename# keyword at present), so we can only pass a voie - pointer. You may, however, convert this to any type, normally to another - pointer or to a pointer to a structure pointing to the data to be passed. - The mechanism may be changed if the mentioned features appear in gcc. - - Another possibility would be to have a function, say #set_local_data(...)# - in the accessor classes which need additional data. You could then create - an iterator like this: - \begin{verbatim} - TriaIterator<1,MyAccesor> i; - i->set_local_data (1,2,3); - \end{verbatim} - But this will not always work: if the iterator #i# is not a valid one, then - the library will forbid you to dereference it (which normally is a good - idea), thus resulting in an error when you dereference it in the second - line. - - - \subsection{Warning} - - It seems impossible to preserve #const#ness of a triangulation through - iterator usage. Thus, if you declare pointers to a #const# triangulation - object, you should be well aware that you might involuntarily alter the - data stored in the triangulation. - - \subsection{Internals} - - There is a representation of past-the-end-pointers, denoted by special - values of the member variables #present_level# and #present_index#: - If #present_level>=0# and #present_index>=0#, then the object is valid; - if #present_level==-1# and #present_index==-1#, then the iterator points - past the end; in all other cases, the iterator is considered invalid. - You can check this by calling the #state()# function. - - An iterator is also invalid, if the pointer pointing to the #Triangulation# - object is invalid or zero. 
- - Finally, an iterator is invalid, if the element pointed to by - #present_level# and #present_index# is not used, i.e. if the #used# - flag is set to false. - - The last two checks are not made in #state()# since both cases should only - occur upon unitialized construction through #memcpy# and the like (the - parent triangulation can only be set upon construction). If - an iterator is constructed empty through the empty constructor, - #present_level==-2# and #present_index==-2#. Thus, the iterator is - invalid anyway, regardless of the state of the triangulation pointer - and the state of the element pointed to. - - Past-the-end iterators may also be used to compare an iterator with the - {\it before-the-start} value, when running backwards. There is no - distiction between the iterators pointing past the two ends of a vector. - - @see Triangulation - @see TriaDimensionInfo - @author Wolfgang Bangerth, 1998 + * This class implements an iterator, analogous to those of the standard + * template library (STL). It fulfills the requirements of a bidirectional iterator. + * See the C++ documentation for further details of iterator specification and + * usage. In addition to the STL + * iterators an iterator of this class provides a #-># operator, i.e. you can + * write statements like #i->set_refine_flag ();#. + * + * {\bf Note:} Please read the documentation about the prefix and the + * postfix #++# operators in this and the derived classes! + * + * \subsection{Purpose} + * + * #iterators# are used whenever a loop over all lines, quads, cells etc. + * is to be performed. These loops can then be coded like this: + * \begin{verbatim} + * cell_iterator i = tria.begin(); + * cell_iterator end = tria.end(); + * for (; i!=end; ++i) + * if (cell->at_boundary()) + * cell->set_refine_flag(); + * \end{verbatim} + * Note the usage of #++i# instead of #i++# since this does not involve + * temporaries and copying. You should also really use a fixed value + * #end# rather than coding #for (; i!=tria.end(); ++i)#, since + * the creation and copying of these iterators is rather expensive + * compared to normal pointers. + * + * The objects pointed to by iterators are #TriangulationLevel<1>::LinesData#, + * #TriangulationLevel<2>::LinesData# + * and #TriangulationLevel<2>::QuadsData#. To chose which of those, the + * template parameter #Pointee# is used. + * + * Since the names as is are quite unhandy, the #Triangulation<># class which + * uses these iterators declares typedef'd versions. See there for more + * information. + * + * The objects pointed to are, as mentioned, #LinesData# etc. To be + * more exact, when dereferencing an iterator, you do not get a #LineData# + * object (or the like, but we will assume that you have a #line_iterator# + * in the following), but a {\it virtual} object (called {\it accessor}) which + * behaves as if it stored the data of a line. It does not contain any data + * itself, but provides functions to manipulate the data of the line it + * stands for. + * + * Since the data of one line is splitted to + * several arrays (#lines#, #children# and #used#) for performance reasons + * rather than keeping all information in a #Line# struct, access through + * an accessor is usually much simpler than handling the exact data structure + * and also less error prone since the data structure itself can be changed + * in an arbitrary way while the only pieces of code which access these + * data structures are the accessors. 
+ * + * On the other hand, iterators are not much slower than operating directly + * on the data structures, since they perform the loops that you had + * to handcode yourself anyway. Most iterator and accessor functions are + * inlined. + * + * The main functionality of iterators, however, resides in the #++# and + * #--# operators. These move the iterator forward or backward just as if + * it were a pointer into an array. Here, this operation is not so easy, + * since it may include skipping some elements and the transition between + * the triangulation levels. This is completely hidden from the user, though + * you can still create an iterator pointing to an arbitrary element. + * Actually, the operation of moving iterators back and forth is not done in + * the iterator classes, but rather in the accessor classes. Since these are + * passed as template arguments, you can write your own versions here to add + * more functionality. + * + * Furthermore, the iterators decribed here satisfy the requirement of + * input and bidirectional iterators as stated by the C++ standard and + * the STL documentation. It is therefore possible to use the functions + * from the {\it algorithm section} of the C++ standard, e.g. #count_if# + * (see the documentation for \Ref{Triangulation} for an example) and + * several others. Unfortunately, with some of them (e.g. #distance#), + * g++2.7 has some problems and we will have to wait for g++2.8. + * + * + * \subsection{Differences between the classes in this inheritance tree} + * + * #TriaRawIterator# objects point to lines, cells, etc in + * the lists whether they are used or not (in the vectors, also {\it dead} + * objects are stored, since deletion in vectors is expensive and we + * also do not want to destroy the ordering induced by the numbering + * in the vectors). Therefore not all raw iterators point to valid objects. + * + * There are two derived versions of this class: \Ref{TriaIterator} + * objects, which only loop over used (valid) cells and + * #TriaActiveIterator# objects + * which only loop over active cells (not refined). + * + * + * \subsection{Implementation} + * + * In principle, the Iterator class does not have much functionality. It + * only becomes useful when assigned an #Accessor# (the second template + * parameter), which really does the access to data. An #Accessor# has to + * fulfil some requirements: + * \begin{itemize} + * \item It must have two members named #present_level# and #present_index# + * storing the address of the element in the triangulation presently + *pointed to. Furthermore, the three #Tria{Raw| |Active}Iterator# classes + *have to be friends to the accessor or these data members must be public. + * \item It must have a constructor which takes 1. a #Triangulation*#, + * 2. and 3. and integer, denoting the initial level and index. + * \item For the #TriaIterator# and the #TriaActiveIterator# class, it must + * have a member function #bool used()#, for the latter a member function + *#bool active()#. + * \item It should not modify the #present_level# and #present_index# fields, + * since this is what the iterator classes do, but it should use them to + *dereference the data it points to. + * \item It must have void operators #++# and #--#. + * \end{itemize} + * Then the iterator is able to do what it is supposed to. All of the necessary + * functions are implemented in the #Accessor# base class, but you may write + * your own version (non-virtual, since we use templates) to add functionality. 
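Spelled out, the requirement list above amounts to little more than the following skeleton. It is only meant to visualize that list; it is not the actual #TriaAccessor# base class, and everything beyond the required members is omitted.
\begin{verbatim}
  template <int dim> class Triangulation;

  template <int dim>
  class SketchAccessor {
    public:
                                     // constructor taking the triangulation
                                     // and the initial level and index
      SketchAccessor (Triangulation<dim> *tria,
                      const int           level,
                      const int           index)
                      : present_level (level),
                        present_index (index),
                        tria (tria) {}

      bool used () const;            // required by TriaIterator
      bool active () const;          // additionally required by
                                     // TriaActiveIterator

      void operator++ ();            // step to the next element, possibly
      void operator-- ();            // crossing level boundaries

                                     // the iterator classes need access to
                                     // these two fields (or must be friends)
      int present_level;
      int present_index;

    private:
      Triangulation<dim> *tria;
  };
\end{verbatim}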
+ * + * There is a standard implementation, using classes which are derived from + * \Ref{TriaAccessor}. These classes point to #Line#s, #Quad#s and the like. + * For advanced use of the iterator classes, derive classes from + * #{Line|Quad|Cell}Accessor# which also dereference data structures in other + * objects, e.g. in a finite element context. An iterator with such an accessor + * then simultaneously points to (for example) a cell in the triangulation and + * the data stored on it in the finite element class. + * + * Derived accessor classes may need additional data (e.g. the #DoFAccessor# + * needs a pointer to the #DoFHandler# to work on). This data can be + * set upon construction through the last argument of the constructors. + * Ideally, its type is a local type to the accessor and must have the name + * #Accessor::LocalData#. In the standard implementation, this type is + * declared to be a void pointer. The iterator constructors take their + * last argument carrying the additional data by default as zero, so unless + * #Accessor::LocalData# is a number or a pointer you may not construct + * such an iterator without giving the last argument. If you want to use + * the additional data, you also have to overload the #TriaAccessor::copy_data# + * function. + * + * Unfortunately, the skeched way does not work, since gcc is not able to + * recognize the type defined local to the template argument (it does not + * suport the #typename# keyword at present), so we can only pass a voie + * pointer. You may, however, convert this to any type, normally to another + * pointer or to a pointer to a structure pointing to the data to be passed. + * The mechanism may be changed if the mentioned features appear in gcc. + * + * Another possibility would be to have a function, say #set_local_data(...)# + * in the accessor classes which need additional data. You could then create + * an iterator like this: + * \begin{verbatim} + * TriaIterator<1,MyAccesor> i; + * i->set_local_data (1,2,3); + * \end{verbatim} + * But this will not always work: if the iterator #i# is not a valid one, then + * the library will forbid you to dereference it (which normally is a good + * idea), thus resulting in an error when you dereference it in the second + * line. + * + * + * \subsection{Warning} + * + * It seems impossible to preserve #const#ness of a triangulation through + * iterator usage. Thus, if you declare pointers to a #const# triangulation + * object, you should be well aware that you might involuntarily alter the + * data stored in the triangulation. + * + * \subsection{Internals} + * + * There is a representation of past-the-end-pointers, denoted by special + * values of the member variables #present_level# and #present_index#: + * If #present_level>=0# and #present_index>=0#, then the object is valid; + * if #present_level==-1# and #present_index==-1#, then the iterator points + * past the end; in all other cases, the iterator is considered invalid. + * You can check this by calling the #state()# function. + * + * An iterator is also invalid, if the pointer pointing to the #Triangulation# + * object is invalid or zero. + * + * Finally, an iterator is invalid, if the element pointed to by + * #present_level# and #present_index# is not used, i.e. if the #used# + * flag is set to false. + * + * The last two checks are not made in #state()# since both cases should only + * occur upon unitialized construction through #memcpy# and the like (the + * parent triangulation can only be set upon construction). 
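In code, the convention for #present_level# and #present_index# described under "Internals" reads roughly as follows. This is a sketch only; the real #state()# function is a member of the iterator classes, and the #IteratorState# enum is the one declared in tria_accessor.h, repeated here to keep the fragment self-contained.
\begin{verbatim}
  enum IteratorState { valid, past_the_end, invalid };

  IteratorState state_of (const int present_level,
                          const int present_index)
  {
    if ((present_level >= 0) && (present_index >= 0))
      return valid;                        // points to a real element
    if ((present_level == -1) && (present_index == -1))
      return past_the_end;
    return invalid;                        // every other combination
  }
\end{verbatim}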
If + * an iterator is constructed empty through the empty constructor, + * #present_level==-2# and #present_index==-2#. Thus, the iterator is + * invalid anyway, regardless of the state of the triangulation pointer + * and the state of the element pointed to. + * + * Past-the-end iterators may also be used to compare an iterator with the + * {\it before-the-start} value, when running backwards. There is no + * distiction between the iterators pointing past the two ends of a vector. + * + * @see Triangulation + * @see TriaDimensionInfo + * @author Wolfgang Bangerth, 1998 */ template class TriaRawIterator : public bidirectional_iterator{ @@ -430,9 +430,9 @@ class TriaRawIterator : public bidirectional_iterator{ /** - This specialization of \Ref{TriaRawIterator} provides access only to the - {\it used} lines, quads, cells, etc. - */ + * This specialization of \Ref{TriaRawIterator} provides access only to the + * {\it used} lines, quads, cells, etc. + */ template class TriaIterator : public TriaRawIterator { public: @@ -546,10 +546,10 @@ class TriaIterator : public TriaRawIterator { /** - This specialization of \Ref{TriaIterator} provides access only to the - {\it active} lines, quads, cells, etc. An active cell is a cell which is not - refined and thus a cell on which calculations on the finest level are done. - */ + * This specialization of \Ref{TriaIterator} provides access only to the + * {\it active} lines, quads, cells, etc. An active cell is a cell which is not + * refined and thus a cell on which calculations on the finest level are done. + */ template class TriaActiveIterator : public TriaIterator { public: diff --git a/deal.II/deal.II/include/grid/tria_line.h b/deal.II/deal.II/include/grid/tria_line.h index 4a3150907c..be519bbc11 100644 --- a/deal.II/deal.II/include/grid/tria_line.h +++ b/deal.II/deal.II/include/grid/tria_line.h @@ -8,14 +8,14 @@ /** - Lines denote the boundaries of quads and the edges of hexaeders. They are - characterized by the (global) indices of the endpoints. - - A line itself has one index, as far as the topological part handled in - the triangulation is concerned: the index in the level - it belongs to. The level index is implicitely given by the position - in the #lines.lines# list attached to the information of each level. - */ + * Lines denote the boundaries of quads and the edges of hexaeders. They are + * characterized by the (global) indices of the endpoints. + * + * A line itself has one index, as far as the topological part handled in + * the triangulation is concerned: the index in the level + * it belongs to. The level index is implicitely given by the position + * in the #lines.lines# list attached to the information of each level. + */ class Line { public: /** diff --git a/deal.II/deal.II/include/grid/tria_quad.h b/deal.II/deal.II/include/grid/tria_quad.h index faed7dfa1e..7d99563c7c 100644 --- a/deal.II/deal.II/include/grid/tria_quad.h +++ b/deal.II/deal.II/include/grid/tria_quad.h @@ -8,16 +8,16 @@ #include /** - #Quad#s denote the fundamental entities of triangulations in two dimensions - and the boundaries of hexaeders in three dimensions. They are - characterized by the (global) indices of the corner points. - - A quad itself has one index, as far as the topological part handled in - the triangulation is concerned: the index in the level - it belongs to. The level index is implicitely given by the position - in the #quads.quads# list attached to the information of each level - of the triangulation. 
- */ + * #Quad#s denote the fundamental entities of triangulations in two dimensions + * and the boundaries of hexaeders in three dimensions. They are + * characterized by the (global) indices of the corner points. + * + * A quad itself has one index, as far as the topological part handled in + * the triangulation is concerned: the index in the level + * it belongs to. The level index is implicitely given by the position + * in the #quads.quads# list attached to the information of each level + * of the triangulation. + */ class Quad { public: diff --git a/deal.II/deal.II/include/numerics/assembler.h b/deal.II/deal.II/include/numerics/assembler.h index 20cca022bb..6f7a7e2fd6 100644 --- a/deal.II/deal.II/include/numerics/assembler.h +++ b/deal.II/deal.II/include/numerics/assembler.h @@ -22,13 +22,13 @@ class dVector; /** - This is the base class for equation objects. Equations objects describe the - finite element discretisation of one or more equations. - - Equation objects need only provide functions which set up the cell - matrices and the cell right hand side. These are then automatically inserted - into the global matrices and vectors. - */ + * This is the base class for equation objects. Equations objects describe the + * finite element discretisation of one or more equations. + * + * Equation objects need only provide functions which set up the cell + * matrices and the cell right hand side. These are then automatically inserted + * into the global matrices and vectors. + */ template class Equation { public: @@ -171,9 +171,9 @@ struct AssemblerData { /** - An #Assembler# is a specialized version of a #DoFCellAccessor# which adds - functionality to assemble global matrices and vectors from cell base ones. - */ + * An #Assembler# is a specialized version of a #DoFCellAccessor# which adds + * functionality to assemble global matrices and vectors from cell base ones. + */ template class Assembler : public DoFCellAccessor { public: diff --git a/deal.II/deal.II/include/numerics/base.h b/deal.II/deal.II/include/numerics/base.h index 4c56a3bd57..f914af9865 100644 --- a/deal.II/deal.II/include/numerics/base.h +++ b/deal.II/deal.II/include/numerics/base.h @@ -26,16 +26,16 @@ template class Function; /** - Denote which norm/integral is to be computed. The following possibilities - are implemented: - \begin{itemize} - \item #mean#: the function or difference of functions is integrated - on each cell. - \item #L1_norm#: the absolute value of the function is integrated. - \item #L2_norm#: the square of the function is integrated on each - cell; afterwards the root is taken of this value. - \end{itemize} -*/ + * Denote which norm/integral is to be computed. The following possibilities + * are implemented: + * \begin{itemize} + * \item #mean#: the function or difference of functions is integrated + * on each cell. + * \item #L1_norm#: the absolute value of the function is integrated. + * \item #L2_norm#: the square of the function is integrated on each + * cell; afterwards the root is taken of this value. + * \end{itemize} + */ enum NormType { mean, L1_norm, @@ -49,190 +49,190 @@ enum NormType { /** - Base class for user problems. This class stores the system matrix and right - hand side vectors as well as a solution vector. It initiates the assemblage - process of matrix and vectors and so on. - - This class is not extremely versatile as could certainly be. For example - it presently only supports sparse matrices and has no multigrid features. 
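On a single cell, the three quantities listed for #NormType# above differ only in what is accumulated in the quadrature loop. The following fragment is independent of the library interface: #values# are point values of the function (or of a difference of functions) at the quadrature points and #weights# the quadrature weights including the Jacobian, both of which a finite element evaluation object would provide in practice.
\begin{verbatim}
  #include <cmath>

                                     // NormType as declared above:
                                     // `mean' integrates the values,
                                     // `L1_norm' their absolute values,
                                     // `L2_norm' their squares, taking
                                     // the root afterwards.
  double cell_value (const NormType      norm,
                     const double       *values,
                     const double       *weights,
                     const unsigned int  n_q_points)
  {
    double sum = 0;
    for (unsigned int q=0; q<n_q_points; ++q)
      {
        double contribution = 0;
        switch (norm)
          {
            case mean:    contribution = values[q];              break;
            case L1_norm: contribution = std::fabs (values[q]);  break;
            case L2_norm: contribution = values[q] * values[q];  break;
            default:      break;       // other norms not shown here
          }
        sum += contribution * weights[q];
      }
    return (norm == L2_norm ? std::sqrt (sum) : sum);
  }
\end{verbatim}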
- However, all these things depend strongly on the problem and it seems - best to implement many of these things yourself. Thus, this class is more - a display of concept haw to work with deal.II. - - - \subsection{Assemblage} - - The #assemble# member function does the assemblage of the system matrix and - the given number of right hand sides. It does the following steps: - \begin{itemize} - \item Initialize solution vector with zero entries. - \item Create sparsity pattern of the system matrix and condense it with - the constraints induced by hanging nodes. - \item Initialize an assembler object. - \item Loop over all cells and assemble matrix and vectors using the given - quadrature formula and the equation object which contains the weak - formulation of the equation. - \item Apply Dirichlet boundary conditions. See the section on boundary - conditions for more details. - \item Condense the system matrix and right hand side with the constraints - induced by hanging nodes. - \end{itemize} - - The #assemble# function needs an object describing the boundary of the domain, - since for higher order finite elements, we may be tempted to use curved faces - of cells for better approximation of the boundary. In this case, the - transformation from the unit cell to the real cell requires knowledge of - the exact boundary of the domain. - - - \subsection{Solving} - - Calling the #solve# function with a solver object, the system of equations - which results after having called the #assemble# function is solved. After - this, the solution vector is distributed again, i.e. the constrained nodes - are given their correct values. - - - \subsection{Boundary conditions} - - During assemblage of matrices and right hand side, use is made of dirichlet - boundary conditions (in short: bc) specified to the #assemble# function. You - can specify a list of pairs of boundary indicators (of type #unsigned char#; - see the section in the documentation of the \Ref{Triangulation} class for more - details) and the according functions denoting the dirichlet boundary values - of the nodes on boundary faces with this boundary indicator. - - Usually, all other boundary conditions, such as inhomogeneous Neumann values - or mixed boundary conditions are handled in the weak formulation. No attempt - is made to include these into the process of assemblage therefore. - - The inclusion into the assemblage process is as follows: when the matrix and - vectors are set up, a list of nodes subject to dirichlet bc is made and - matrix and vectors are changed accordingly. This is done by deleting all - entries in the matrix in the line of this degree of freedom, setting the - main diagonal entry to one and the right hand side element to the - boundary value at this node. This forces this node's value to be as specified. - To decouple the remaining linear system of equations and to make the system - symmetric again (at least if it was before), one Gauss elimination - step is performed with this line, by adding this (now almost empty) line to - all other lines which couple with the given degree of freedom and thus - eliminating all coupling between this degree of freedom and others. Now - also the column consists only of zeroes, apart from the main diagonal entry. - - It seems as if we had to make clear not to overwrite the lines of other - boundary nodes when doing the Gauss elimination step. 
However, since we - reset the right hand side when passing such a node, it is not a problem - to change the right hand side values of other boundary nodes not yet - processed. It would be a problem to change those entries of nodes already - processed, but since the matrix entry of the present column on the row - of an already processed node is zero, the Gauss step does not change - the right hand side. We need therefore not take special care of other - boundary nodes. - - To make solving faster, we preset the solution vector with the right boundary - values. Since boundary nodes can never be hanging nodes, and since all other - entries of the solution vector are zero, we need not condense the solution - vector if the condensation process is done in-place. If done by copying - matrix and vectors to smaller ones, it would also be necessary to condense - the solution vector to preserve the preset boundary values. - - It it not clear whether the deletion of coupling between the boundary degree - of freedom and other dofs really forces the corresponding entry in the - solution vector to have the right value when using iterative solvers, - since their search directions may contains components in the direction - of the boundary node. For this reason, we perform a very simple line - balancing by not setting the main diagonal entry to unity, but rather - to the value it had before deleting this line, or to the first nonzero - main diagonal entry if it is zero from a previous Gauss elimination - step. Of course we have to change - the right hand side appropriately. This is not a very good - strategy, but it at least should give the main diagonal entry a value - in the right order of dimension, which makes the solving process a bit - more stable. A refined algorithm would set the entry to the mean of the - other diagonal entries, but this seems to be too expensive. - - Because of the mentioned question, whether or not a preset solution value - which does not couple with other degrees of freedom remains its value or - not during solving iteratively, it may or may not be necessary to set - the correct value after solving again. This question is an open one as of - now and may be answered by future experience. - - At present, boundary values are interpolated, i.e. a node is given the - point value of the boundary function. In some cases, it may be necessary - to use the L2-projection of the boundary function or any other method. - This can be done by overloading the virtual function - #make_boundary_value_list# which must return a list of boundary dofs - and their corresponding values. - - You should be aware that the boundary function may be evaluated at nodes - on the interior of faces. These, however, need not be on the true - boundary, but rather are on the approximation of the boundary represented - by teh mapping of the unit cell to the real cell. Since this mapping will - in most cases not be the exact one at the face, the boundary function is - evaluated at points which are not on the boundary and you should make - sure that the returned values are reasonable in some sense anyway. - - - \subsection{Computing errors} - - The function #integrate_difference# performs the calculation of the error - between the finite element solution and a given (continuous) reference - function in different norms. The integration is performed using a given - quadrature formulae and assumes that the given finite element objects equals - that used for the computation of the solution. 
- - The result ist stored in a vector (named #difference#), where each entry - equals the given norm of the difference on one cell. The order of entries - is the same as a #cell_iterator# takes when started with #begin_active# and - promoted with the #++# operator. - - You can use the #distribute_cell_to_dof_vector# function of the #DoFHandler# - class to convert cell based data to a data vector with values on the degrees - of freedom, which can then be attached to a #DataOut# object to be printed. - - Presently, there is the possibility to compute the following values from the - difference, on each cell: #mean#, #L1_norm#, #L2_norm#, #Linfty_norm#, - #H1_seminorm#. - For the mean difference value, the reference function minus the numerical - solution is computed, not the other way round. - - The infinity norm of the difference on a given cell returns the maximum - absolute value of the difference at the quadrature points given by the - quadrature formula parameter. This will in some cases not be too good - an approximation, since for example the Gauss quadrature formulae do - not evaluate the difference at the end or corner points of the cells. - You may want to chose a quadrature formula with more quadrature points - or one with another distribution of the quadrature points in this case. - You should also take into account the superconvergence properties of finite - elements in some points: for example in 1D, the standard finite element - method is a collocation method and should return the exact value at nodal - points. Therefore, the trapezoidal rule should always return a vanishing - L-infinity error. Conversely, in 2D the maximum L-infinity error should - be located at the vertices or at the center of the cell, which would make - it plausible to use the Simpson quadrature rule. On the other hand, there - may be superconvergence at Gauss integration points. These examples are not - intended as a rule of thumb, rather they are though to illustrate that the - use of the wrong quadrature formula may show a significantly wrong result - and care should be taken to chose the right formula. - - The $H_1$ seminorm is the $L_2$ norm of the gradient of the difference. The - full $H_1$ norm is the sum of the seminorm and the $L_2$ norm. - - To get the {\it global} L_1 error, you have to sum up the entries in - #difference#, e.g. using #dVector::l1_norm# function. - For the global L_2 difference, you have to sum up the squares of the - entries and take the root of the sum, e.g. using #dVector::l2_norm. - These two operations represent the - l_1 and l_2 norms of the vectors, but you need not take the absolute - value of each entry, since the cellwise norms are already positive. - - To get the global mean difference, simply sum up the elements as above. - To get the L_\infty norm, take the maximum of the vector elements, e.g. - using the #dVector::linfty_norm# function. - - For the global $H_1$ norm and seminorm, the same rule applies as for the - $L_2$ norm: compute the $l_2$ norm of the cell error vector. - */ + * Base class for user problems. This class stores the system matrix and right + * hand side vectors as well as a solution vector. It initiates the assemblage + * process of matrix and vectors and so on. + * + * This class is not extremely versatile as could certainly be. For example + * it presently only supports sparse matrices and has no multigrid features. + * However, all these things depend strongly on the problem and it seems + * best to implement many of these things yourself. 
Thus, this class is more + * a display of concept how to work with deal.II. + * + * + * \subsection{Assemblage} + * + * The #assemble# member function does the assemblage of the system matrix and + * the given number of right hand sides. It does the following steps: + * \begin{itemize} + * \item Initialize solution vector with zero entries. + * \item Create sparsity pattern of the system matrix and condense it with + * the constraints induced by hanging nodes. + * \item Initialize an assembler object. + * \item Loop over all cells and assemble matrix and vectors using the given + * quadrature formula and the equation object which contains the weak + * formulation of the equation. + * \item Apply Dirichlet boundary conditions. See the section on boundary + * conditions for more details. + * \item Condense the system matrix and right hand side with the constraints + * induced by hanging nodes. + * \end{itemize} + * + * The #assemble# function needs an object describing the boundary of the domain, + * since for higher order finite elements, we may be tempted to use curved faces + * of cells for better approximation of the boundary. In this case, the + * transformation from the unit cell to the real cell requires knowledge of + * the exact boundary of the domain. + * + * + * \subsection{Solving} + * + * Calling the #solve# function with a solver object, the system of equations + * which results after having called the #assemble# function is solved. After + * this, the solution vector is distributed again, i.e. the constrained nodes + * are given their correct values. + * + * + * \subsection{Boundary conditions} + * + * During assemblage of matrices and right hand side, use is made of dirichlet + * boundary conditions (in short: bc) specified to the #assemble# function. You + * can specify a list of pairs of boundary indicators (of type #unsigned char#; + * see the section in the documentation of the \Ref{Triangulation} class for more + * details) and the according functions denoting the dirichlet boundary values + * of the nodes on boundary faces with this boundary indicator. + * + * Usually, all other boundary conditions, such as inhomogeneous Neumann values + * or mixed boundary conditions are handled in the weak formulation. No attempt + * is made to include these into the process of assemblage therefore. + * + * The inclusion into the assemblage process is as follows: when the matrix and + * vectors are set up, a list of nodes subject to dirichlet bc is made and + * matrix and vectors are changed accordingly. This is done by deleting all + * entries in the matrix in the line of this degree of freedom, setting the + * main diagonal entry to one and the right hand side element to the + * boundary value at this node. This forces this node's value to be as specified. + * To decouple the remaining linear system of equations and to make the system + * symmetric again (at least if it was before), one Gauss elimination + * step is performed with this line, by adding this (now almost empty) line to + * all other lines which couple with the given degree of freedom and thus + * eliminating all coupling between this degree of freedom and others. Now + * also the column consists only of zeroes, apart from the main diagonal entry. + * + * It seems as if we had to make clear not to overwrite the lines of other + * boundary nodes when doing the Gauss elimination step. 
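For a dense matrix and a single boundary degree of freedom, the procedure just described (clear the row, keep a sensibly scaled diagonal entry, set the right hand side, then eliminate the column) might be sketched as follows. The matrix and vector are plain arrays here, not the library's classes, and the fallback for a zero diagonal entry is simplified to one rather than searching for the first nonzero diagonal entry.
\begin{verbatim}
                                     // Apply the Dirichlet value `g' for
                                     // degree of freedom `i' to the dense
                                     // n x n matrix A (stored row-wise)
                                     // and right hand side b.
  void apply_boundary_value (double *A, double *b,
                             const unsigned int n,
                             const unsigned int i, const double g)
  {
                                     // keep a diagonal entry of
                                     // reasonable size instead of 1
    const double diagonal = (A[i*n+i] != 0 ? A[i*n+i] : 1.);

                                     // clear row i, then set the diagonal
                                     // and the right hand side
    for (unsigned int j=0; j<n; ++j)
      A[i*n+j] = 0;
    A[i*n+i] = diagonal;
    b[i]     = diagonal * g;

                                     // Gauss elimination step: clear
                                     // column i in all other rows and move
                                     // the known value to the right hand side
    for (unsigned int r=0; r<n; ++r)
      if (r != i)
        {
          b[r]     -= A[r*n+i] * g;
          A[r*n+i]  = 0;
        }
  }
\end{verbatim}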
However, since we + * reset the right hand side when passing such a node, it is not a problem + * to change the right hand side values of other boundary nodes not yet + * processed. It would be a problem to change those entries of nodes already + * processed, but since the matrix entry of the present column on the row + * of an already processed node is zero, the Gauss step does not change + * the right hand side. We need therefore not take special care of other + * boundary nodes. + * + * To make solving faster, we preset the solution vector with the right boundary + * values. Since boundary nodes can never be hanging nodes, and since all other + * entries of the solution vector are zero, we need not condense the solution + * vector if the condensation process is done in-place. If done by copying + * matrix and vectors to smaller ones, it would also be necessary to condense + * the solution vector to preserve the preset boundary values. + * + * It it not clear whether the deletion of coupling between the boundary degree + * of freedom and other dofs really forces the corresponding entry in the + * solution vector to have the right value when using iterative solvers, + * since their search directions may contains components in the direction + * of the boundary node. For this reason, we perform a very simple line + * balancing by not setting the main diagonal entry to unity, but rather + * to the value it had before deleting this line, or to the first nonzero + * main diagonal entry if it is zero from a previous Gauss elimination + * step. Of course we have to change + * the right hand side appropriately. This is not a very good + * strategy, but it at least should give the main diagonal entry a value + * in the right order of dimension, which makes the solving process a bit + * more stable. A refined algorithm would set the entry to the mean of the + * other diagonal entries, but this seems to be too expensive. + * + * Because of the mentioned question, whether or not a preset solution value + * which does not couple with other degrees of freedom remains its value or + * not during solving iteratively, it may or may not be necessary to set + * the correct value after solving again. This question is an open one as of + * now and may be answered by future experience. + * + * At present, boundary values are interpolated, i.e. a node is given the + * point value of the boundary function. In some cases, it may be necessary + * to use the L2-projection of the boundary function or any other method. + * This can be done by overloading the virtual function + * #make_boundary_value_list# which must return a list of boundary dofs + * and their corresponding values. + * + * You should be aware that the boundary function may be evaluated at nodes + * on the interior of faces. These, however, need not be on the true + * boundary, but rather are on the approximation of the boundary represented + * by teh mapping of the unit cell to the real cell. Since this mapping will + * in most cases not be the exact one at the face, the boundary function is + * evaluated at points which are not on the boundary and you should make + * sure that the returned values are reasonable in some sense anyway. + * + * + * \subsection{Computing errors} + * + * The function #integrate_difference# performs the calculation of the error + * between the finite element solution and a given (continuous) reference + * function in different norms. 
The integration is performed using a given + * quadrature formulae and assumes that the given finite element objects equals + * that used for the computation of the solution. + * + * The result ist stored in a vector (named #difference#), where each entry + * equals the given norm of the difference on one cell. The order of entries + * is the same as a #cell_iterator# takes when started with #begin_active# and + * promoted with the #++# operator. + * + * You can use the #distribute_cell_to_dof_vector# function of the #DoFHandler# + * class to convert cell based data to a data vector with values on the degrees + * of freedom, which can then be attached to a #DataOut# object to be printed. + * + * Presently, there is the possibility to compute the following values from the + * difference, on each cell: #mean#, #L1_norm#, #L2_norm#, #Linfty_norm#, + * #H1_seminorm#. + * For the mean difference value, the reference function minus the numerical + * solution is computed, not the other way round. + * + * The infinity norm of the difference on a given cell returns the maximum + * absolute value of the difference at the quadrature points given by the + * quadrature formula parameter. This will in some cases not be too good + * an approximation, since for example the Gauss quadrature formulae do + * not evaluate the difference at the end or corner points of the cells. + * You may want to chose a quadrature formula with more quadrature points + * or one with another distribution of the quadrature points in this case. + * You should also take into account the superconvergence properties of finite + * elements in some points: for example in 1D, the standard finite element + * method is a collocation method and should return the exact value at nodal + * points. Therefore, the trapezoidal rule should always return a vanishing + * L-infinity error. Conversely, in 2D the maximum L-infinity error should + * be located at the vertices or at the center of the cell, which would make + * it plausible to use the Simpson quadrature rule. On the other hand, there + * may be superconvergence at Gauss integration points. These examples are not + * intended as a rule of thumb, rather they are though to illustrate that the + * use of the wrong quadrature formula may show a significantly wrong result + * and care should be taken to chose the right formula. + * + * The $H_1$ seminorm is the $L_2$ norm of the gradient of the difference. The + * full $H_1$ norm is the sum of the seminorm and the $L_2$ norm. + * + * To get the {\it global} L_1 error, you have to sum up the entries in + * #difference#, e.g. using #dVector::l1_norm# function. + * For the global L_2 difference, you have to sum up the squares of the + * entries and take the root of the sum, e.g. using #dVector::l2_norm. + * These two operations represent the + * l_1 and l_2 norms of the vectors, but you need not take the absolute + * value of each entry, since the cellwise norms are already positive. + * + * To get the global mean difference, simply sum up the elements as above. + * To get the L_\infty norm, take the maximum of the vector elements, e.g. + * using the #dVector::linfty_norm# function. + * + * For the global $H_1$ norm and seminorm, the same rule applies as for the + * $L_2$ norm: compute the $l_2$ norm of the cell error vector. 
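Collecting the cellwise values into global numbers, as described above, takes only a few lines. The sketch below uses a plain array of per-cell values rather than the #dVector# class, whose #l1_norm#, #l2_norm# and #linfty_norm# members do the same work for the l_1, l_2 and maximum cases; the mean and $H_1$ values follow the same two patterns.
\begin{verbatim}
  #include <cmath>

                                     // Global values computed from the
                                     // vector `d' of cellwise differences,
                                     // one entry per active cell.
  double global_L1 (const double *d, const unsigned int n_cells)
  {
    double sum = 0;                  // no absolute values needed: the
    for (unsigned int c=0; c<n_cells; ++c)   // cellwise norms are >= 0
      sum += d[c];
    return sum;                      // the global mean is the same sum
  }

  double global_L2 (const double *d, const unsigned int n_cells)
  {
    double sum = 0;
    for (unsigned int c=0; c<n_cells; ++c)
      sum += d[c] * d[c];
    return std::sqrt (sum);          // also the rule for H1 (semi)norms
  }

  double global_Linfty (const double *d, const unsigned int n_cells)
  {
    double max = 0;
    for (unsigned int c=0; c<n_cells; ++c)
      if (d[c] > max)
        max = d[c];
    return max;
  }
\end{verbatim}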
+ */ template class ProblemBase { public: diff --git a/deal.II/deal.II/include/numerics/data_io.h b/deal.II/deal.II/include/numerics/data_io.h index d5628946dc..fc8192dc02 100644 --- a/deal.II/deal.II/include/numerics/data_io.h +++ b/deal.II/deal.II/include/numerics/data_io.h @@ -19,10 +19,10 @@ class dVector; /** - Structure which is passed to the #Triangulation::create_triangulation# - function. It contains all data needed to construct a cell, namely the - indices of the vertices and the material indicator. -*/ + * Structure which is passed to the #Triangulation::create_triangulation# + * function. It contains all data needed to construct a cell, namely the + * indices of the vertices and the material indicator. + */ template struct CellData { int vertices[2<::create_triangulation# - function to describe boundary information. - - This structure is the same for all dimensions, since we use an input - function which is the same for all dimensions. The content of objects - of this structure varies with the dimensions, however. - - Since in one space dimension, there is no boundary information apart - from the two end points of the interval, this structure does not contain - anything and exists only for consistency, to allow a common interface - for all space dimensions. All fields should always be empty. - - Boundary data in 2D consists - of a list of lines which belong to a given boundary component. A - boundary component is a list of lines which are given a common - number describing the boundary condition to hold on this part of the - boundary. The triangulation creation function gives lines not in this - list either the boundary indicator zero (if on the boundary) or 255 - (if in the interior). Explicitely giving a line the indicator 255 - will result in an error, as well as giving an interior line a boundary - indicator. -*/ + * Structure to be passed to the #Triangulation::create_triangulation# + * function to describe boundary information. + * + * This structure is the same for all dimensions, since we use an input + * function which is the same for all dimensions. The content of objects + * of this structure varies with the dimensions, however. + * + * Since in one space dimension, there is no boundary information apart + * from the two end points of the interval, this structure does not contain + * anything and exists only for consistency, to allow a common interface + * for all space dimensions. All fields should always be empty. + * + * Boundary data in 2D consists + * of a list of lines which belong to a given boundary component. A + * boundary component is a list of lines which are given a common + * number describing the boundary condition to hold on this part of the + * boundary. The triangulation creation function gives lines not in this + * list either the boundary indicator zero (if on the boundary) or 255 + * (if in the interior). Explicitely giving a line the indicator 255 + * will result in an error, as well as giving an interior line a boundary + * indicator. + */ struct SubCellData { /** * Each record of this vector describes @@ -92,108 +92,108 @@ struct SubCellData { /** - This class implements an input mechanism for grid data. It allows to - read a grid structure into a triangulation object. Future versions - will also allow to read data on this grid into vectors. - - At present, only UCD (unstructured cell data) is supported as input - format for grid data. Any numerical data after the block of topological - information is ignored. 
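Schematically, the cell and boundary records described above boil down to something like the following. The member names are made up for this sketch and need not match the library's #CellData# and #SubCellData# declarations; the two example cells use the consistent vertex numbering discussed below.
\begin{verbatim}
  #include <vector>

  struct SketchCellData {
    int           vertices[4];       // vertex indices of a 2D cell,
                                     // numbered counter-clockwise
    unsigned char material;          // material indicator
  };

  struct SketchBoundaryLine {
    int           vertices[2];       // end points of the boundary line
    unsigned char boundary_indicator;// neither 255 nor given to
                                     // interior lines
  };

                                     //     3---4---5
                                     //     |   |   |
                                     //     0---1---2
                                     // the two cells of this grid:
  void two_cells (std::vector<SketchCellData> &cells)
  {
    const SketchCellData left  = {{0, 1, 4, 3}, 0};
    const SketchCellData right = {{1, 2, 5, 4}, 0};
    cells.push_back (left);
    cells.push_back (right);
  }
\end{verbatim}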
- - To read grid data, the triangulation to be fed with has to be empty. - When giving a file which does not contain the assumed information or - which does not keep to the right format, the state of the triangulation - will be undefined afterwards. Upon input, only lines in one dimension - and line and quads in two dimensions are accepted. All other cell types - (e.g. triangles in two dimensions, quads and hexes in 3d) are rejected. - The vertex and cell numbering in the UCD file, which - need not be consecutively, is lost upon transfer to the triangulation - object, since this one needs consecutively numbered elements. - - Material indicators are accepted to denote the material id of cells and - to denote boundary part indication for lines in 2D. Read the according - sections in the documentation of the \Ref{Triangulation} class for - further details. - - - \subsection{Structure of input grid data} - - It is your duty to use a correct numbering of vertices in the cell list, - i.e. for lines, you have to first give the vertex with the lower coordinate - value, then that with the higher coordinate value. For quadrilaterals in - two dimensions, the vertex indices in the #quad# list have to be such that - the vertices are numbered in counter-clockwise sense. - - In two dimensions, another difficulty occurs, which has to do with the sense - of a quadrilateral. A quad consists of four lines which have a direction, - which is per definitionem as follows: - \begin{verbatim} - 3-->--2 - | | - ^ ^ - | | - 0-->--1 - \end{verbatim} - Now, two adjacent cells must have a vertex numbering such that the direction - of the common side is the same. For example, the following two quads - \begin{verbatim} - 3---4---5 - | | | - 0---1---2 - \end{verbatim} - may be characterised by the vertex numbers (0 1 4 3) and (1 2 5 4), since - the middle line would get the direction #1->4# when viewed from both cells. - The numbering (0 1 4 3) and (5 4 1 2) would not be allowed, since the left - quad would give the common line the direction #1->4#, while the right one - would want to use #4->1#, leading to ambiguity. The #Triangulation# object - is capable of detecting this special case, which can be eliminated by - rotating the indices of the right quad by two. However, it would not - know what to do if you gave the vertex indices (4 1 2 5), since then it - would have to rotate by one element or three, the decision which to take is - not yet implemented. - - There are more ambiguous cases, where the triangulation may not know what - to do at all without the use of very sophisticated algorithms. On such example - is the following: - \begin{verbatim} - 9---10-----11 - | | / | - 6---7---8 | - | | | | - 3---4---5 | - | | \ | - 0---1------2 - \end{verbatim} - Assume that you had numbered the vertices in the cells at the left boundary - in a way, that the following line directions are induced: - \begin{verbatim} - 9->-10-----11 - ^ ^ / | - 6->-7---8 | - ^ ^ | | - 3->-4---5 | - ^ ^ \ | - 0->-1------2 - \end{verbatim} - (This could for example be done by using the indices (0 1 4 3), (3 4 7 6), - (6 7 10 9) for the three cells). Now, you will not find a way of giving - indices for the right cells, without introducing either ambiguity for - one line or other, or without violating that within each cells, there must be - one vertex from which both lines are directed away and the opposite one to - which both adjacent lines point to. - - The solution in this case is to renumber one of the three left cells, e.g. 
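The rule for the direction of a common line can be checked mechanically. The helper below is illustrative only, not part of the library: it lists the four directed lines a quad's vertex numbering induces (bottom #0->1#, right #1->2#, top #3->2#, left #0->3#, as in the sketch above) and then tests two quads for a common line with contradicting directions.
\begin{verbatim}
                                     // the four directed lines induced by
                                     // the vertex numbering of a quad
  void directed_lines (const int v[4], int lines[4][2])
  {
    const int pairs[4][2] = {{0,1}, {1,2}, {3,2}, {0,3}};
    for (int l=0; l<4; ++l)
      {
        lines[l][0] = v[pairs[l][0]];
        lines[l][1] = v[pairs[l][1]];
      }
  }

                                     // true if the two quads induce
                                     // opposite directions on a common
                                     // line, i.e. their numbering is
                                     // inconsistent
  bool inconsistent (const int quad1[4], const int quad2[4])
  {
    int l1[4][2], l2[4][2];
    directed_lines (quad1, l1);
    directed_lines (quad2, l2);
    for (int i=0; i<4; ++i)
      for (int j=0; j<4; ++j)
        if ((l1[i][0] == l2[j][1]) && (l1[i][1] == l2[j][0]))
          return true;               // same line, opposite direction
    return false;
  }
\end{verbatim}
For the quads (0 1 4 3) and (1 2 5 4) of the example above this returns #false# (both induce #1->4# on the common line), while (0 1 4 3) and (5 4 1 2) are reported as inconsistent.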
- by reverting the sense of the line between vertices 7 and 10 by numbering - the top left cell by (9 6 7 10). - - But this is a thing that the triangulation - object can't do for you, since it would involve backtracking to cells - already created when we find that we can't number the indices of one of - the rightmost cells consistently. It is neither clear how to do this - backtracking nor whether it can be done with a stopping algorithm, if - possible within polynomial time. This kind of numbering must be made - upon construction of the coarse grid, unfortunately. - */ + * This class implements an input mechanism for grid data. It allows to + * read a grid structure into a triangulation object. Future versions + * will also allow to read data on this grid into vectors. + * + * At present, only UCD (unstructured cell data) is supported as input + * format for grid data. Any numerical data after the block of topological + * information is ignored. + * + * To read grid data, the triangulation to be fed with has to be empty. + * When giving a file which does not contain the assumed information or + * which does not keep to the right format, the state of the triangulation + * will be undefined afterwards. Upon input, only lines in one dimension + * and line and quads in two dimensions are accepted. All other cell types + * (e.g. triangles in two dimensions, quads and hexes in 3d) are rejected. + * The vertex and cell numbering in the UCD file, which + * need not be consecutively, is lost upon transfer to the triangulation + * object, since this one needs consecutively numbered elements. + * + * Material indicators are accepted to denote the material id of cells and + * to denote boundary part indication for lines in 2D. Read the according + * sections in the documentation of the \Ref{Triangulation} class for + * further details. + * + * + * \subsection{Structure of input grid data} + * + * It is your duty to use a correct numbering of vertices in the cell list, + * i.e. for lines, you have to first give the vertex with the lower coordinate + * value, then that with the higher coordinate value. For quadrilaterals in + * two dimensions, the vertex indices in the #quad# list have to be such that + * the vertices are numbered in counter-clockwise sense. + * + * In two dimensions, another difficulty occurs, which has to do with the sense + * of a quadrilateral. A quad consists of four lines which have a direction, + * which is per definitionem as follows: + * \begin{verbatim} + * 3-->--2 + * | | + * ^ ^ + * | | + * 0-->--1 + * \end{verbatim} + * Now, two adjacent cells must have a vertex numbering such that the direction + * of the common side is the same. For example, the following two quads + * \begin{verbatim} + * 3---4---5 + * | | | + * 0---1---2 + * \end{verbatim} + * may be characterised by the vertex numbers (0 1 4 3) and (1 2 5 4), since + * the middle line would get the direction #1->4# when viewed from both cells. + * The numbering (0 1 4 3) and (5 4 1 2) would not be allowed, since the left + * quad would give the common line the direction #1->4#, while the right one + * would want to use #4->1#, leading to ambiguity. The #Triangulation# object + * is capable of detecting this special case, which can be eliminated by + * rotating the indices of the right quad by two. However, it would not + * know what to do if you gave the vertex indices (4 1 2 5), since then it + * would have to rotate by one element or three, the decision which to take is + * not yet implemented. 
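+ *
+ * As an illustration only (a sketch, not a prescribed calling sequence;
+ * the field and function names are assumed from the description of the
+ * #CellData# structure and of #create_triangulation# above), the two
+ * admissible quads (0 1 4 3) and (1 2 5 4) could be set up like this:
+ * \begin{verbatim}
+ *   vector<Point<2> >    vertices;
+ *   vertices.push_back (Point<2>(0,0));   // vertex 0
+ *   vertices.push_back (Point<2>(1,0));   // vertex 1
+ *   vertices.push_back (Point<2>(2,0));   // vertex 2
+ *   vertices.push_back (Point<2>(0,1));   // vertex 3
+ *   vertices.push_back (Point<2>(1,1));   // vertex 4
+ *   vertices.push_back (Point<2>(2,1));   // vertex 5
+ *
+ *   vector<CellData<2> > cells (2);
+ *   // left quad, counter-clockwise; the common line gets direction 1->4
+ *   cells[0].vertices[0] = 0;   cells[0].vertices[1] = 1;
+ *   cells[0].vertices[2] = 4;   cells[0].vertices[3] = 3;
+ *   // right quad; the common line again runs 1->4
+ *   cells[1].vertices[0] = 1;   cells[1].vertices[1] = 2;
+ *   cells[1].vertices[2] = 5;   cells[1].vertices[3] = 4;
+ *   // material indicator (field name assumed)
+ *   cells[0].material_id = 0;   cells[1].material_id = 0;
+ *
+ *   Triangulation<2> tria;
+ *   tria.create_triangulation (vertices, cells, SubCellData());
+ * \end{verbatim}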
+ * + * There are more ambiguous cases, where the triangulation may not know what + * to do at all without the use of very sophisticated algorithms. On such example + * is the following: + * \begin{verbatim} + * 9---10-----11 + * | | / | + * 6---7---8 | + * | | | | + * 3---4---5 | + * | | \ | + * 0---1------2 + * \end{verbatim} + * Assume that you had numbered the vertices in the cells at the left boundary + * in a way, that the following line directions are induced: + * \begin{verbatim} + * 9->-10-----11 + * ^ ^ / | + * 6->-7---8 | + * ^ ^ | | + * 3->-4---5 | + * ^ ^ \ | + * 0->-1------2 + * \end{verbatim} + * (This could for example be done by using the indices (0 1 4 3), (3 4 7 6), + * (6 7 10 9) for the three cells). Now, you will not find a way of giving + * indices for the right cells, without introducing either ambiguity for + * one line or other, or without violating that within each cells, there must be + * one vertex from which both lines are directed away and the opposite one to + * which both adjacent lines point to. + * + * The solution in this case is to renumber one of the three left cells, e.g. + * by reverting the sense of the line between vertices 7 and 10 by numbering + * the top left cell by (9 6 7 10). + * + * But this is a thing that the triangulation + * object can't do for you, since it would involve backtracking to cells + * already created when we find that we can't number the indices of one of + * the rightmost cells consistently. It is neither clear how to do this + * backtracking nor whether it can be done with a stopping algorithm, if + * possible within polynomial time. This kind of numbering must be made + * upon construction of the coarse grid, unfortunately. + */ template class DataIn { public: @@ -249,74 +249,74 @@ class DataIn { /** - This class implements an output mechanism for grid and simulation data - in several formats. - At present it supports output in UCD (unstructured cell data) and - partly in GNUPLOT format. - - It allows the user to attach a degree of freedom handler object - (#DoFHandler#) which also gives access to the geometry data of the - underlying triangulation and to add data vectors of which the values - are to be written. - - - \subsection{Limitations} - - At present, no grouping of components to vectors is implemented, i.e. - you can only write each component independent of the others. Also, it - is not possible to output calculations which were performed on elements - with more or less than one degree of freedom per vertex. - - - \subsection{UCD format} - - The UCD format is described in the AVS developer's guide. Due to - limitations in the present format, only node based data can be output, - so higher order elements are only written with their node values, no - interior or line values are used. No use is made of the possibility - to give cell and model data since these are not supported by all - UCD aware programs. - - The ASCII UCD format is used. In future versions, a binary version may - follow up. - - Note that to enumerate the vertices, not the vertex index is used but - the index of the degree of freedom located on this vertex. This makes - the mapping between the vertices and the entries in the data vectors - much easier. - - - \subsection{GNUPLOT format} - - The GNUPLOT format is not able to handle data on unstructured grids - directly. Directly would mean that you only give the vertices and - the solution values thereon and the program constructs its own grid - to represent the data. 
This is only possible for a structured tensor - product grid in two dimensions. - - In one dimension, the format is obviously #x v1 v2 ...#, where #x# - is the coordinate value of a grid point, while the #vi# are the - vector elements referring to the present node. Within GNUPLOT, - call #plot "filename" using 1:x#. #x# denotes the number of the data set you - want to see plus one. For example #using 1:4# would mean to plot the - third data vector. - - For more than one dimension, the #DataOut::write_gnuplot()# somehow - duplicates the functionality of the #Triangulation::print_gnuplot()# - functions. These, however, offer more functionality in some respect. - The grid is represented as a sequence of lines, where each cell is - a sequence of five vertices (the first one is appended to close the - contour of the cell) with the data appended after each vertex. Each cell - is therefore a sequence of five lines #x y v1 v2 ...# forming together - the bounding line of this cell. After each cell, two newlines are inserted - to prevent GNUPLOT from joining the lines bounding two cells. - - To view the results in two dimensions, use #set data style lines# - within gnuplot and call #plot "filename"# to see the grid. Use - #set parametric# and #splot "filename" using 1:2:x# to get a 3d surface - plot of the (#x-2#)th data set. For example, using #x=4# would mean to - plot the second data set. - */ + * This class implements an output mechanism for grid and simulation data + * in several formats. + * At present it supports output in UCD (unstructured cell data) and + * partly in GNUPLOT format. + * + * It allows the user to attach a degree of freedom handler object + * (#DoFHandler#) which also gives access to the geometry data of the + * underlying triangulation and to add data vectors of which the values + * are to be written. + * + * + * \subsection{Limitations} + * + * At present, no grouping of components to vectors is implemented, i.e. + * you can only write each component independent of the others. Also, it + * is not possible to output calculations which were performed on elements + * with more or less than one degree of freedom per vertex. + * + * + * \subsection{UCD format} + * + * The UCD format is described in the AVS developer's guide. Due to + * limitations in the present format, only node based data can be output, + * so higher order elements are only written with their node values, no + * interior or line values are used. No use is made of the possibility + * to give cell and model data since these are not supported by all + * UCD aware programs. + * + * The ASCII UCD format is used. In future versions, a binary version may + * follow up. + * + * Note that to enumerate the vertices, not the vertex index is used but + * the index of the degree of freedom located on this vertex. This makes + * the mapping between the vertices and the entries in the data vectors + * much easier. + * + * + * \subsection{GNUPLOT format} + * + * The GNUPLOT format is not able to handle data on unstructured grids + * directly. Directly would mean that you only give the vertices and + * the solution values thereon and the program constructs its own grid + * to represent the data. This is only possible for a structured tensor + * product grid in two dimensions. + * + * In one dimension, the format is obviously #x v1 v2 ...#, where #x# + * is the coordinate value of a grid point, while the #vi# are the + * vector elements referring to the present node. Within GNUPLOT, + * call #plot "filename" using 1:x#. 
#x# denotes the number of the data set you + * want to see plus one. For example #using 1:4# would mean to plot the + * third data vector. + * + * For more than one dimension, the #DataOut::write_gnuplot()# somehow + * duplicates the functionality of the #Triangulation::print_gnuplot()# + * functions. These, however, offer more functionality in some respect. + * The grid is represented as a sequence of lines, where each cell is + * a sequence of five vertices (the first one is appended to close the + * contour of the cell) with the data appended after each vertex. Each cell + * is therefore a sequence of five lines #x y v1 v2 ...# forming together + * the bounding line of this cell. After each cell, two newlines are inserted + * to prevent GNUPLOT from joining the lines bounding two cells. + * + * To view the results in two dimensions, use #set data style lines# + * within gnuplot and call #plot "filename"# to see the grid. Use + * #set parametric# and #splot "filename" using 1:2:x# to get a 3d surface + * plot of the (#x-2#)th data set. For example, using #x=4# would mean to + * plot the second data set. + */ template class DataOut { public: diff --git a/deal.II/deal.II/include/numerics/error_estimator.h b/deal.II/deal.II/include/numerics/error_estimator.h index e3abc10889..4a9c7798d9 100644 --- a/deal.II/deal.II/include/numerics/error_estimator.h +++ b/deal.II/deal.II/include/numerics/error_estimator.h @@ -22,120 +22,120 @@ class dVector; /** - Implementation of the error estimator by Kelly, Gago, Zienkiewicz and - Babuska. - This error estimator tries to approximate the error per cell by integration - of the jump of the gradient of the solution along the faces of each cell. - It can be understood as a gradient recovery estimator; see the survey - of Ainsworth for a complete discussion. - - It seem as if this error estimator should only be valid for linear ansatz - spaces, and there are indications that for higher order ansatz spaces the - integrals computed here show superconvergence properties, i.e. they tend - to zero faster than the error itself, thus ruling out the values as error - indicators. - - The error estimator returns a vector of estimated errors per cell which - can be used to feed the #Triangulation::refine_*# functions. - - - \subsection{Implementation} - - In principle, the implementation of the error estimation is simple: let - $$ \eta_K^2 = - \frac h{24} \int_{\partial K} \left[\frac{\partial u_h}{\partial n}\right]^2 do - $$ - be the error estimator for cell $K$. $[\cdot]$ denotes the jump of the - argument at the face. In the paper of Ainsworth, $h$ is divided by $24$, - but this factor is a bit esoteric, stemming from interpolation estimates - and stability constants which may hold for the Poisson problem, but may - not hold for more general situations. In the implementation, this factor - is considered, but may lead to wrong results. You may scale the vector - appropriately afterwards. - - To perform the integration, use is made of the #FEFaceValues# and - #FESubfaceValues# classes. The integration is performed by looping - over all cells and integrating over faces that are not yet treated. - This way we avoid integration on faces twice, once for each time we - visit one of the adjacent cells. In a second loop over all cells, we - sum up the contributions of the faces (which are the integrated - square of the jumps) of each cell and take the square root. 
- - We store the contribution of each face in a #map#, as provided by the - C++ standard library, with the iterator pointing to that face being the - key into the map. In fact, we do not store the indicator per face, but - only the integral listed above. When looping the second time over all - cells, we have to sum up the contributions of the faces, multiply them - with $\frac h{24}$ and take the square root. By doing the multiplication - with $h$ in the second loop, we avoid problems to decide with which $h$ - to multiply, that of the cell on the one or that of the cell on the other - side of the face. - - $h$ is taken to be the greatest length of the diagonals of the cell. For - more or less uniform cells without deformed angles, this coincides with - the diameter of the cell. - - - \subsection{Boundary values} - - If the face is at the boundary, i.e. there is no neighboring cell to which - the jump in the gradiend could be computed, there are two possibilities: - \begin{itemize} - \item The face belongs to a Dirichlet boundary. Then the face is not - considered, which can be justified looking at a dual problem technique and - should hold exactly if the boundary can be approximated exactly by the - finite element used (i.e. it is a linear boundary for linear finite elements, - quadratic for isoparametric quadratic elements, etc). For boundaries which - can not be exactly approximated, one should consider the difference - $z-z_h$ on the face, $z$ being a dual problem's solution which is zero at - the true boundary and $z_h$ being an approximation, which in most cases - will be zero on the numerical boundary. Since on the numerical boundary - $z$ will not be zero in general, we would get another term here, but this - one is neglected for practical reasons, in the hope that the error made - here will tend to zero faster than the energy error we wish to estimate. - - Though no integration is necessary, in the list of face contributions we - store a zero for this face, which makes summing up the contributions of - the different faces to the cells easier. - - \item The face belongs to a Neumann boundary. In this case, the - contribution of the face $F\in\partial K$ looks like - $$ \int_F \left|g-\frac{\partial u_h}{\partial n}\right| ds $$ - where $g$ is the Neumann boundary function. - - \item No other boundary conditions are considered. - \end{itemize} - - Thanks go to Franz-Theo Suttmeier for clarifications about boundary - conditions. - - - \subsection{Handling of hanging nodes} - - The integration along faces with hanging nodes is quite tricky, since one - of the elements has to be shifted one level up or down. See the - documentation for the #FESubfaceValues# class for more information about - technical issues regarding this topic. - - In praxi, since we integrate over each face only once, we do this when we - are on the coarser one of the two cells adjacent to a subface (a subface - is defined to be the child of a face; seen from the coarse cell, it is a - subface, while seen from the refined cell it is one of its faces). The - reason is that finding neighborship information is a bit easier then, but - that's all practical reasoning, nothing fundamental. - - Since we integrate from the coarse side of the face, we have the mother - face readily at hand and store the result of the integration over that - mother face (being the sum of the integrals along the subfaces) in the - abovementionned map of integrals as well. 
This consumes some memory more - than needed, but makes the summing up of the face contributions to the - cells easier, since then we have the information from all faces of all - cells at hand and need not think about explicitely determining whether - a face was refined or not. The same applies for boundary faces, see - above. - - @author Wolfgang Bangerth, 1998 -*/ + * Implementation of the error estimator by Kelly, Gago, Zienkiewicz and + * Babuska. + * This error estimator tries to approximate the error per cell by integration + * of the jump of the gradient of the solution along the faces of each cell. + * It can be understood as a gradient recovery estimator; see the survey + * of Ainsworth for a complete discussion. + * + * It seem as if this error estimator should only be valid for linear ansatz + * spaces, and there are indications that for higher order ansatz spaces the + * integrals computed here show superconvergence properties, i.e. they tend + * to zero faster than the error itself, thus ruling out the values as error + * indicators. + * + * The error estimator returns a vector of estimated errors per cell which + * can be used to feed the #Triangulation::refine_*# functions. + * + * + * \subsection{Implementation} + * + * In principle, the implementation of the error estimation is simple: let + * $$ \eta_K^2 = + * \frac h{24} \int_{\partial K} \left[\frac{\partial u_h}{\partial n}\right]^2 do + * $$ + * be the error estimator for cell $K$. $[\cdot]$ denotes the jump of the + * argument at the face. In the paper of Ainsworth, $h$ is divided by $24$, + * but this factor is a bit esoteric, stemming from interpolation estimates + * and stability constants which may hold for the Poisson problem, but may + * not hold for more general situations. In the implementation, this factor + * is considered, but may lead to wrong results. You may scale the vector + * appropriately afterwards. + * + * To perform the integration, use is made of the #FEFaceValues# and + * #FESubfaceValues# classes. The integration is performed by looping + * over all cells and integrating over faces that are not yet treated. + * This way we avoid integration on faces twice, once for each time we + * visit one of the adjacent cells. In a second loop over all cells, we + * sum up the contributions of the faces (which are the integrated + * square of the jumps) of each cell and take the square root. + * + * We store the contribution of each face in a #map#, as provided by the + * C++ standard library, with the iterator pointing to that face being the + * key into the map. In fact, we do not store the indicator per face, but + * only the integral listed above. When looping the second time over all + * cells, we have to sum up the contributions of the faces, multiply them + * with $\frac h{24}$ and take the square root. By doing the multiplication + * with $h$ in the second loop, we avoid problems to decide with which $h$ + * to multiply, that of the cell on the one or that of the cell on the other + * side of the face. + * + * $h$ is taken to be the greatest length of the diagonals of the cell. For + * more or less uniform cells without deformed angles, this coincides with + * the diameter of the cell. + * + * + * \subsection{Boundary values} + * + * If the face is at the boundary, i.e. there is no neighboring cell to which + * the jump in the gradiend could be computed, there are two possibilities: + * \begin{itemize} + * \item The face belongs to a Dirichlet boundary. 
Then the face is not + * considered, which can be justified looking at a dual problem technique and + * should hold exactly if the boundary can be approximated exactly by the + * finite element used (i.e. it is a linear boundary for linear finite elements, + * quadratic for isoparametric quadratic elements, etc). For boundaries which + * can not be exactly approximated, one should consider the difference + * $z-z_h$ on the face, $z$ being a dual problem's solution which is zero at + * the true boundary and $z_h$ being an approximation, which in most cases + * will be zero on the numerical boundary. Since on the numerical boundary + * $z$ will not be zero in general, we would get another term here, but this + * one is neglected for practical reasons, in the hope that the error made + * here will tend to zero faster than the energy error we wish to estimate. + * + * Though no integration is necessary, in the list of face contributions we + * store a zero for this face, which makes summing up the contributions of + * the different faces to the cells easier. + * + * \item The face belongs to a Neumann boundary. In this case, the + * contribution of the face $F\in\partial K$ looks like + * $$ \int_F \left|g-\frac{\partial u_h}{\partial n}\right| ds $$ + * where $g$ is the Neumann boundary function. + * + * \item No other boundary conditions are considered. + * \end{itemize} + * + * Thanks go to Franz-Theo Suttmeier for clarifications about boundary + * conditions. + * + * + * \subsection{Handling of hanging nodes} + * + * The integration along faces with hanging nodes is quite tricky, since one + * of the elements has to be shifted one level up or down. See the + * documentation for the #FESubfaceValues# class for more information about + * technical issues regarding this topic. + * + * In praxi, since we integrate over each face only once, we do this when we + * are on the coarser one of the two cells adjacent to a subface (a subface + * is defined to be the child of a face; seen from the coarse cell, it is a + * subface, while seen from the refined cell it is one of its faces). The + * reason is that finding neighborship information is a bit easier then, but + * that's all practical reasoning, nothing fundamental. + * + * Since we integrate from the coarse side of the face, we have the mother + * face readily at hand and store the result of the integration over that + * mother face (being the sum of the integrals along the subfaces) in the + * abovementioned map of integrals as well. This consumes some memory more + * than needed, but makes the summing up of the face contributions to the + * cells easier, since then we have the information from all faces of all + * cells at hand and need not think about explicitely determining whether + * a face was refined or not. The same applies for boundary faces, see + * above. + * + * @author Wolfgang Bangerth, 1998 + */ template class KellyErrorEstimator { public: -- 2.39.5
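
The two-pass bookkeeping described in the #KellyErrorEstimator# documentation above (one jump integral stored per face in a #map#, then summed per cell, scaled by $h/24$ and square-rooted) can be sketched in isolation as follows. This is only a structural illustration under assumed names: #FaceId#, #CellInfo# and #sum_face_contributions# are stand-ins, not part of the library, and the real class works with face iterators and #FEFaceValues# as described above.
\begin{verbatim}
#include <map>
#include <vector>
#include <cmath>

// Stand-ins for the face iterators and per-cell geometry used by the
// real estimator; only the data flow of the second loop is shown.
typedef int FaceId;

struct CellInfo {
  std::vector<FaceId> faces;   // the faces bounding this cell
  double              h;       // longest diagonal of the cell
};

// Sum the previously stored face integrals for each cell, multiply by
// h/24 and take the square root, as described above.
std::vector<double>
sum_face_contributions (const std::vector<CellInfo>    &cells,
                        const std::map<FaceId,double>  &face_integrals)
{
  std::vector<double> error_per_cell (cells.size(), 0.);
  for (unsigned int c=0; c<cells.size(); ++c)
    {
      double sum = 0;
      for (unsigned int f=0; f<cells[c].faces.size(); ++f)
        // every face is assumed to have an entry, possibly zero
        // (e.g. for Dirichlet boundary faces)
        sum += face_integrals.find (cells[c].faces[f])->second;
      error_per_cell[c] = std::sqrt (cells[c].h / 24. * sum);
    }
  return error_per_cell;
}
\end{verbatim}
Storing a zero for Dirichlet boundary faces, as the documentation suggests, is what keeps this second loop free of any case distinctions.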