From 967efaaab32a9b5fdc5c13e00f73e4a944f93588 Mon Sep 17 00:00:00 2001
From: guido
Date: Fri, 9 Jan 2004 10:28:12 +0000
Subject: [PATCH] Doc for doxygen

git-svn-id: https://svn.dealii.org/trunk@8286 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../include/base/auto_derivative_function.h | 132 ++++++++----------
 1 file changed, 61 insertions(+), 71 deletions(-)

diff --git a/deal.II/base/include/base/auto_derivative_function.h b/deal.II/base/include/base/auto_derivative_function.h
index 1b5cbfa9d7..ff53f9baad 100644
--- a/deal.II/base/include/base/auto_derivative_function.h
+++ b/deal.II/base/include/base/auto_derivative_function.h
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2001, 2002, 2003 by the deal authors
+//    Copyright (C) 2001, 2002, 2003, 2004 by the deal authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -23,14 +23,14 @@
- * employing numerical difference quotients. This only, if the user
- * function does not provide the gradient function himself.
+ * employing numerical difference quotients. This is done only if the
+ * user function does not provide the gradient function itself.
 *
- * @sect3{Usage}
- * The following example of an user defined function overloads and
- * implements only the @p{value} function but not the @p{gradient}
- * function. If the @p{gradient} function is invoked then the gradient
- * function implemented by the @p{AutoDerivativeFunction} is called,
- * where the latter function imployes numerical difference quotients.
+ * @section Usage Usage
+ * The following example of a user-defined function overloads and
+ * implements only the value() function but not the gradient()
+ * function. If the gradient() function is invoked then the gradient
+ * function implemented by the AutoDerivativeFunction is called,
+ * where the latter function employs numerical difference quotients.
 *
- * @begin{verbatim}
+ * @code
 * class UserFunction: public AutoDerivativeFunction<dim>
 * {               // access to one component at one point
 *   double value (const Point<dim> &p, const
@@ -40,32 +40,32 @@
 *
 *                 // gradient by employing difference quotients.
 *   Tensor<1,dim> grad=user_function.gradient(some_point);
- * @end{verbatim}
+ * @endcode
 *
- * If the user overloads and implements also the gradient function,
- * then, of course, the users gradient function is called.
+ * If the user also overloads and implements the gradient function,
+ * then, of course, the user's gradient function is called.
 *
- * Note, that the usage of the @p{value} and @p{gradient} functions
- * explained above, also applies to the @p{value_list} and
- * @p{gradient_list} functions as well as to the vector valued
- * versions of these functions, see e.g. @p{vector_value},
- * @p{vector_gradient}, @p{vector_value_list} and
- * @p{vector_gradient_list}.
+ * Note that the usage of the value() and gradient() functions
+ * explained above also applies to the value_list() and
+ * gradient_list() functions, as well as to the vector-valued
+ * versions of these functions, see e.g. vector_value(),
+ * vector_gradient(), vector_value_list() and
+ * vector_gradient_list().
 *
- * The @p{gradient} and @p{gradient_list} functions make use of the
- * @p{value} function. The @p{vector_gradient} and
- * @p{vector_gradient_list} make use of the @p{vector_value}
- * function. Make sure that the user defined function implements the
- * @p{value} function and the @p{vector_value} function, respectively.
+ * The gradient() and gradient_list() functions make use of the
+ * Function::value() function. The vector_gradient() and
+ * vector_gradient_list() make use of the Function::vector_value()
+ * function. Make sure that the user-defined function implements the
+ * value() function and the vector_value() function, respectively.
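+ *
+ * As a sketch of the vector-valued case (the class name, the number
+ * of components and the component values below are made up for
+ * illustration only), a user-defined function implements
+ * vector_value(), and the gradients are then provided by the
+ * inherited vector_gradient():
+ * @code
+ * template <int dim>
+ * class UserVectorFunction : public AutoDerivativeFunction<dim>
+ * {
+ *   public:
+ *     UserVectorFunction (const double h)
+ *       : AutoDerivativeFunction<dim> (h, 2) {}
+ *
+ *                 // all components at one point; the automatic
+ *                 // vector_gradient() builds on this function
+ *     virtual void vector_value (const Point<dim> &p,
+ *                                Vector<double>   &values) const
+ *       {
+ *         values(0) = p.square();
+ *         values(1) = -p(0);
+ *       }
+ * };
+ * @endcode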
 *
- * Furthermore note, that an object of this class does not represent
- * the derivative of a function, like @p{FunctionDerivative}, that
- * gives a directional derivate by calling the @p{value} function. In
- * fact, this class (the @p{AutoDerivativeFunction} class) can
- * substitute the @p{Function} class as base class for user defined
- * classes. This class implements the @p{gradient} functions for
+ * Furthermore, note that an object of this class does not represent
+ * the derivative of a function, like FunctionDerivative, which
+ * gives a directional derivative by calling the value() function. In
+ * fact, this class (the AutoDerivativeFunction class) can
+ * substitute the Function class as base class for user-defined
+ * classes. This class implements the gradient() functions for
 * automatic computation of numerical difference quotients and serves
- * as intermediate class between the base @p{Function} class and the
+ * as an intermediate class between the base Function class and the
 * user defined function class.
 *
 * @author Ralf Hartmann, 2001
@@ -97,29 +97,29 @@ class AutoDerivativeFunction : public Function<dim>
     /**
      * Constructor. Takes the
      * difference step size
-     * @p{h}. It's within the user's
+     * h. It is within the user's
      * responsibility to choose an
-     * appropriate value here. @p{h}
+     * appropriate value here. h
      * should be chosen taking into
      * account the absolute value as
      * well as the amount of local
      * variation of the function.
-     * Setting @p{h=1e-6} might be a
+     * Setting h=1e-6 might be a
      * good choice for functions with
      * an absolute value of about 1,
-     * that furthermore does not vary
-     * to much.
+     * that furthermore do not vary
+     * too much.
      *
-     * @p{h} can be changed later
-     * using the @p{set_h} function.
+     * h can be changed later
+     * using the set_h() function.
      *
-     * Sets @p{DifferenceFormula}
-     * @p{formula} to the default
-     * @p{Euler} formula of the
-     * @p{set_formula}
+     * Sets the DifferenceFormula
+     * formula to the default
+     * Euler formula of the
+     * set_formula()
      * function. Change this preset
      * formula by calling the
-     * @p{set_formula} function.
+     * set_formula() function.
      */
     AutoDerivativeFunction (const double h,
                             const unsigned int n_components = 1,
@@ -133,27 +133,21 @@ class AutoDerivativeFunction : public Function<dim>
     /**
      * Choose the difference formula.
-     *
-     * Formulas implemented right now
-     * are first order backward Euler
-     * (@p{UpwindEuler}), second
-     * order symmetric Euler
-     * (@p{Euler}) and a symmetric
-     * fourth order formula
-     * (@p{FourthOrder}).
+     * See the enum #DifferenceFormula
+     * for available choices.
      */
     void set_formula (const DifferenceFormula formula = Euler);

     /**
      * Takes the difference step size
-     * @p{h}. It's within the user's
+     * h. It is within the user's
      * responsibility to choose an
-     * appropriate value here. @p{h}
+     * appropriate value here. h
      * should be chosen taking into
-     * account the absolute value of
-     * as well as the amount of local
+     * account the absolute value as
+     * well as the amount of local
      * variation of the function.
-     * Setting @p{h=1e-6} might be a
+     * Setting h=1e-6 might be a
      * good choice for functions with
      * an absolute value of about 1,
-     * that furthermore does not vary
-     * to much.
+     * that furthermore do not vary
+     * too much.
      */
     void set_h (const double h);

     /**
      * Returns the gradient of the
      * specified component of the
      * function at the given point.
      *
-     * Imployes numerical difference
+     * Computes numerical difference
      * quotients using the preset
-     * @p{DifferenceFormula}
-     * @p{formula}.
+     * #DifferenceFormula.
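+     *
+     * A short usage sketch (the
+     * names user_function and
+     * some_point are placeholders
+     * as in the example in the
+     * general class documentation,
+     * and the step size here is
+     * only exemplary):
+     * @code
+     * user_function.set_formula (
+     *   AutoDerivativeFunction<dim>::FourthOrder);
+     * user_function.set_h (1e-4);
+     * Tensor<1,dim> grad
+     *   = user_function.gradient (some_point);
+     * @endcode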
      */
     virtual Tensor<1,dim> gradient (const Point<dim>   &p,
                                     const unsigned int  component = 0) const;

@@ -179,58 +172,55 @@ class AutoDerivativeFunction : public Function<dim>
      * components of the
      * function at the given point.
      *
-     * Imployes numerical difference
+     * Computes numerical difference
      * quotients using the preset
-     * @p{DifferenceFormula}
-     * @p{formula}.
+     * #DifferenceFormula.
      */
     virtual void vector_gradient (const Point<dim>            &p,
                                   std::vector<Tensor<1,dim> > &gradients) const;

     /**
-     * Set @p{gradients} to the
+     * Set gradients to the
      * gradients of the specified
      * component of the function at
-     * the @p{points}. It is assumed
-     * that @p{gradients} already has the
+     * the points. It is assumed
+     * that gradients already has the
      * right size, i.e. the same
-     * size as the @p{points} array.
+     * size as the points array.
      *
-     * Imployes numerical difference
+     * Computes numerical difference
      * quotients using the preset
-     * @p{DifferenceFormula}
-     * @p{formula}.
+     * #DifferenceFormula.
      */
     virtual void gradient_list (const std::vector<Point<dim> > &points,
                                 std::vector<Tensor<1,dim> >    &gradients,
                                 const unsigned int              component = 0) const;

     /**
-     * Set @p{gradients} to the gradients of
-     * the function at the @p{points},
+     * Set gradients to the gradients of
+     * the function at the points,
      * for all components.
-     * It is assumed that @p{gradients}
+     * It is assumed that gradients
      * already has the right size, i.e.
-     * the same size as the @p{points} array.
+     * the same size as the points array.
      *
      * The outer loop over
-     * @p{gradients} is over the points
+     * gradients is over the points
      * in the list, the inner loop
      * over the different components
      * of the function.
      *
-     * Imploys numerical difference
+     * Computes numerical difference
      * quotients using the preset
-     * @p{DifferenceFormula}
-     * @p{formula}.
+     * #DifferenceFormula.
      */
     virtual void vector_gradient_list (const std::vector<Point<dim> >            &points,
                                        std::vector<std::vector<Tensor<1,dim> > > &gradients) const;

     /**
      * Returns a
-     * @p{DifferenceFormula} of the
-     * order @p{ord} at minimum.
+     * #DifferenceFormula of at
+     * least the order ord.
      */
     static DifferenceFormula
     get_formula_of_order (const unsigned int ord);

@@ -245,20 +235,20 @@ class AutoDerivativeFunction : public Function<dim>

     /**
      * Step size of the difference
-     * formula. Set by the @p{set_h}
+     * formula. Set by the set_h()
      * function.
      */
     double h;

     /**
      * Includes the unit vectors
-     * scaled by @p{h}.
+     * scaled by h.
      */
     std::vector<Point<dim> > ht;

     /**
      * Difference formula. Set by the
-     * @p{set_formula} function.
+     * set_formula() function.
      */
     DifferenceFormula formula;
};
-- 
2.39.5
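For reference, a minimal sketch of a gradient_list() call against the
interface documented above (user_function, dim and the number of points
are invented for illustration; as the patched comment notes, the
gradients array must already have the same size as the points array):

    const unsigned int n_points = 10;
    std::vector<Point<dim> >    points (n_points);
    std::vector<Tensor<1,dim> > gradients (n_points);  // same size as points
    // ... fill the points array ...
    user_function.gradient_list (points, gradients);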