From 6083a91d17518d3b7768a3aad1290163fa76fe5c Mon Sep 17 00:00:00 2001
From: hartmann
Date: Tue, 15 May 2001 16:09:39 +0000
Subject: [PATCH] New test for the new AutoDerivativeFunction.

git-svn-id: https://svn.dealii.org/trunk@4611 0785d39b-7218-0410-832d-ea1e28bc413d
---
 tests/base/Makefile.in                      |   3 +-
 tests/base/auto_derivative_function.cc      | 189 ++++++++++++++++++++
 tests/base/auto_derivative_function.checked |  33 ++++
 3 files changed, 224 insertions(+), 1 deletion(-)
 create mode 100644 tests/base/auto_derivative_function.cc
 create mode 100644 tests/base/auto_derivative_function.checked

diff --git a/tests/base/Makefile.in b/tests/base/Makefile.in
index c555aad7f5..f60060d754 100644
--- a/tests/base/Makefile.in
+++ b/tests/base/Makefile.in
@@ -25,9 +25,10 @@ quadrature_test.exe: quadrature_test.go $(libraries)
 reference.exe : reference.go abort.go $(libraries)
 tensor.exe : tensor.go $(libraries)
 timer.exe : timer.go $(libraries)
+auto_derivative_function.exe : auto_derivative_function.go $(libraries)
 
-tests = logtest quadrature_test reference tensor timer polynomial_test
+tests = logtest quadrature_test reference tensor timer polynomial_test auto_derivative_function
 
 
 ############################################################
diff --git a/tests/base/auto_derivative_function.cc b/tests/base/auto_derivative_function.cc
new file mode 100644
index 0000000000..fa9f55dc2d
--- /dev/null
+++ b/tests/base/auto_derivative_function.cc
@@ -0,0 +1,189 @@
+//----------------------------  auto_derivative_function.cc  ---------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2000, 2001 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//----------------------------  auto_derivative_function.cc  ---------------------------
+
+#include <base/auto_derivative_function.h>
+
+
+#include <fstream>
+
+#include <base/point.h>
+#include <base/logstream.h>
+#include <base/convergence_table.h>
+#include <lac/vector.h>
+#include <cmath>
+
+
+template <int dim>
+class AutoSinExp : public AutoDerivativeFunction<dim>
+{
+  public:
+    AutoSinExp();
+    virtual ~AutoSinExp() {};
+
+    virtual double value (const Point<dim>   &p,
+                          const unsigned int  component = 0) const;
+
+    /**
+     * n_components=2. The first
+     * component is always zero; the
+     * second component is filled by
+     * the @p{value} function.
+     */
+    virtual void vector_value (const Point<dim> &p,
+                               Vector<double>   &values) const;
+};
+
+
+
+template <int dim>
+AutoSinExp<dim>::AutoSinExp() :
+                AutoDerivativeFunction<dim>(1e-6, 2)
+{}
+
+
+
+template <int dim>
+double AutoSinExp<dim>::value (const Point<dim> &p,
+                               const unsigned int) const
+{
+  return sin(2*p(0))*exp(3*p(1));
+}
+
+
+template <int dim>
+void AutoSinExp<dim>::vector_value (const Point<dim> &p,
+                                    Vector<double>   &values) const
+{
+  Assert(values.size()==n_components,
+         ExcDimensionMismatch(values.size(), n_components));
+  values(0)=0;
+  values(1)=value(p);
+}
+
+/*----------------------------------------------------------*/
+
+template <int dim>
+class ExactSinExp : public AutoSinExp<dim>
+{
+  public:
+    ExactSinExp() {};
+    ~ExactSinExp() {};
+
+    virtual Tensor<1,dim> gradient (const Point<dim>   &p,
+                                    const unsigned int  component = 0) const;
+
+    virtual void vector_gradient (const Point<dim>                      &p,
+                                  typename std::vector<Tensor<1,dim> > &gradients) const;
+};
+
+
+
+
+template <int dim>
+Tensor<1,dim> ExactSinExp<dim>::gradient (const Point<dim> &p,
+                                          const unsigned int) const
+{
+  Tensor<1,dim> grad;
+  grad[0]=2*cos(2*p(0))*exp(3*p(1));
+  grad[1]=3*sin(2*p(0))*exp(3*p(1));
+  return grad;
+}
+
+
+template <int dim>
+void ExactSinExp<dim>::vector_gradient (const Point<dim>                      &p,
+                                        typename std::vector<Tensor<1,dim> > &gradients) const
+{
+  Assert(gradients.size()==n_components,
+         ExcDimensionMismatch(gradients.size(), n_components));
+
+  gradients[0].clear();
+  gradients[1]=gradient(p);
+}
+
+
+int main()
+{
+  ofstream logfile("auto_derivative_function.output");
+  logfile.precision(4);
+  deallog.attach(logfile);
+  deallog.depth_console(0);
+
+  const unsigned int dim=2;
+  AutoSinExp<dim>  auto_function;
+  ExactSinExp<dim> exact_function;
+  Point<dim> p(0.23, 0.1);
+  std::vector<Point<dim> > ps(1, p);
+
+                                   // exact gradient, used as reference
+  Tensor<1,dim> u_grad=exact_function.gradient(p);
+
+  AutoDerivativeFunction<dim>::DifferenceFormula formula;
+  const double h_base=0.1;
+  for (unsigned int order=1; order<5; ++order)
+    {
+      formula=AutoDerivativeFunction<dim>::get_formula_of_order(order);
+      auto_function.set_formula(formula);
+      deallog << "order=" << order << ", formula=" << formula << endl;
+      ConvergenceTable history;
+
+      unsigned int factor=1;
+      for (unsigned int i=0; i<6; ++i, factor*=2)
+        {
+          history.add_value("f", factor);
+          history.omit_column_from_convergence_rate_evaluation("f");
+
+          auto_function.set_h(h_base/factor);
+
+                                           // Test of the gradient function
+          Tensor<1,dim> a_grad=auto_function.gradient(p);
+          a_grad-=u_grad;
+          double value=sqrt(a_grad*a_grad);
+          history.add_value("grad", value);
+          history.set_scientific("grad", true);
+          history.set_precision("grad", 2);
+
+                                           // Test of the gradient_list function
+          std::vector<Tensor<1,dim> > a_grads(1);
+          auto_function.gradient_list(ps, a_grads);
+          a_grads[0]-=u_grad;
+          value=sqrt(a_grads[0]*a_grads[0]);
+          history.add_value("grads[0]", value);
+          history.set_scientific("grads[0]", true);
+          history.set_precision("grads[0]", 2);
+
+                                           // Test of the vector_gradient function
+          std::vector<Tensor<1,dim> > a_vgrad(2);
+          auto_function.vector_gradient(p, a_vgrad);
+          a_vgrad[1]-=u_grad;
+          value=sqrt(a_vgrad[1]*a_vgrad[1]);
+          history.add_value("vgrad[1]", value);
+          history.set_scientific("vgrad[1]", true);
+          history.set_precision("vgrad[1]", 2);
+
+                                           // Test of the vector_gradient_list function
+          std::vector<std::vector<Tensor<1,dim> > >
+            a_vgrads(1, std::vector<Tensor<1,dim> > (2));
+          auto_function.vector_gradient_list(ps, a_vgrads);
+          a_vgrads[0][1]-=u_grad;
+          value=sqrt(a_vgrads[0][1]*a_vgrads[0][1]);
+          history.add_value("vgrads[1]", value);
+          history.set_scientific("vgrads[1]", true);
+          history.set_precision("vgrads[1]", 2);
+        }
+      history.evaluate_all_convergence_rates(ConvergenceTable::reduction_rate);
+      history.write_text(deallog.get_file_stream());
+    }
+}
diff --git a/tests/base/auto_derivative_function.checked b/tests/base/auto_derivative_function.checked
new file mode 100644
index 0000000000..b2271d7330
--- /dev/null
+++ b/tests/base/auto_derivative_function.checked
@@ -0,0 +1,33 @@
+
+DEAL::order=1, formula=1
+f grad grads[0] vgrad[1] vgrads[1]
+1 2.66e-01 - 2.66e-01 - 2.66e-01 - 2.66e-01 -
+2 1.40e-01 1.90 1.40e-01 1.90 1.40e-01 1.90 1.40e-01 1.90
+4 7.19e-02 1.95 7.19e-02 1.95 7.19e-02 1.95 7.19e-02 1.95
+8 3.64e-02 1.97 3.64e-02 1.97 3.64e-02 1.97 3.64e-02 1.97
+16 1.83e-02 1.99 1.83e-02 1.99 1.83e-02 1.99 1.83e-02 1.99
+32 9.19e-03 1.99 9.19e-03 1.99 9.19e-03 1.99 9.19e-03 1.99
+DEAL::order=2, formula=0
+f grad grads[0] vgrad[1] vgrads[1]
+1 3.15e-02 - 3.15e-02 - 3.15e-02 - 3.15e-02 -
+2 7.86e-03 4.01 7.86e-03 4.01 7.86e-03 4.01 7.86e-03 4.01
+4 1.96e-03 4.00 1.96e-03 4.00 1.96e-03 4.00 1.96e-03 4.00
+8 4.91e-04 4.00 4.91e-04 4.00 4.91e-04 4.00 4.91e-04 4.00
+16 1.23e-04 4.00 1.23e-04 4.00 1.23e-04 4.00 1.23e-04 4.00
+32 3.07e-05 4.00 3.07e-05 4.00 3.07e-05 4.00 3.07e-05 4.00
+DEAL::order=3, formula=2
+f grad grads[0] vgrad[1] vgrads[1]
+1 5.07e-04 - 5.07e-04 - 5.07e-04 - 5.07e-04 -
+2 3.15e-05 16.12 3.15e-05 16.12 3.15e-05 16.12 3.15e-05 16.12
+4 1.96e-06 16.03 1.96e-06 16.03 1.96e-06 16.03 1.96e-06 16.03
+8 1.23e-07 16.01 1.23e-07 16.01 1.23e-07 16.01 1.23e-07 16.01
+16 7.66e-09 16.00 7.66e-09 16.00 7.66e-09 16.00 7.66e-09 16.00
+32 0.00 16.00 0.00 16.00 0.00 16.00 0.00 16.00
+DEAL::order=4, formula=2
+f grad grads[0] vgrad[1] vgrads[1]
+1 5.07e-04 - 5.07e-04 - 5.07e-04 - 5.07e-04 -
+2 3.15e-05 16.12 3.15e-05 16.12 3.15e-05 16.12 3.15e-05 16.12
+4 1.96e-06 16.03 1.96e-06 16.03 1.96e-06 16.03 1.96e-06 16.03
+8 1.23e-07 16.01 1.23e-07 16.01 1.23e-07 16.01 1.23e-07 16.01
+16 7.66e-09 16.00 7.66e-09 16.00 7.66e-09 16.00 7.66e-09 16.00
+32 0.00 16.00 0.00 16.00 0.00 16.00 0.00 16.00
-- 
2.39.5