From: hartmann
Date: Mon, 7 May 2001 13:20:03 +0000 (+0000)
Subject: Implementation of TensorProductPolynomials::compute_value, compute_grad and compute_g...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9a5aac8c3420066f44105c238126e779e60497ec;p=dealii-svn.git

Implementation of the TensorProductPolynomials::compute_value, compute_grad
and compute_grad_grad functions.

git-svn-id: https://svn.dealii.org/trunk@4550 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/base/include/base/tensor_product_polynomials.h b/deal.II/base/include/base/tensor_product_polynomials.h
index 3f091d8807..69b7fdf377 100644
--- a/deal.II/base/include/base/tensor_product_polynomials.h
+++ b/deal.II/base/include/base/tensor_product_polynomials.h
@@ -47,20 +47,115 @@ class TensorProductPolynomials
     TensorProductPolynomials(const std::vector<Pol> &pols);
 
     /**
-     * Calculates the polynomials
-     * and their derivatives at
-     * @p{unit_point}.
+     * Computes the value and the
+     * first and second derivatives
+     * of each tensor product
+     * polynomial at @p{unit_point}.
      *
-     * The vectors must either have
-     * length @p{0} or number of
-     * polynomials. In the first
-     * case, the function will not
-     * compute these values.
+     * The sizes of the vectors must
+     * either be @p{0} or equal
+     * @p{n_tensor_pols}. In the
+     * first case, the function will
+     * not compute these values.
+     *
+     * If you need the values or
+     * derivatives of all tensor
+     * product polynomials, then use
+     * this function rather than
+     * calling any of the
+     * @p{compute_value},
+     * @p{compute_grad} or
+     * @p{compute_grad_grad}
+     * functions (see below) in a
+     * loop over all tensor product
+     * polynomials.
+     */
+    void compute(const Point<dim>                     &unit_point,
+                 std::vector<double>                  &values,
+                 typename std::vector<Tensor<1,dim> > &grads,
+                 typename std::vector<Tensor<2,dim> > &grad_grads) const;
+
+    /**
+     * Computes the value of the
+     * @p{i}th tensor product
+     * polynomial at
+     * @p{unit_point}. Here @p{i} is
+     * given in tensor product
+     * numbering.
+     *
+     * Note that using this function
+     * within a loop over all tensor
+     * product polynomials is not
+     * efficient, because then each
+     * point value of the underlying
+     * (one-dimensional) polynomials
+     * is (unnecessarily) computed
+     * several times. Instead use
+     * the @p{compute} function (see
+     * above) with
+     * @p{values.size()==n_tensor_pols}
+     * to get the point values of all
+     * tensor polynomials at once and
+     * in a much more efficient way.
+     */
+    double compute_value (const unsigned int i,
+                          const Point<dim>  &p) const;
+
+    /**
+     * Computes the gradient (grad)
+     * of the @p{i}th tensor product
+     * polynomial at
+     * @p{unit_point}. Here @p{i} is
+     * given in tensor product
+     * numbering.
+     *
+     * Note that using this function
+     * within a loop over all tensor
+     * product polynomials is not
+     * efficient, because then each
+     * derivative value of the
+     * underlying (one-dimensional)
+     * polynomials is (unnecessarily)
+     * computed several times.
+     * Instead use the @p{compute}
+     * function (see above) with
+     * @p{grads.size()==n_tensor_pols}
+     * to get the gradients of all
+     * tensor polynomials at once and
+     * in a much more efficient way.
+     */
+    Tensor<1,dim> compute_grad (const unsigned int i,
+                                const Point<dim>  &p) const;
+
+    /**
+     * Computes the second
+     * derivative (grad_grad) of the
+     * @p{i}th tensor product
+     * polynomial at
+     * @p{unit_point}. Here @p{i} is
+     * given in tensor product
+     * numbering.
+     *
+     * Note that using this function
+     * within a loop over all tensor
+     * product polynomials is not
+     * efficient, because then each
+     * derivative value of the
+     * underlying (one-dimensional)
+     * polynomials is (unnecessarily)
+     * computed several times.
+     * Instead use the @p{compute}
+     * function (see above) with
+     * @p{grad_grads.size()==n_tensor_pols}
+     * to get the second derivatives
+     * of all tensor polynomials at
+     * once and in a much more
+     * efficient way.
      */
-    void compute (const Point<dim>                     &unit_point,
-                  std::vector<double>                  &values,
-                  typename std::vector<Tensor<1,dim> > &grads,
-                  typename std::vector<Tensor<2,dim> > &grad_grads) const;
+    Tensor<2,dim> compute_grad_grad(const unsigned int i,
+                                    const Point<dim>  &p) const;
 
     /**
      * Returns the number of tensor
@@ -92,21 +187,35 @@ class TensorProductPolynomials
      */
     const unsigned int n_tensor_pols;
-
+    /**
+     * @p{n_pols_to[n]=polynomials.size()^n}.
+     * Filled by the constructor.
+     *
+     * For internal use only.
+     */
+    std::vector<unsigned int> n_pols_to;
+
     static unsigned int power(const unsigned int x, const unsigned int y);
 };
-
-
 
 template <int dim>
 template <class Pol>
 TensorProductPolynomials<dim>::TensorProductPolynomials(
   const std::vector<Pol> &pols):
                 polynomials (pols.begin(), pols.end()),
-                n_tensor_pols(power(pols.size(), dim))
-{}
+                n_tensor_pols(power(pols.size(), dim)),
+                n_pols_to(dim+1)
+{
+  const unsigned int n_pols=polynomials.size();
+
+  n_pols_to[0]=1;
+  for (unsigned int i=0; i<dim; ++i)
+    n_pols_to[i+1]=n_pols_to[i]*n_pols;
+}

diff --git a/deal.II/base/source/tensor_product_polynomials.cc b/deal.II/base/source/tensor_product_polynomials.cc
--- a/deal.II/base/source/tensor_product_polynomials.cc
+++ b/deal.II/base/source/tensor_product_polynomials.cc
@@ -28,6 +29,84 @@ unsigned int TensorProductPolynomials<dim>::power(const unsigned int x,
 }
 
 
+
+
+template <int dim>
+double
+TensorProductPolynomials<dim>::compute_value(const unsigned int i,
+                                             const Point<dim> &p) const
+{
+  const unsigned int n_pols=polynomials.size();
+
+  double value=1.;
+  for (unsigned int d=0; d<dim; ++d)
+    value *= polynomials[(i/n_pols_to[d])%n_pols].value(p(d));
+
+  return value;
+}
+
+
+template <int dim>
+Tensor<1,dim>
+TensorProductPolynomials<dim>::compute_grad(const unsigned int i,
+                                            const Point<dim> &p) const
+{
+  const unsigned int n_pols=polynomials.size();
+
+                                   // values and first derivatives of
+                                   // the underlying one-dimensional
+                                   // polynomials at p
+  std::vector<std::vector<double> > v(dim, std::vector<double> (2));
+
+  for (unsigned int d=0; d<dim; ++d)
+    polynomials[(i/n_pols_to[d])%n_pols].value(p(d), v[d]);
+
+  Tensor<1,dim> grad;
+  for (unsigned int d=0; d<dim; ++d)
+    {
+      grad[d]=1.;
+                                   // in direction d take the first
+                                   // derivative, otherwise the value
+      for (unsigned int d1=0; d1<dim; ++d1)
+        grad[d] *= v[d1][d==d1];
+    }
+
+  return grad;
+}
+
+
+template <int dim>
+Tensor<2,dim>
+TensorProductPolynomials<dim>::compute_grad_grad(const unsigned int i,
+                                                 const Point<dim> &p) const
+{
+  const unsigned int n_pols=polynomials.size();
+
+                                   // values, first and second
+                                   // derivatives of the underlying
+                                   // one-dimensional polynomials at p
+  std::vector<std::vector<double> > v(dim, std::vector<double> (3));
+  for (unsigned int d=0; d<dim; ++d)
+    polynomials[(i/n_pols_to[d])%n_pols].value(p(d), v[d]);
+
+  Tensor<2,dim> grad_grad;
+
+  for (unsigned int d1=0; d1<dim; ++d1)
+    for (unsigned int d2=0; d2<dim; ++d2)
+      {
+        grad_grad[d1][d2]=1.;
+        for (unsigned int x=0; x<dim; ++x)
+          {
+                                   // take the second derivative if
+                                   // x==d1==d2, the first derivative
+                                   // if x equals exactly one of d1
+                                   // and d2, and the value otherwise
+            unsigned int derivative=0;
+            if (d1==x || d2==x)
+              {
+                if (d1==d2)
+                  derivative=2;
+                else
+                  derivative=1;
+              }
+            grad_grad[d1][d2] *= v[x][derivative];
+          }
+      }
+
+  return grad_grad;
+}
+
+
 
 template <int dim>
 void TensorProductPolynomials<dim>::compute(
   const Point<dim>                     &p,
@@ -35,12 +114,7 @@ void TensorProductPolynomials<dim>::compute(
   typename std::vector<Tensor<1,dim> > &grads,
   typename std::vector<Tensor<2,dim> > &grad_grads) const
 {
-  unsigned int n_pols=polynomials.size();
-  std::vector<unsigned int> n_pols_to(dim+1);
-  n_pols_to[0]=1;
-  for (unsigned int i=0; i<dim; ++i)
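
As a rough illustration of how the interface above is meant to be used, the
following is a minimal usage sketch. It is not taken from the repository: it
assumes the one-dimensional LagrangeEquidistant polynomial class and the
Point/Tensor classes with the header paths shown, and it sizes the output
vectors by hand instead of assuming the name of the member function that
returns the number of tensor product polynomials.

  #include <base/tensor_product_polynomials.h>
  #include <base/polynomial.h>
  #include <base/point.h>
  #include <base/tensor.h>

  #include <vector>

  int main ()
  {
    const unsigned int dim    = 2;
    const unsigned int degree = 2;

                                     // the 1d basis: Lagrange polynomials
                                     // of the given degree on equidistant
                                     // support points (assumed to be
                                     // declared in base/polynomial.h)
    std::vector<LagrangeEquidistant> pols;
    for (unsigned int i=0; i<=degree; ++i)
      pols.push_back (LagrangeEquidistant(degree, i));

    TensorProductPolynomials<dim> tpp (pols);

    const Point<dim> p (.25, .75);

                                     // value, gradient and second
                                     // derivative of a single tensor
                                     // product polynomial, with the
                                     // index given in tensor product
                                     // numbering
    const double        value     = tpp.compute_value     (0, p);
    const Tensor<1,dim> grad      = tpp.compute_grad      (0, p);
    const Tensor<2,dim> grad_grad = tpp.compute_grad_grad (0, p);

                                     // all polynomials at once: pass
                                     // vectors of size n_tensor_pols
                                     // (=(degree+1)^dim) to compute()
    const unsigned int n_tensor_pols = (degree+1)*(degree+1);
    std::vector<double>         values     (n_tensor_pols);
    std::vector<Tensor<1,dim> > grads      (n_tensor_pols);
    std::vector<Tensor<2,dim> > grad_grads (n_tensor_pols);
    tpp.compute (p, values, grads, grad_grads);

    return 0;
  }

Calling compute() once with full-sized vectors is preferable to looping over
compute_value(), compute_grad() or compute_grad_grad(), since the underlying
one-dimensional polynomials are then evaluated only once per coordinate
direction, as the documentation above points out.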