From cf0822e1c174e27775d4527df80ef0ce0a0f0d58 Mon Sep 17 00:00:00 2001 From: Denis Davydov Date: Fri, 7 Dec 2018 20:01:57 +0100 Subject: [PATCH] SmoothnessEstimator: Implementation with dedicated coefficient decay functions for both Fourier and Legendre series expansions individually. --- cmake/config/template-arguments.in | 5 - doc/news/changes/minor/20181227DenisDavydov | 6 + examples/step-27/doc/intro.dox | 2 +- examples/step-27/step-27.cc | 5 +- include/deal.II/fe/fe_series.h | 6 + .../deal.II/numerics/smoothness_estimator.h | 328 ++++++--- .../numerics/smoothness_estimator.templates.h | 294 ++++++-- source/fe/fe_series_legendre.cc | 12 + source/numerics/smoothness_estimator.cc | 5 - source/numerics/smoothness_estimator.inst.in | 70 +- tests/hp/laplace.h | 635 ++++++++++++++++++ tests/hp/laplace_mitchel2014_04_peak.cc | 243 +++++++ tests/hp/laplace_mitchel2014_04_peak.output | 13 + tests/hp/step-27.cc | 5 +- tests/numerics/smoothness_estimator_01.cc | 362 ++++++++++ ...smoothness_estimator_01.with_gsl=on.output | 55 ++ 16 files changed, 1872 insertions(+), 174 deletions(-) create mode 100644 doc/news/changes/minor/20181227DenisDavydov create mode 100644 tests/hp/laplace.h create mode 100644 tests/hp/laplace_mitchel2014_04_peak.cc create mode 100644 tests/hp/laplace_mitchel2014_04_peak.output create mode 100644 tests/numerics/smoothness_estimator_01.cc create mode 100644 tests/numerics/smoothness_estimator_01.with_gsl=on.output diff --git a/cmake/config/template-arguments.in b/cmake/config/template-arguments.in index 836fb7fc33..efe2f5111e 100644 --- a/cmake/config/template-arguments.in +++ b/cmake/config/template-arguments.in @@ -258,11 +258,6 @@ AFFINE_CONSTRAINTS_SP_BLOCK := { BlockSparsityPattern; @DEAL_II_EXPAND_TRILINOS_BLOCK_SPARSITY_PATTERN@; } -// Series expansion templates -SERIES_EXPANSION_TEMPLATES := { FESeries::Fourier; - FESeries::Legendre; - } - // all supported logical dimensions DIMENSIONS := { 1; 2; 3 } diff --git a/doc/news/changes/minor/20181227DenisDavydov b/doc/news/changes/minor/20181227DenisDavydov new file mode 100644 index 0000000000..b45077c2c9 --- /dev/null +++ b/doc/news/changes/minor/20181227DenisDavydov @@ -0,0 +1,6 @@ +New: Add FESeries::Legendre::get_size_in_each_direction() to retrieve +the number of coefficients in each direction. Also add an Assert in +FESeries::Legendre::calculate() to check the dimension of the table to store +coefficients. +
+(Denis Davydov, 2018/12/27) diff --git a/examples/step-27/doc/intro.dox b/examples/step-27/doc/intro.dox index e30a733ca0..9e1cca1b68 100644 --- a/examples/step-27/doc/intro.dox +++ b/examples/step-27/doc/intro.dox @@ -511,7 +511,7 @@ $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$. These steps outlined above are applicable on many different scenarios, which motivated the introduction of a generic function -SmoothnessEstimator::estimate_by_coeff_decay() in deal.II, that combines all +SmoothnessEstimator::fourier_coefficient_decay() in deal.II, which combines all the tasks described in this section in one simple function call.
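The renamed interface can be exercised directly from user code. The following is a minimal sketch, assuming an hp::DoFHandler<dim> named dof_handler and a solution vector named solution as in step-27; the variable names and the reliance on the default-constructed FESeries objects are illustrative, not part of this patch:

@code
// Fourier variant, as used in step-27: one smoothness indicator per
// active cell is written into the output vector.
Vector<float> fourier_indicators;
SmoothnessEstimator::fourier_coefficient_decay(dof_handler,
                                               solution,
                                               fourier_indicators);

// Legendre variant, as used in the new tests: each entry is exp(-sigma),
// where sigma is the estimated decay rate of the Legendre coefficients.
// A value below exp(-1) can be taken as a hint to prefer p-refinement.
Vector<float> legendre_indicators;
SmoothnessEstimator::legendre_coefficient_decay(dof_handler,
                                                solution,
                                                legendre_indicators);
@endcode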

Compensating for anisotropy

diff --git a/examples/step-27/step-27.cc b/examples/step-27/step-27.cc index 3d32f9638f..39cad4e140 100644 --- a/examples/step-27/step-27.cc +++ b/examples/step-27/step-27.cc @@ -349,8 +349,9 @@ namespace Step27 // Estimating the smoothness is performed with the method of decaing // expansion coefficients as outlined in the introduction. Vector smoothness_indicators; - SmoothnessEstimator::estimate_by_coeff_decay>( - dof_handler, solution, smoothness_indicators); + SmoothnessEstimator::fourier_coefficient_decay(dof_handler, + solution, + smoothness_indicators); // Next we want to generate graphical output. In addition to the two // estimated quantities derived above, we would also like to output the diff --git a/include/deal.II/fe/fe_series.h b/include/deal.II/fe/fe_series.h index c2f7ed87b8..229604c51e 100644 --- a/include/deal.II/fe/fe_series.h +++ b/include/deal.II/fe/fe_series.h @@ -253,6 +253,12 @@ namespace FESeries const unsigned int cell_active_fe_index, Table & legendre_coefficients); + /** + * Return number of coefficients in each coordinate direction. + */ + unsigned int + get_size_in_each_direction() const; + /** * Calculate all transformation matrices to transfer the finite element * solution to the series expansion representation. diff --git a/include/deal.II/numerics/smoothness_estimator.h b/include/deal.II/numerics/smoothness_estimator.h index 9965cd8444..cdb30ab702 100644 --- a/include/deal.II/numerics/smoothness_estimator.h +++ b/include/deal.II/numerics/smoothness_estimator.h @@ -19,6 +19,13 @@ #include +#include +#include + +#include + +#include + #include #include @@ -30,76 +37,223 @@ DEAL_II_NAMESPACE_OPEN /** - * Estimate the smoothness of a solution based on the decay of coefficients from - * a series expansion. - * - * From the definition, we can write our series expansion $\hat U_{\bf k}$ as a - * matrix product - * @f[ - * \hat U_{\bf k} - * = {\cal F}_{{\bf k},j} u_j, - * @f] - * with $u_j$ the coefficients and ${\cal F}_{{\bf k},j}$ the transformation - * matrix from the expansion. We use the classes FESeries::Fourier and - * FESeries::Legendre to determine all coefficients $u_j$. - * - * The next step is that we have to estimate how fast these coefficients - * decay with $|{\bf k}|$. Thus, we perform a least-squares fit - * @f[ - * \min_{\alpha,\mu} - * \frac 12 \sum_{{\bf k}, |{\bf k}|\le N} - * \left( |\hat U_{\bf k}| - \alpha |{\bf k}|^{-\mu}\right)^2 - * @f] - * with linear regressions coefficients $\alpha$ and $\mu$. For simplification, - * we apply a logarithm on our minimization problem - * @f[ - * \min_{\beta,\mu} - * Q(\beta,\mu) = - * \frac 12 \sum_{{\bf k}, |{\bf k}|\le N} - * \left( \ln |\hat U_{\bf k}| - \beta + \mu \ln |{\bf k}|\right)^2, - * @f] - * where $\beta=\ln \alpha$. This is now a problem for which the - * optimality conditions $\frac{\partial Q}{\partial\beta}=0, - * \frac{\partial Q}{\partial\mu}=0$, are linear in $\beta,\mu$. 
We can - * write these conditions as follows: - * @f[ - * \left(\begin{array}{cc} - * \sum_{{\bf k}, |{\bf k}|\le N} 1 & - * \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| - * \\ - * \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| & - * \sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2 - * \end{array}\right) - * \left(\begin{array}{c} - * \beta \\ -\mu - * \end{array}\right) - * = - * \left(\begin{array}{c} - * \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}| - * \\ - * \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}| \ln |{\bf k}| - * \end{array}\right) - * @f] - * Solving for $\beta$ and $\mu$ is nothing else but a linear regression fit and - * to do that we will use FESeries::linear_regression(). - * - * While we are not particularly interested in the actual value of - * $\beta$, the formula above gives us a mean to calculate the value of - * the exponent $\mu$ that we can then use to determine that - * $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$. These - * Sobolev indices $s$ will suffice as our smoothness estimators and will be - * calculated on each cell for any provided solution. - * - * @note An extensive demonstration of the use of these functions is provided in step-27. - * - * @ingroup numerics - * @author Denis Davydov, 2016, Marc Fehling, 2018 + * A namespace for various smoothness estimation strategies for hp-adaptive FEM. */ namespace SmoothnessEstimator { /** - * Estimates the smoothness of the provided solutions using the method of - * decaying coefficents as outlined above. + * Estimate smoothness from decay of Legendre absolute values of coefficients + * on the reference cell. + * + * In one dimension, the finite element solution on the reference element with + * polynomial degree $p$ can be written as + * @f[ + * u_h(\hat x) = \sum_{j=0}^{p} a_j P_j(\hat x) + * @f] + * where $\{P_j(x)\}$ are Legendre polynomials. The decay of the coefficients + * is estimated by performing the linear regression fit of + * @f[ + * \ln |a_j| \sim C - \sigma j + * @f] + * for $j=0,..,p$. The rate of the decay $\sigma$ can be used to estimate the + * smoothness. For example, one strategy to implement hp-refinement + * criteria is to perform p-refinement if $\sigma>1$. + * + * Extension to higher dimension is done by performing the fit in each + * coordinate direction separately and then taking the lowest value of + * $\sigma$. + * + * For each input vector of degrees of freedom defined on a DoFHandler, + * this function returns a vector with as many elements as there are cells + * where each element contains $\exp(-\sigma)$, which is a so-called + * analyticity (see references below). + * + * @param [in] fe_series FESeries::Legendre object to calculate coefficients. + * This object needs to be initialized to have at least $p+1$ coefficients in + * each direction, where $p$ is the maximum polynomial degree to be used. + * @param [in] dof_hander An hp::DoFHandler + * @param [in] all_solutions A vector of pointers to the solution vectors + * @param [out] all_smoothness_indicators A vector of pointers to the smoothness indicators for each @p all_solutions. + * @param [in] coefficients_predicate A predicate to select Legendre + * coefficients $a_j \;\; j=0\dots p$ for linear regression in each coordinate + * direction. The user is responsible for updating the vector of `flags` + * provided to this function. Note that its size is $p+1$, where $p$ is the + * polynomial degree of the FE basis on a given element. 
Default + * implementation will use all Legendre coefficients in each coordinate + * direction, i.e. set all elements of the vector to `true`. + * @param [in] smallest_abs_coefficient The smallest absolute value of the + * coefficient to be used in linear regression in each coordinate direction. + * Note that Legendre coefficients of some functions may have a repeating + * pattern of zero coefficients (i.e. for functions that are locally symmetric + * or antisymmetric about the midpoint of the element in any coordinate + * direction). Thus this parameters allows to ingore small (in absolute value) + * coefficients within the linear regression fit. In case there are less than + * two non-zero coefficients for a coordinate direction, this direction will + * be skipped. If all coefficients are zero, the returned value for this cell + * will be zero (i.e. corresponding to the $\sigma=\infty$). + * + * For more theoretical details see + * @code{.bib} + * @Article{Mavriplis1994, + * author = {Mavriplis, Catherine}, + * title = {Adaptive mesh strategies for the spectral element method}, + * journal = {{Computer Methods in Applied Mechanics and Engineering}}, + * year = {1994}, + * volume = {116}, + * number = {1}, + * pages = {77--86}, + * publisher = {Elsevier}, + * } + * @article{Houston2005, + * author = {Houston, Paul and S{\"u}li, Endre}, + * title = {A note on the design of hp-adaptive finite element + * methods for elliptic partial differential equations}, + * journal = {{Computer Methods in Applied Mechanics and Engineering}}, + * number = {2}, + * pages = {229--243}, + * publisher = {Elsevier}, + * volume = {194}, + * year = {2005} + * } + * @article{Eibner2007, + * author = {Eibner, Tino and Melenk, Jens Markus}, + * title = {An adaptive strategy for hp-FEM based on testing for + * analyticity}, + * journal = {{Computational Mechanics}}, + * year = {2007}, + * volume = {39}, + * number = {5}, + * pages = {575--595}, + * publisher = {Springer}, + * } + * @endcode + * and for the application within the deal.II: + * @code{.bib} + * @article{Davydov2017, + * author = {Denis Davydov and Tymofiy Gerasimov and Jean-Paul Pelteret and + * Paul Steinmann}, + * title = {Convergence study of the h-adaptive PUM and the hp-adaptive + * FEM applied to eigenvalue problems in quantum mechanics}, + * journal = {{Advanced Modeling and Simulation in Engineering Sciences}}, + * year = {2017}, + * volume = {4}, + * number = {1}, + * pages = {7}, + * issn = {2213-7467}, + * doi = {10.1186/s40323-017-0093-0}, + * } + * @endcode + * + * @ingroup numerics + * @author Denis Davydov, 2018 + */ + template + void + legendre_coefficient_decay( + FESeries::Legendre & fe_series, + const hp::DoFHandler & dof_handler, + const std::vector &all_solutions, + const std::vector *> & all_smoothness_indicators, + const std::function &flags)> coefficients_predicate = + [](std::vector &flags) -> void { + std::fill(flags.begin(), flags.end(), true); + }, + const double smallest_abs_coefficient = 1e-10); + + /** + * Same as above, but for a single solution vector. 
+ */ + template + void + legendre_coefficient_decay( + FESeries::Legendre & fe_series, + const hp::DoFHandler & dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, + const std::function &flags)> coefficients_predicate = + [](std::vector &flags) -> void { + std::fill(flags.begin(), flags.end(), true); + }, + const double smallest_abs_coefficient = 1e-10); + + /** + * Same as above, but for a single solution vector and with the default + * FESeries::Legendre. + */ + template + void + legendre_coefficient_decay( + const hp::DoFHandler & dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, + const std::function &flags)> coefficients_predicate = + [](std::vector &flags) -> void { + std::fill(flags.begin(), flags.end(), true); + }, + const double smallest_abs_coefficient = 1e-10); + + /** + * Estimate the smoothness of a solution based on the decay of coefficients + * from a series expansion. + * + * From the definition, we can write our series expansion $\hat U_{\bf k}$ as + * a matrix product + * @f[ + * \hat U_{\bf k} + * = {\cal F}_{{\bf k},j} u_j, + * @f] + * with $u_j$ the coefficients and ${\cal F}_{{\bf k},j}$ the transformation + * matrix from the expansion. We use the classes FESeries::Fourier and + * FESeries::Legendre to determine all coefficients $u_j$. + * + * The next step is that we have to estimate how fast these coefficients + * decay with $|{\bf k}|$. Thus, we perform a least-squares fit + * @f[ + * \min_{\alpha,\mu} + * \frac 12 \sum_{{\bf k}, |{\bf k}|\le N} + * \left( |\hat U_{\bf k}| - \alpha |{\bf k}|^{-\mu}\right)^2 + * @f] + * with linear regressions coefficients $\alpha$ and $\mu$. For + * simplification, we apply a logarithm on our minimization problem + * @f[ + * \min_{\beta,\mu} + * Q(\beta,\mu) = + * \frac 12 \sum_{{\bf k}, |{\bf k}|\le N} + * \left( \ln |\hat U_{\bf k}| - \beta + \mu \ln |{\bf k}|\right)^2, + * @f] + * where $\beta=\ln \alpha$. This is now a problem for which the + * optimality conditions $\frac{\partial Q}{\partial\beta}=0, + * \frac{\partial Q}{\partial\mu}=0$, are linear in $\beta,\mu$. We can + * write these conditions as follows: + * @f[ + * \left(\begin{array}{cc} + * \sum_{{\bf k}, |{\bf k}|\le N} 1 & + * \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| + * \\ + * \sum_{{\bf k}, |{\bf k}|\le N} \ln |{\bf k}| & + * \sum_{{\bf k}, |{\bf k}|\le N} (\ln |{\bf k}|)^2 + * \end{array}\right) + * \left(\begin{array}{c} + * \beta \\ -\mu + * \end{array}\right) + * = + * \left(\begin{array}{c} + * \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}| + * \\ + * \sum_{{\bf k}, |{\bf k}|\le N} \ln |\hat U_{{\bf k}}| \ln |{\bf k}| + * \end{array}\right) + * @f] + * Solving for $\beta$ and $\mu$ is nothing else but a linear regression fit + * and to do that we will use FESeries::linear_regression(). + * + * While we are not particularly interested in the actual value of + * $\beta$, the formula above gives us a mean to calculate the value of + * the exponent $\mu$ that we can then use to determine that + * $\hat u(\hat{\bf x})$ is in $H^s(\hat K)$ with $s=\mu-\frac d2$. These + * Sobolev indices $s$ will suffice as our smoothness estimators and will be + * calculated on each cell for any provided solution. + * + * @note An extensive demonstration of the use of these functions is provided in step-27. * * The @p regression_strategy parameter determines which norm will be used on the subset of * coeffiecients $\mathbf{k}$ with the same absolute value $|\mathbf{k}|$. 
@@ -110,12 +264,16 @@ namespace SmoothnessEstimator * * An individual @p fe_series object can be supplied, which has to be constructed with the * same FECollection object as the @p dof_handler. + * + * @ingroup numerics + * @author Denis Davydov, 2016, Marc Fehling, 2018 + * */ - template + template void - estimate_by_coeff_decay( - FESeriesType & fe_series, - const DoFHandlerType & dof_handler, + fourier_coefficient_decay( + FESeries::Fourier & fe_series, + const hp::DoFHandler & dof_handler, const std::vector &all_solutions, const std::vector *> & all_smoothness_indicators, const VectorTools::NormType regression_strategy = VectorTools::Linfty_norm); @@ -123,13 +281,13 @@ namespace SmoothnessEstimator /** * Same as the function above, only for one @p solution vector. */ - template + template void - estimate_by_coeff_decay( - FESeriesType & fe_series, - const DoFHandlerType & dof_handler, - const VectorType & solution, - Vector & smoothness_indicators, + fourier_coefficient_decay( + FESeries::Fourier & fe_series, + const hp::DoFHandler &dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, const VectorTools::NormType regression_strategy = VectorTools::Linfty_norm); /** @@ -139,14 +297,14 @@ namespace SmoothnessEstimator * * Provide the desired series expansion as a template argument, i.e. * @code - * SmoothnessEstimator::estimate_by_coeff_decay>( + * SmoothnessEstimator::estimate_by_coefficient_decay>( * dof_handler, all_solutions, all_smoothness_indicators); * @endcode */ - template + template void - estimate_by_coeff_decay( - const DoFHandlerType & dof_handler, + fourier_coefficient_decay( + const hp::DoFHandler & dof_handler, const std::vector &all_solutions, const std::vector *> & all_smoothness_indicators, const VectorTools::NormType regression_strategy = VectorTools::Linfty_norm); @@ -154,12 +312,12 @@ namespace SmoothnessEstimator /** * Same as the function above, only for one @p solution vector. */ - template + template void - estimate_by_coeff_decay( - const DoFHandlerType & dof_handler, - const VectorType & solution, - Vector & smoothness_indicators, + fourier_coefficient_decay( + const hp::DoFHandler &dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, const VectorTools::NormType regression_strategy = VectorTools::Linfty_norm); } // namespace SmoothnessEstimator diff --git a/include/deal.II/numerics/smoothness_estimator.templates.h b/include/deal.II/numerics/smoothness_estimator.templates.h index 2570ddcbc6..48fb8d25cb 100644 --- a/include/deal.II/numerics/smoothness_estimator.templates.h +++ b/include/deal.II/numerics/smoothness_estimator.templates.h @@ -53,16 +53,19 @@ namespace SmoothnessEstimator /** - * Calculates predicates of @p ind in the form - * \f$ - * v = \sum\limits_{d=0}^{dim} ind[d]^2 - * \f$. - * - * We flag the predicate whether it fulfills the criterion - * \f$ - * 0 < v < max_degree^2 - * \f$ - * using @p max_degree. + * we will need to take the maximum + * absolute value of fourier coefficients which correspond to $k$-vector + * $|{\bf k}|= const$. To filter the coefficients Table we + * will use the FESeries::process_coefficients() which requires a predicate + * to be specified. The predicate should operate on TableIndices and return + * a pair of bool and unsigned int. The latter + * is the value of the map from TableIndicies to unsigned int. It is + * used to define subsets of coefficients from which we search for the one + * with highest absolute value, i.e. $l^\infty$-norm. 
The bool + * parameter defines which indices should be used in processing. In the + * current case we are interested in coefficients which correspond to + * $0 < i*i+j*j < N*N$ and $0 < i*i+j*j+k*k < N*N$ in 2D and 3D, + * respectively. */ template std::pair @@ -79,30 +82,217 @@ namespace SmoothnessEstimator } // namespace + template + void + legendre_coefficient_decay( + FESeries::Legendre & fe_legendre, + const hp::DoFHandler & dof_handler, + const std::vector &all_solutions, + const std::vector *> & all_smoothness_indicators, + const std::function &flags)> coefficients_predicate, + const double smallest_abs_coefficient) + { + Assert(smallest_abs_coefficient >= 0., + ExcMessage("smallest_abs_coefficient should be non-negative.")); + + using number = typename VectorType::value_type; + using number_coeff = + typename FESeries::Legendre::CoefficientType; + + AssertDimension(all_solutions.size(), all_smoothness_indicators.size()); + + for (auto &smoothness_indicator : all_smoothness_indicators) + smoothness_indicator->reinit( + dof_handler.get_triangulation().n_active_cells()); + + Table expansion_coefficients; + resize(expansion_coefficients, fe_legendre.get_size_in_each_direction()); + + Vector local_dof_values; + + // auxiliary vector to do linear regression + std::vector x; + std::vector y; + + x.reserve(dof_handler.get_fe_collection().max_degree()); + y.reserve(dof_handler.get_fe_collection().max_degree()); + + // precalculate predicates for each degree: + std::vector> predicates( + dof_handler.get_fe_collection().max_degree()); + for (unsigned int p = 1; p <= dof_handler.get_fe_collection().max_degree(); + ++p) + { + auto &pred = predicates[p - 1]; + // we have p+1 coefficients for degree p + pred.resize(p + 1); + coefficients_predicate(pred); + } + + for (auto &cell : dof_handler.active_cell_iterators()) + if (cell->is_locally_owned()) + { + local_dof_values.reinit(cell->get_fe().dofs_per_cell); + + const unsigned int pe = cell->get_fe().degree; + + Assert(pe > 0, ExcInternalError()); + const auto &pred = predicates[pe - 1]; + + // since we use coefficients with indices [1,pe] in each direction, + // the number of coefficients we need to calculate is at least N=pe+1 + AssertIndexRange(pe, fe_legendre.get_size_in_each_direction()); + + auto solution_it = all_solutions.cbegin(); + auto smoothness_indicators_it = all_smoothness_indicators.begin(); + for (; solution_it != all_solutions.cend(); + ++solution_it, ++smoothness_indicators_it) + { + cell->get_dof_values(*(*solution_it), local_dof_values); + fe_legendre.calculate(local_dof_values, + cell->active_fe_index(), + expansion_coefficients); + + // choose the smallest decay of coefficients in each direction, + // i.e. 
the maximum decay slope k_v + number_coeff k_v = -std::numeric_limits::max(); + for (unsigned int d = 0; d < dim; d++) + { + x.resize(0); + y.resize(0); + + // will use all non-zero coefficients allowed by the predicate + // function + Assert(pred.size() == pe + 1, ExcInternalError()); + for (unsigned int i = 0; i <= pe; i++) + if (pred[i]) + { + TableIndices ind; + ind[d] = i; + const number_coeff coeff_abs = + std::abs(expansion_coefficients(ind)); + + if (coeff_abs > smallest_abs_coefficient) + { + y.push_back(std::log(coeff_abs)); + x.push_back(i); + } + } + + // in case we don't have enough non-zero coefficient to fit, + // skip this direction + if (x.size() < 2) + continue; + + const std::pair fit = + FESeries::linear_regression(x, y); + + // decay corresponds to negative slope + // take the lesser negative slope along each direction + k_v = std::max(k_v, fit.first); + } + + (*(*smoothness_indicators_it))(cell->active_cell_index()) = + std::exp(k_v); + } + } + } - template + + + template void - estimate_by_coeff_decay( - FESeriesType & fe_series, - const DoFHandlerType & dof_handler, + legendre_coefficient_decay( + FESeries::Legendre & fe_legendre, + const hp::DoFHandler & dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, + const std::function &flags)> coefficients_predicate, + const double smallest_abs_coefficient) + { + const std::vector all_solutions(1, &solution); + const std::vector *> all_smoothness_indicators( + 1, &smoothness_indicators); + + legendre_coefficient_decay(fe_legendre, + dof_handler, + all_solutions, + all_smoothness_indicators, + coefficients_predicate, + smallest_abs_coefficient); + } + + + + template + void + legendre_coefficient_decay( + const hp::DoFHandler & dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, + const std::function &flags)> coefficients_predicate, + const double smallest_abs_coefficient) + { + const unsigned int max_degree = + dof_handler.get_fe_collection().max_degree(); + + // We initialize a FESeries::Legendre expansion object object which will be + // used to calculate the expansion coefficients. In addition to the + // hp::FECollection, we need to provide quadrature rules hp::QCollection for + // integration on the reference cell. + // We will need to assemble the expansion matrices for each of the finite + // elements we deal with, i.e. the matrices F_k,j. We have to do that for + // each of the finite elements in use. To that end we need a quadrature + // rule. As a default, we use the same quadrature formula for each finite + // element, namely one that is obtained by iterating a 2-point Gauss formula + // as many times as the maximal polynomial degree. 
+ QGauss<1> base_quadrature(2); + QIterated quadrature(base_quadrature, max_degree); + + hp::QCollection expansion_q_collection; + for (unsigned int i = 0; i < dof_handler.get_fe_collection().size(); ++i) + expansion_q_collection.push_back(quadrature); + + FESeries::Legendre legendre(max_degree + 1, + dof_handler.get_fe_collection(), + expansion_q_collection); + + legendre_coefficient_decay(legendre, + dof_handler, + solution, + smoothness_indicators, + coefficients_predicate, + smallest_abs_coefficient); + } + + + + template + void + fourier_coefficient_decay( + FESeries::Fourier & fe_series, + const hp::DoFHandler & dof_handler, const std::vector &all_solutions, const std::vector *> & all_smoothness_indicators, const VectorTools::NormType regression_strategy) { + using number = typename VectorType::value_type; + using number_coeff = + typename FESeries::Fourier::CoefficientType; + AssertDimension(all_solutions.size(), all_smoothness_indicators.size()); for (auto &smoothness_indicator : all_smoothness_indicators) smoothness_indicator->reinit( dof_handler.get_triangulation().n_active_cells()); - const unsigned int dim = DoFHandlerType::dimension; const unsigned int max_degree = dof_handler.get_fe_collection().max_degree(); - Table expansion_coefficients; + Table expansion_coefficients; resize(expansion_coefficients, max_degree); - Vector local_dof_values; + Vector local_dof_values; std::vector ln_k; std::pair, std::vector> res; for (auto &cell : dof_handler.active_cell_iterators()) @@ -155,12 +345,11 @@ namespace SmoothnessEstimator } // Second, calculate ln(U_k). - for (double &residual_element : res.second) + for (auto &residual_element : res.second) residual_element = std::log(residual_element); // Last, do the linear regression. - std::pair fit = - FESeries::linear_regression(ln_k, res.second); + const auto fit = FESeries::linear_regression(ln_k, res.second); // Compute the Sobolev index s=mu-dim/2 and store it in the vector // of estimated values for each cell. @@ -172,36 +361,35 @@ namespace SmoothnessEstimator - template + template void - estimate_by_coeff_decay(FESeriesType & fe_series, - const DoFHandlerType & dof_handler, - const VectorType & solution, - Vector & smoothness_indicators, - const VectorTools::NormType regression_strategy) + fourier_coefficient_decay(FESeries::Fourier & fe_series, + const hp::DoFHandler &dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, + const VectorTools::NormType regression_strategy) { const std::vector all_solutions(1, &solution); const std::vector *> all_smoothness_indicators( 1, &smoothness_indicators); - estimate_by_coeff_decay(fe_series, - dof_handler, - all_solutions, - all_smoothness_indicators, - regression_strategy); + fourier_coefficient_decay(fe_series, + dof_handler, + all_solutions, + all_smoothness_indicators, + regression_strategy); } - template + template void - estimate_by_coeff_decay( - const DoFHandlerType & dof_handler, + fourier_coefficient_decay( + const hp::DoFHandler & dof_handler, const std::vector &all_solutions, const std::vector *> & all_smoothness_indicators, const VectorTools::NormType regression_strategy) { - const unsigned int dim = DoFHandlerType::dimension; const unsigned int max_degree = dof_handler.get_fe_collection().max_degree(); @@ -225,34 +413,34 @@ namespace SmoothnessEstimator // The FESeries::Fourier class' constructor first parameter $N$ defines the // number of coefficients in 1D with the total number of coefficients being // $N^{dim}$. 
- FESeriesType fe_series(max_degree, - dof_handler.get_fe_collection(), - expansion_q_collection); - - estimate_by_coeff_decay(fe_series, - dof_handler, - all_solutions, - all_smoothness_indicators, - regression_strategy); + FESeries::Fourier fe_series(max_degree, + dof_handler.get_fe_collection(), + expansion_q_collection); + + fourier_coefficient_decay(fe_series, + dof_handler, + all_solutions, + all_smoothness_indicators, + regression_strategy); } - template + template void - estimate_by_coeff_decay(const DoFHandlerType & dof_handler, - const VectorType & solution, - Vector & smoothness_indicators, - const VectorTools::NormType regression_strategy) + fourier_coefficient_decay(const hp::DoFHandler &dof_handler, + const VectorType & solution, + Vector & smoothness_indicators, + const VectorTools::NormType regression_strategy) { const std::vector all_solutions(1, &solution); const std::vector *> all_smoothness_indicators( 1, &smoothness_indicators); - estimate_by_coeff_decay(dof_handler, - all_solutions, - all_smoothness_indicators, - regression_strategy); + fourier_coefficient_decay(dof_handler, + all_solutions, + all_smoothness_indicators, + regression_strategy); } } // namespace SmoothnessEstimator diff --git a/source/fe/fe_series_legendre.cc b/source/fe/fe_series_legendre.cc index 9a17c2aa93..b23de8ad08 100644 --- a/source/fe/fe_series_legendre.cc +++ b/source/fe/fe_series_legendre.cc @@ -230,6 +230,15 @@ namespace FESeries + template + unsigned int + Legendre::get_size_in_each_direction() const + { + return N; + } + + + template template void @@ -238,6 +247,9 @@ namespace FESeries const unsigned int cell_active_fe_index, Table & legendre_coefficients) { + for (unsigned int d = 0; d < dim; ++d) + AssertDimension(legendre_coefficients.size(d), N); + ensure_existence(*fe_collection, *q_collection, N, diff --git a/source/numerics/smoothness_estimator.cc b/source/numerics/smoothness_estimator.cc index efba58d335..8276f8aa85 100644 --- a/source/numerics/smoothness_estimator.cc +++ b/source/numerics/smoothness_estimator.cc @@ -13,11 +13,6 @@ // // --------------------------------------------------------------------- -#include -#include - -#include - #include #include #include diff --git a/source/numerics/smoothness_estimator.inst.in b/source/numerics/smoothness_estimator.inst.in index 25db4d0a05..d6aca7def7 100644 --- a/source/numerics/smoothness_estimator.inst.in +++ b/source/numerics/smoothness_estimator.inst.in @@ -15,41 +15,69 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS; - VEC : REAL_VECTOR_TYPES; - DH : DOFHANDLER_TEMPLATES; - EXP : SERIES_EXPANSION_TEMPLATES) + VEC : REAL_VECTOR_TYPES) { #if deal_II_dimension != 1 && deal_II_dimension <= deal_II_space_dimension - template void SmoothnessEstimator::estimate_by_coeff_decay< - EXP, - DH, - VEC>(EXP &, - const DH &, + template void SmoothnessEstimator::legendre_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(FESeries::Legendre &, + const hp::DoFHandler &, + const std::vector &, + const std::vector *> &, + const std::function & flags)>, + const double); + + template void SmoothnessEstimator::legendre_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(FESeries::Legendre &, + const hp::DoFHandler &, + const VEC &, + Vector &, + const std::function & flags)>, + const double); + + template void SmoothnessEstimator::legendre_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(const hp::DoFHandler &, + const VEC &, + Vector &, + const 
std::function & flags)>, + const double); + + template void SmoothnessEstimator::fourier_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(FESeries::Fourier &, + const hp::DoFHandler &, const std::vector &, const std::vector *> &, const VectorTools::NormType); - template void SmoothnessEstimator::estimate_by_coeff_decay< - EXP, - DH, - VEC>(EXP &, - const DH &, + template void SmoothnessEstimator::fourier_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(FESeries::Fourier &, + const hp::DoFHandler &, const VEC &, Vector &, const VectorTools::NormType); - template void SmoothnessEstimator::estimate_by_coeff_decay< - EXP, - DH, - VEC>(const DH &, + template void SmoothnessEstimator::fourier_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(const hp::DoFHandler &, const std::vector &, const std::vector *> &, const VectorTools::NormType); - template void SmoothnessEstimator::estimate_by_coeff_decay< - EXP, - DH, - VEC>(const DH &, + template void SmoothnessEstimator::fourier_coefficient_decay< + deal_II_dimension, + deal_II_space_dimension, + VEC>(const hp::DoFHandler &, const VEC &, Vector &, const VectorTools::NormType); + #endif } diff --git a/tests/hp/laplace.h b/tests/hp/laplace.h new file mode 100644 index 0000000000..a87e12949a --- /dev/null +++ b/tests/hp/laplace.h @@ -0,0 +1,635 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// base header for hp-FEM test on Laplace equation. + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../tests.h" + +/** + * Basic class for Laplace problem + */ +template +class Laplace +{ +public: + Laplace(const Function &force_function, + const Function &exact_solution, + const Function &boundary_conditions, + const unsigned int n_cycles, + const std::string txt_file_name); + + virtual ~Laplace(); + + void + run(); + + hp::DoFHandler & + get_dof_handler(); + + void + setup_solve_estimate(Vector &output_estimate); + +protected: + void + setup_system(); + + virtual void + setup_geometry() = 0; + + void + assemble(); + + virtual void + solve(); + + /** + * estimate error + */ + virtual void + estimate_error() = 0; + + /** + * mark cells for h-refinement based on error estimation only + */ + virtual void + mark_h_cells() = 0; + + /** + * remove h-refinement flag from some cells and populate @p p_cells with + * iterators to those cells, that shall be p-refined. 
+ */ + virtual std::pair + substitute_h_for_p( + std::vector::active_cell_iterator> + &p_cells) = 0; + + void + refine_grid(const unsigned int cycle); + + void + calculate_error(); + + void + output_results(int cycle); + + void + print_errors(); + + const Function &force_function; + const Function &exact_solution; + const Function &boundary_conditions; + + Triangulation triangulation; + hp::FECollection fe; + hp::DoFHandler dof_handler; + hp::QCollection quadrature; + hp::QCollection quadrature_infty; + + AffineConstraints constraints; + SparsityPattern sparsity_pattern; + TrilinosWrappers::SparseMatrix system_matrix; + TrilinosWrappers::MPI::Vector system_rhs; + TrilinosWrappers::MPI::Vector solution; + TrilinosWrappers::MPI::Vector solution_locally_relevant; + + Vector estimated_error_per_cell; + double total_error; + + std::pair hp_number; + + double L2_error; + double H1_error; + double Linfty_error; + + const unsigned int n_cycles; + + std::string sp; + ConvergenceTable error_table; + std::string output_name; + + MPI_Comm mpi_communicator; + const unsigned int n_mpi_processes; + const unsigned int this_mpi_process; + ConditionalOStream pcout; + + IndexSet locally_owned_dofs; + IndexSet locally_relevant_dofs; +}; + + + +// implementatoin +template +Laplace::Laplace(const Function &force_function, + const Function &exact_solution, + const Function &boundary_conditions, + const unsigned int n_cycles, + const std::string output_name) + : force_function(force_function) + , exact_solution(exact_solution) + , boundary_conditions(boundary_conditions) + , dof_handler(triangulation) + , n_cycles(n_cycles) + , sp(" ") + , output_name(output_name) + , mpi_communicator(MPI_COMM_WORLD) + , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)) + , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)) + , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)) +{ + hp_number.first = 0; + hp_number.second = 0; + + deallog << std::endl; +} + +template +Laplace::~Laplace() +{ + dof_handler.clear(); +} + +template +hp::DoFHandler & +Laplace::get_dof_handler() +{ + return dof_handler; +} + +template +void +Laplace::setup_solve_estimate(Vector &output_estimate) +{ + setup_system(); + assemble(); + solve(); + estimate_error(); + output_estimate = estimated_error_per_cell; +} + +template +void +Laplace::setup_system() +{ + GridTools::partition_triangulation(n_mpi_processes, triangulation); + + dof_handler.distribute_dofs(fe); + + locally_owned_dofs = dof_handler.locally_owned_dofs(); + + locally_relevant_dofs.clear(); + DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs); + AssertThrow(locally_relevant_dofs.n_elements() == dof_handler.n_dofs(), + ExcInternalError()); + + // init vectors + solution_locally_relevant.reinit(locally_owned_dofs, + locally_relevant_dofs, + mpi_communicator); + solution.reinit(locally_owned_dofs, mpi_communicator); + solution = 0; + solution_locally_relevant = solution; + system_rhs.reinit(locally_owned_dofs, mpi_communicator); + system_rhs = 0; + + constraints.clear(); + // constraints.reinit(locally_relevant_dofs); + DoFTools::make_hanging_node_constraints(dof_handler, constraints); + VectorTools::interpolate_boundary_values(dof_handler, + 0, + boundary_conditions, + constraints); + if (dim == 1) + VectorTools::interpolate_boundary_values(dof_handler, + 1, + boundary_conditions, + constraints); + constraints.close(); + + TrilinosWrappers::SparsityPattern sp(locally_owned_dofs, mpi_communicator); + 
DoFTools::make_sparsity_pattern( + dof_handler, sp, constraints, false, this_mpi_process); + sp.compress(); + + system_matrix.reinit(sp); + + estimated_error_per_cell.reinit(triangulation.n_active_cells()); + + // print out some info: + pcout << "Number of active cells: " << triangulation.n_active_cells() + << std::endl; + pcout << "Number of degrees of freedom: " << dof_handler.n_dofs() + << std::endl; +} + +template +void +Laplace::assemble() +{ + pcout << "Assembling..."; + + hp::FEValues hp_fe_values(fe, + quadrature, + update_values | update_gradients | + update_quadrature_points | + update_JxW_values); + + FullMatrix cell_matrix; + Vector cell_rhs; + + std::vector local_dof_indices; + + for (auto &cell : dof_handler.active_cell_iterators()) + if (cell->subdomain_id() == this_mpi_process) + { + hp_fe_values.reinit(cell); + const FEValues &fe_values = hp_fe_values.get_present_fe_values(); + const unsigned int & dofs_per_cell = fe_values.dofs_per_cell; + + local_dof_indices.resize(dofs_per_cell); + cell_matrix.reinit(dofs_per_cell, dofs_per_cell); + cell_rhs.reinit(dofs_per_cell); + + cell_matrix = 0.; + cell_rhs = 0.; + + const unsigned int n_q_points = + hp_fe_values.get_present_fe_values().n_quadrature_points; + for (unsigned int q_index = 0; q_index < n_q_points; q_index++) + { + for (unsigned int i = 0; i < dofs_per_cell; i++) + { + for (unsigned int j = i; j < dofs_per_cell; j++) + { + cell_matrix(i, j) += (fe_values.shape_grad(i, q_index) * + fe_values.shape_grad(j, q_index)) * + fe_values.JxW(q_index); + } + + cell_rhs(i) += + force_function.value(fe_values.quadrature_point(q_index)) * + fe_values.shape_value(i, q_index) * fe_values.JxW(q_index); + } + } + + // exploit symmetry + for (unsigned int i = 0; i < dofs_per_cell; i++) + for (unsigned int j = i; j < dofs_per_cell; j++) + cell_matrix(j, i) = cell_matrix(i, j); + + + cell->get_dof_indices(local_dof_indices); + constraints.distribute_local_to_global( + cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs); + } + + system_matrix.compress(VectorOperation::add); + system_rhs.compress(VectorOperation::add); + + pcout << " done." << std::endl; +} + +//#define DIRECT + +template +void +Laplace::solve() +{ + pcout << "Solving..."; + + SolverControl solver_control(system_rhs.size(), + 1e-8 * system_rhs.l2_norm(), + /*log_history*/ false, + /*log_result*/ false); + + constraints.set_zero(solution); + constraints.set_zero(system_rhs); +#ifdef DIRECT + std::string solver_name = + "Amesos_Superludist"; //"Amesos_Mumps" || "Amesos_Klu" + + TrilinosWrappers::SolverDirect::AdditionalData additional_data(false, + solver_name); + + TrilinosWrappers::SolverDirect solver(solver_control, additional_data); + + solver.solve(system_matrix, solution, system_rhs); + + TrilinosWrappers::MPI::Vector tmp(solution); + const double l2 = system_matrix.residual(tmp, solution, system_rhs); + solver_control.check(1, l2); +#else + TrilinosWrappers::SolverCG cg(solver_control); + + TrilinosWrappers::PreconditionSSOR preconditioner; + TrilinosWrappers::PreconditionSSOR::AdditionalData data(1.2); + preconditioner.initialize(system_matrix, data); + + cg.solve(system_matrix, solution, system_rhs, preconditioner); +#endif + + constraints.distribute(solution); + solution_locally_relevant = solution; + + pcout << " done." << std::endl; +} + +template +void +Laplace::refine_grid(const unsigned int cycle) +{ + pcout << "Refining mesh..." << std::endl; + + // 3.2. Mark cells for h-refinement + mark_h_cells(); + + // 3.3. 
Substitute h for p refinement + std::vector::active_cell_iterator> p_cells; + hp_number = substitute_h_for_p(p_cells); + + triangulation.prepare_coarsening_and_refinement(); + + // 3.4. Solution Transfer + SolutionTransfer> + soltrans(dof_handler); + + // copy current functions + TrilinosWrappers::MPI::Vector solution_coarse; + solution_coarse.reinit(locally_owned_dofs, + locally_relevant_dofs, + mpi_communicator); + solution_coarse = solution; + soltrans.prepare_for_coarsening_and_refinement(solution_coarse); + + // increase fe_index() + for (unsigned int i = 0; i < p_cells.size(); i++) + { + typename hp::DoFHandler::active_cell_iterator cell( + &triangulation, p_cells[i]->level(), p_cells[i]->index(), &dof_handler); + + const unsigned int incremented_index = cell->active_fe_index() + 1; + Assert(incremented_index < fe.size(), ExcInternalError()); + cell->set_active_fe_index(incremented_index); + } + + // 3.5. Refinement + triangulation.execute_coarsening_and_refinement(); + + // FIXME: some hp strategies might need: + // post_execute_coarsening_and_refinement(); + + // 3.6. Setup + setup_system(); + + // 3.7. Solution Transfer finish + soltrans.interpolate(solution_coarse, solution); +} + +template +void +Laplace::calculate_error() +{ + L2_error = 0.0; + H1_error = 0.0; + Linfty_error = 0.0; + + hp::FEValues hp_fe_values_linf(fe, + quadrature_infty, + update_values | update_quadrature_points); + hp::FEValues hp_fe_values(fe, + quadrature, + update_values | update_gradients | + update_quadrature_points | + update_JxW_values); + + std::vector values, exact_values; + std::vector values_linf, exact_values_linf; + std::vector> gradients, exact_gradients; + + for (auto &cell : dof_handler.active_cell_iterators()) + if (cell->subdomain_id() == this_mpi_process) + { + hp_fe_values.reinit(cell); + hp_fe_values_linf.reinit(cell); + const FEValues &fe_values = hp_fe_values.get_present_fe_values(); + const unsigned int n_q_points = fe_values.n_quadrature_points; + + const FEValues &fe_values_linf = + hp_fe_values_linf.get_present_fe_values(); + const unsigned int n_q_points_linf = fe_values_linf.n_quadrature_points; + + values_linf.resize(n_q_points_linf); + exact_values_linf.resize(n_q_points_linf); + + values.resize(n_q_points); + exact_values.resize(n_q_points); + gradients.resize(n_q_points); + exact_gradients.resize(n_q_points); + + fe_values.get_function_values(solution_locally_relevant, values); + fe_values_linf.get_function_values(solution_locally_relevant, + values_linf); + fe_values.get_function_gradients(solution_locally_relevant, gradients); + + exact_solution.value_list(fe_values.get_quadrature_points(), + exact_values); + + exact_solution.value_list(fe_values_linf.get_quadrature_points(), + exact_values_linf); + + exact_solution.gradient_list(fe_values.get_quadrature_points(), + exact_gradients); + + double cell_L2 = 0.0, cell_Linf = 0.0, cell_H1 = 0.0; + + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) + { + const double diff_values = exact_values[q_point] - values[q_point]; + const Tensor<1, dim> diff_grad = + exact_gradients[q_point] - gradients[q_point]; + cell_L2 += diff_values * diff_values * fe_values.JxW(q_point); + cell_H1 += (diff_grad * diff_grad) * fe_values.JxW(q_point); + } + + for (unsigned int q_point = 0; q_point < n_q_points_linf; ++q_point) + { + cell_Linf = std::max(cell_Linf, + std::abs(exact_values_linf[q_point] - + values_linf[q_point])); + } + + + // calculate l2_norm() for cell-vectors for L2 and H1 + // and linfty_norm() for Linf: + L2_error 
+= cell_L2; + H1_error += cell_H1; + Linfty_error = std::max(Linfty_error, cell_Linf); + } // end of loop over cells + + // finish l2_norm() / linfty_norm() calculation: + L2_error = sqrt(Utilities::MPI::sum(L2_error, mpi_communicator)); + H1_error = sqrt(Utilities::MPI::sum(H1_error, mpi_communicator)); + Linfty_error = Utilities::MPI::max(Linfty_error, mpi_communicator); +} + +template +void +Laplace::output_results(int cycle) +{ + // log: + error_table.add_value("cycle", cycle); + error_table.add_value("cells", triangulation.n_active_cells()); + error_table.add_value("h-cells", hp_number.first); + error_table.add_value("p-cells", hp_number.second); + error_table.add_value("dofs", dof_handler.n_dofs()); + error_table.add_value("L2", L2_error); + error_table.add_value("H1", H1_error); + error_table.add_value("Linfty", Linfty_error); + error_table.add_value("estimated", total_error); + + if (this_mpi_process == 0) + deallog << cycle << sp << triangulation.n_active_cells() << sp + << hp_number.first << sp << hp_number.second << sp + << dof_handler.n_dofs() << sp << L2_error << sp << H1_error << sp + << Linfty_error << sp << total_error << sp << std::endl; +} + +template +void +Laplace::print_errors() +{ + error_table.set_precision("L2", 3); + error_table.set_precision("H1", 3); + error_table.set_precision("Linfty", 3); + error_table.set_precision("estimated", 3); + error_table.set_scientific("L2", true); + error_table.set_scientific("H1", true); + error_table.set_scientific("Linfty", true); + error_table.set_scientific("estimated", true); + + pcout << std::endl << "Error analysis:" << std::endl; + if (this_mpi_process == 0) + { + error_table.write_text(std::cout); + + const std::string fname = output_name + ".gp"; + std::ofstream output(fname.c_str(), std::ios::out | std::ios::trunc); + + // use Gnuplot datablocks: + output << "$data << EOD" << std::endl; + error_table.write_text(output); + output << "EOD" << std::endl << std::endl; + + output + << "set terminal postscript eps enhanced color dashed \"Helvetica\" 22" + << std::endl + << "set style line 1 linetype 1 linecolor rgb \"#e41a1c\" linewidth 2.000 pointtype 4 pointsize 2.0" + << std::endl + << "set style line 2 linetype 1 linecolor rgb \"#377eb8\" linewidth 2.000 pointtype 6 pointsize 2.0" + << std::endl + << "set xlabel \"DoF\"" << std::endl + << "set ylabel \"L2+H1\"" << std::endl + << "set logscale xy" << std::endl + << "set format x \"10^{%T}\"" << std::endl + << "set format y \"10^{%T}\"" << std::endl + << "set output \'" << output_name << ".eps\'" << std::endl + << "plot \"$data\" using ($5):($6+$7) axis x1y1 with lp ls 1 title \"error\", \\" + << std::endl + << " \"$data\" using ($5):($9) axis x1y1 with lp ls 2 title \"{/Symbol h}_{/Symbol W}\"" + << std::endl; + } +} + +template +void +Laplace::run() +{ + // 1. Define problem + setup_geometry(); + setup_system(); + + for (unsigned int cycle = 0; cycle <= n_cycles; cycle++) + { + pcout << std::endl << "Cycle " << cycle << std::endl; + + // 2. Solve Problem + assemble(); + solve(); + calculate_error(); + + estimate_error(); + total_error = estimated_error_per_cell.l2_norm(); + + output_results(cycle); + + // Do refinement (Yes/No) ? 
+ if (cycle < n_cycles) + { + refine_grid(cycle); + } + } + print_errors(); +} diff --git a/tests/hp/laplace_mitchel2014_04_peak.cc b/tests/hp/laplace_mitchel2014_04_peak.cc new file mode 100644 index 0000000000..0cc813c60e --- /dev/null +++ b/tests/hp/laplace_mitchel2014_04_peak.cc @@ -0,0 +1,243 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// test SmoothnessEstimator::legendre_coefficient_decay() on +// problem 4 (peak) in Mitchel 2014. + +#include "laplace.h" + + +template +class ForcingFunction : public Function +{ +public: + ForcingFunction(const double alpha, const Point center) + : Function(1) + , alpha(alpha) + , center(center) + {} + + virtual double + value(const Point &point, const unsigned int component = 0) const; + +private: + const double alpha; + const Point center; +}; + +template +double +ForcingFunction::value(const Point &point, const unsigned int) const +{ + const double x = point[0]; + const double y = point[1]; + + return -exp(-alpha * (point - center).norm_square()) * 4 * alpha * + (alpha * (point - center).norm_square() - 1); +} + +template +class ExactSolution : public Function +{ +public: + ExactSolution(const double alpha, const Point center) + : Function(1) + , alpha(alpha) + , center(center){}; + + virtual double + value(const Point &point, const unsigned int component = 0) const; + + virtual Tensor<1, dim> + gradient(const Point &point, const unsigned int component = 0) const; + +private: + const double alpha; + const Point center; +}; + +template +double +ExactSolution::value(const Point &point, const unsigned int) const +{ + return exp(-alpha * ((point - center).norm_square())); +} + +template +Tensor<1, dim> +ExactSolution::gradient(const Point &point, const unsigned int) const +{ + Tensor<1, dim> grad_u = point - center; + grad_u *= -2 * alpha * exp(-alpha * ((point - center).norm_square())); + return grad_u; +} + + +template +class Problem4 : public Laplace +{ +public: + Problem4(const Function &force_function, + const Function &exact_solution, + const Function &boundary_conditions, + const unsigned int n_cycles, + const std::string output_name); + + +private: + void + setup_geometry(); + void + estimate_error(); + void + mark_h_cells(); + + std::pair + substitute_h_for_p( + std::vector::active_cell_iterator> &p_cells); + + hp::QCollection quadrature_face; +}; + +template +Problem4::Problem4(const Function &force_function, + const Function &exact_solution, + const Function &boundary_conditions, + const unsigned int n_cycles, + const std::string output_name) + : Laplace(force_function, + exact_solution, + boundary_conditions, + n_cycles, + output_name) +{ + for (unsigned int p = 1; p <= n_cycles; p++) + { + // Laplace::fe.push_back(FE_Q_Hierarchical(p)); + Laplace::fe.push_back(FE_Q(p)); + Laplace::quadrature.push_back(QSorted(QGauss(p + 1))); + + quadrature_face.push_back(QSorted(QGauss(p + 1))); + + const QTrapez<1> q_trapez; + const QIterated 
q_iterated(q_trapez, p + 3); + Laplace::quadrature_infty.push_back(QSorted(q_iterated)); + } +} + + + +template +std::pair +Problem4::substitute_h_for_p( + std::vector::active_cell_iterator> &p_cells) +{ + Vector smoothness_indicators( + Laplace::triangulation.n_active_cells()); + SmoothnessEstimator::legendre_coefficient_decay(Laplace::dof_handler, + Laplace::solution, + smoothness_indicators); + + unsigned int num_p_cells = 0; + unsigned int num_h_cells = 0; + for (auto &cell : Laplace::dof_handler.active_cell_iterators()) + if (cell->refine_flag_set()) + { + typename Triangulation::active_cell_iterator tria_cell( + &(Laplace::triangulation), cell->level(), cell->index()); + + const unsigned int cur_fe_index = cell->active_fe_index(); + const bool p_ref = smoothness_indicators(cell->index()) < exp(-1.); + + if (cur_fe_index < Laplace::fe.size() - 1 && p_ref) + { + ++num_p_cells; + cell->clear_refine_flag(); + p_cells.push_back(tria_cell); + } + else + { + ++num_h_cells; + } + } + + return std::make_pair(num_h_cells, num_p_cells); +} + + + +template +void +Problem4::setup_geometry() +{ + std::vector number_elements(2); + number_elements[0] = 16; + number_elements[1] = 16; + + GridGenerator::subdivided_hyper_rectangle(Laplace::triangulation, + number_elements, + Point(0, 0), + Point(1, 1), + false); +} + + + +template +void +Problem4::estimate_error() +{ + KellyErrorEstimator::estimate( + Laplace::dof_handler, + quadrature_face, + std::map *>(), + Laplace::solution, + Laplace::estimated_error_per_cell); +} + +template +void +Problem4::mark_h_cells() +{ + GridRefinement::refine_and_coarsen_fixed_number( + Laplace::triangulation, + Laplace::estimated_error_per_cell, + 0.2, + 0.0); +} + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + const int dim = 2; + + initlog(); + + // peak strength + const double alpha = 1000; + // peak position: + const double xc = 0.5; + const double yc = 0.5; + + const Point center(xc, yc); + + ForcingFunction ff(alpha, center); + ExactSolution ex(alpha, center); + + Problem4 problem(ff, ex, ex, 10, "convergence"); + problem.run(); +} diff --git a/tests/hp/laplace_mitchel2014_04_peak.output b/tests/hp/laplace_mitchel2014_04_peak.output new file mode 100644 index 0000000000..7bcfbb54fb --- /dev/null +++ b/tests/hp/laplace_mitchel2014_04_peak.output @@ -0,0 +1,13 @@ + +DEAL:: +DEAL::0 256 0 0 289 0.267081 1.78414 1.23386 1.29971 +DEAL::1 256 0 56 477 0.0236257 0.743531 0.249786 1.18708 +DEAL::2 268 4 48 801 0.00108376 0.293919 0.0466411 0.361176 +DEAL::3 349 22 32 1730 0.000121568 0.0610935 0.00469929 0.0464975 +DEAL::4 421 18 52 2818 3.08015e-05 0.0204013 0.00315297 0.0197872 +DEAL::5 496 11 74 4632 6.14178e-06 0.00604078 0.000714754 0.00530244 +DEAL::6 622 28 72 7303 2.03947e-06 0.00210333 0.000148863 0.00185557 +DEAL::7 712 17 108 10086 5.30920e-07 0.000744017 7.24474e-05 0.000775517 +DEAL::8 811 20 123 13127 2.97635e-07 0.000379131 3.62580e-05 0.000400767 +DEAL::9 901 15 148 17913 1.18544e-07 0.000194730 1.48736e-05 0.000216587 +DEAL::10 1030 20 161 23692 4.78884e-08 0.000106719 9.54505e-06 0.000117773 diff --git a/tests/hp/step-27.cc b/tests/hp/step-27.cc index 446008e619..857c7163ff 100644 --- a/tests/hp/step-27.cc +++ b/tests/hp/step-27.cc @@ -270,8 +270,9 @@ namespace Step27 estimated_error_per_cell); Vector smoothness_indicators; - SmoothnessEstimator::estimate_by_coeff_decay>( - dof_handler, solution, smoothness_indicators); + SmoothnessEstimator::fourier_coefficient_decay(dof_handler, + solution, 
+ smoothness_indicators); // Output to VTK if (false) diff --git a/tests/numerics/smoothness_estimator_01.cc b/tests/numerics/smoothness_estimator_01.cc new file mode 100644 index 0000000000..13b3e41b66 --- /dev/null +++ b/tests/numerics/smoothness_estimator_01.cc @@ -0,0 +1,362 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// essentially similar to fe/fe_series_05.cc but test smoothness estimation. +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include + +#include + +#include + +#include "../tests.h" + +using namespace dealii; + +template +class LegendreFunction : public Function +{ +public: + LegendreFunction(const Table &coefficients) + : dealii::Function(1) + , coefficients(coefficients) + {} + + virtual double + value(const Point &point, const unsigned int component = 0) const; + + const Table & + get_coefficients() const + { + return coefficients; + } + +private: + const Table coefficients; +}; + +// copy-paste from fe_series.cc +template +double +Lh(const Point &x_q, const TableIndices &indices) +{ + double res = 1.0; + for (unsigned int d = 0; d < dim; d++) + { + const double x = 2.0 * (x_q[d] - 0.5); + Assert((x_q[d] <= 1.0) && (x_q[d] >= 0.), + ExcMessage("x_q is not in [0,1]" + Utilities::to_string(x_q[d]))); + const int ind = indices[d]; + res *= sqrt(2.0) * gsl_sf_legendre_Pl(ind, x); + } + return res; +} + +template <> +double +LegendreFunction<2>::value(const dealii::Point<2> &point, + const unsigned int) const +{ + double f = 0.0; + + for (unsigned int i = 0; i < coefficients.size(0); i++) + for (unsigned int j = 0; j < coefficients.size(1); j++) + f += Lh(point, TableIndices<2>(i, j)) * coefficients(i, j); + + return f; +} + +template <> +double +LegendreFunction<3>::value(const dealii::Point<3> &point, + const unsigned int) const +{ + double f = 0.0; + + for (unsigned int i = 0; i < coefficients.size(0); i++) + for (unsigned int j = 0; j < coefficients.size(1); j++) + for (unsigned int k = 0; k < coefficients.size(2); k++) + f += Lh(point, TableIndices<3>(i, j, k)) * coefficients(i, j, k); + + return f; +} + +void +compare(const Table<2, double> &coeff1, const Table<2, double> &coeff2) +{ + double linf = 0.; + for (unsigned int i = 0; i < coeff1.size(0); i++) + for (unsigned int j = 0; j < coeff1.size(1); j++) + linf = std::max(linf, std::abs(coeff1(i, j) - coeff2(i, j))); + + deallog << "Linf norm in exact and calculate Legendre coefficients:" + << std::endl + << linf << std::endl; +} + +void +compare(const Table<3, double> &coeff1, const Table<3, double> &coeff2) +{ + double linf = 0.; + for (unsigned int i = 0; i < coeff1.size(0); i++) + for (unsigned int j = 0; j < coeff1.size(1); j++) + for (unsigned int k = 0; k < coeff1.size(2); k++) + linf = std::max(linf, std::abs(coeff1(i, j, k) - coeff2(i, j, k))); + + deallog << "Linf norm in exact and calculate Legendre coefficients:" + << std::endl + << linf << 
+}
+
+void resize(Table<2, double> &coeff, const unsigned int N)
+{
+  coeff.reinit(N, N);
+}
+
+void resize(Table<3, double> &coeff, const unsigned int N)
+{
+  TableIndices<3> size;
+  for (unsigned int d = 0; d < 3; d++)
+    size[d] = N;
+  coeff.reinit(size);
+}
+
+
+
+template <int dim>
+void
+test(const LegendreFunction<dim> &func, const unsigned int poly_degree)
+{
+  // custom predicate:
+  // p-ref for linear elements and use j=1,...,pe otherwise.
+  const auto coefficients_predicate = [](std::vector<bool> &flags) -> void {
+    std::fill(flags.begin(), flags.end(), flags.size() > 2);
+    flags[0] = false;
+  };
+
+  const unsigned int max_poly = poly_degree + 3;
+  deallog << "-----------------------------------" << std::endl;
+  deallog << dim << "d, p=" << poly_degree << ", max_p=" << max_poly
+          << std::endl;
+  deallog << "-----------------------------------" << std::endl;
+  Triangulation<dim>    triangulation;
+  hp::DoFHandler<dim>   dof_handler(triangulation);
+  hp::FECollection<dim> fe_collection;
+  hp::QCollection<dim>  quadrature_formula;
+
+  // add some extra FEs in fe_collection
+  for (unsigned int p = 1; p <= max_poly; p++)
+    {
+      fe_collection.push_back(FE_Q<dim>(p));
+      quadrature_formula.push_back(QGauss<dim>(p + 1 + 5));
+    }
+
+  GridGenerator::hyper_cube(triangulation, 0.0, 1.0); // reference cell
+  const unsigned int fe_index = poly_degree - 1;
+  dof_handler.begin_active()->set_active_fe_index(fe_index);
+  dof_handler.distribute_dofs(fe_collection);
+
+  Vector<double> values(dof_handler.n_dofs());
+
+  VectorTools::interpolate(dof_handler, func, values);
+
+  const unsigned int N = poly_degree + 1;
+  FESeries::Legendre<dim> legendre(N, fe_collection, quadrature_formula);
+
+  const Table<dim, double> &coeff_in = func.get_coefficients();
+  Table<dim, double>        coeff_out;
+  resize(coeff_out, N);
+
+  Vector<double> local_dof_values;
+
+  typename hp::DoFHandler<dim>::active_cell_iterator cell =
+    dof_handler.begin_active();
+  {
+    const unsigned int cell_n_dofs          = cell->get_fe().dofs_per_cell;
+    const unsigned int cell_active_fe_index = cell->active_fe_index();
+
+    local_dof_values.reinit(cell_n_dofs);
+    cell->get_dof_values(values, local_dof_values);
+
+    legendre.calculate(local_dof_values, cell_active_fe_index, coeff_out);
+  }
+
+  compare(coeff_in, coeff_out);
+
+  // finally test smoothness estimator:
+  Vector<float> smoothness(1);
+  SmoothnessEstimator::legendre_coefficient_decay(
+    legendre, dof_handler, values, smoothness, coefficients_predicate);
+
+  deallog << "smoothness:" << std::endl << smoothness[0] << std::endl;
+
+  dof_handler.clear();
+}
+
+int
+main()
+{
+  std::ofstream logfile("output");
+  dealii::deallog.attach(logfile, /*do not print job id*/ false);
+  dealii::deallog.depth_console(0);
+
+  // for linear elements we expect p-refinement by convention
+  {
+    const unsigned int dim      = 2;
+    const unsigned int coeff_1d = 2;
+    const unsigned int p        = 1;
+    Table<dim, double> coeff_in(coeff_1d, coeff_1d);
+    unsigned int       ind = 0;
+    for (unsigned int i = 0; i < coeff_1d; i++)
+      for (unsigned int j = 0; j < coeff_1d; j++)
+        coeff_in(i, j) = 1.0 + ind++;
+
+    LegendreFunction<dim> function(coeff_in);
+    test(function, p);
+    deallog << "expected smoothness:" << std::endl << 0. << std::endl;
+  }
+
+  // for quadratic elements we can already assign an exponential decay
+  // a_i = C exp(-k i); set one up with different k's in each direction
+  {
+    const double k1 = 1.;
+    const double k2 = 2.;
+
+    const unsigned int dim      = 2;
+    const unsigned int coeff_1d = 3;
+    const unsigned int p        = 2;
+    Table<dim, double> coeff_in(coeff_1d, coeff_1d);
+    unsigned int       ind = 0;
+    for (unsigned int i = 0; i < coeff_1d; i++)
+      coeff_in(i, 0) = exp(-k1 * i);
+
+    for (unsigned int i = 0; i < coeff_1d; i++)
+      coeff_in(0, i) = exp(-k2 * i);
+
+    // make sure predicate skips 0-th:
+    coeff_in(0, 0) = 12345;
+
+    LegendreFunction<dim> function(coeff_in);
+    test(function, p);
+
+    deallog << "expected smoothness:" << std::endl
+            << exp(-std::min(k1, k2)) << std::endl;
+  }
+
+  // linear elements in 3D (expect zero output)
+  {
+    const unsigned int dim      = 3;
+    const unsigned int coeff_1d = 2;
+    const unsigned int p        = 1;
+    Table<dim, double> coeff_in(coeff_1d, coeff_1d, coeff_1d);
+    unsigned int       ind = 0;
+    for (unsigned int i = 0; i < coeff_1d; i++)
+      for (unsigned int j = 0; j < coeff_1d; j++)
+        for (unsigned int k = 0; k < coeff_1d; k++)
+          coeff_in(i, j, k) = 1.0 + ind++;
+
+    LegendreFunction<dim> function(coeff_in);
+    test(function, p);
+    deallog << "expected smoothness:" << std::endl << 0. << std::endl;
+  }
+
+  // cubic in 3D
+  {
+    const double k1 = 2.;
+    const double k2 = 3.;
+    const double k3 = 4.;
+    const unsigned int dim      = 3;
+    const unsigned int coeff_1d = 4;
+    const unsigned int p        = 3;
+    Table<dim, double> coeff_in(coeff_1d, coeff_1d, coeff_1d);
+    for (unsigned int i = 0; i < coeff_1d; i++)
+      coeff_in(i, 0, 0) = exp(-k1 * i);
+
+    for (unsigned int j = 0; j < coeff_1d; j++)
+      coeff_in(0, j, 0) = exp(-k2 * j);
+
+    for (unsigned int k = 0; k < coeff_1d; k++)
+      coeff_in(0, 0, k) = exp(-k3 * k);
+
+    // make sure predicate skips 0-th:
+    coeff_in(0, 0, 0) = 12345;
+
+    LegendreFunction<dim> function(coeff_in);
+    test(function, p);
+
+    deallog << "expected smoothness:" << std::endl
+            << exp(-std::min(k1, std::min(k2, k3))) << std::endl;
+  }
+
+
+  // 4th order in 3D but with some coefficients being zero
+  {
+    const double k1 = 2.;
+    const double k2 = k1 + 1.;
+    const unsigned int dim      = 3;
+    const unsigned int coeff_1d = 5;
+    const unsigned int p        = 4;
+    Table<dim, double> coeff_in(coeff_1d, coeff_1d, coeff_1d);
+    // all non-zero:
+    for (unsigned int i = 0; i < coeff_1d; i++)
+      coeff_in(i, 0, 0) = exp(-k2 * i);
+
+    // some non-zero (2nd and 4th), the slowest decay will be from this
+    // direction
+    for (unsigned int j = 2; j < coeff_1d; j = j + 2)
+      coeff_in(0, j, 0) = exp(-k1 * j);
+
+    // all but one zero:
+    for (unsigned int k = 3; k < coeff_1d; k = k + 10)
+      coeff_in(0, 0, k) = exp(-k2 * k);
+
+    // make sure predicate skips 0-th:
+    coeff_in(0, 0, 0) = 12345;
+
+    LegendreFunction<dim> function(coeff_in);
+    test(function, p);
+
+    deallog << "expected smoothness:" << std::endl << exp(-k1) << std::endl;
+  }
+
+  // cubic in 3D (zero)
+  {
+    const unsigned int dim      = 3;
+    const unsigned int coeff_1d = 4;
+    const unsigned int p        = 3;
+    Table<dim, double> coeff_in(coeff_1d, coeff_1d, coeff_1d);
+
+    LegendreFunction<dim> function(coeff_in);
+    test(function, p);
+
+    deallog << "expected smoothness:" << std::endl << 0. << std::endl;
+  }
+
+  dealii::deallog << "Ok" << std::endl;
+}
diff --git a/tests/numerics/smoothness_estimator_01.with_gsl=on.output b/tests/numerics/smoothness_estimator_01.with_gsl=on.output
new file mode 100644
index 0000000000..65d1adcafb
--- /dev/null
+++ b/tests/numerics/smoothness_estimator_01.with_gsl=on.output
@@ -0,0 +1,55 @@
+DEAL::-----------------------------------
+DEAL::2d, p=1, max_p=4
+DEAL::-----------------------------------
+DEAL::Linf norm in exact and calculated Legendre coefficients:
+DEAL::1.77636e-15
+DEAL::smoothness:
+DEAL::0.00000
+DEAL::expected smoothness:
+DEAL::0.00000
+DEAL::-----------------------------------
+DEAL::2d, p=2, max_p=5
+DEAL::-----------------------------------
+DEAL::Linf norm in exact and calculated Legendre coefficients:
+DEAL::7.21645e-16
+DEAL::smoothness:
+DEAL::0.367879
+DEAL::expected smoothness:
+DEAL::0.367879
+DEAL::-----------------------------------
+DEAL::3d, p=1, max_p=4
+DEAL::-----------------------------------
+DEAL::Linf norm in exact and calculated Legendre coefficients:
+DEAL::3.55271e-15
+DEAL::smoothness:
+DEAL::0.00000
+DEAL::expected smoothness:
+DEAL::0.00000
+DEAL::-----------------------------------
+DEAL::3d, p=3, max_p=6
+DEAL::-----------------------------------
+DEAL::Linf norm in exact and calculated Legendre coefficients:
+DEAL::3.19189e-15
+DEAL::smoothness:
+DEAL::0.135335
+DEAL::expected smoothness:
+DEAL::0.135335
+DEAL::-----------------------------------
+DEAL::3d, p=4, max_p=7
+DEAL::-----------------------------------
+DEAL::Linf norm in exact and calculated Legendre coefficients:
+DEAL::4.31599e-15
+DEAL::smoothness:
+DEAL::0.135335
+DEAL::expected smoothness:
+DEAL::0.135335
+DEAL::-----------------------------------
+DEAL::3d, p=3, max_p=6
+DEAL::-----------------------------------
+DEAL::Linf norm in exact and calculated Legendre coefficients:
+DEAL::0.00000
+DEAL::smoothness:
+DEAL::0.00000
+DEAL::expected smoothness:
+DEAL::0.00000
+DEAL::Ok
-- 
2.39.5
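
For readers who want to see the new entry points used outside the test suite, below is a minimal sketch (not part of the patch) of an hp-decision step built on SmoothnessEstimator::legendre_coefficient_decay(). It assumes a step-27-style program that already owns the Triangulation, hp::DoFHandler and solution vector; the function name hp_refine and the parameter max_fe_index are invented for this sketch, the exp(-1) threshold mirrors Problem4::substitute_h_for_p() above and is a heuristic rather than part of the interface, and the indexing by active cell index is assumed to match how the tests size the indicator vector by n_active_cells().

#include <deal.II/grid/tria.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/smoothness_estimator.h>

#include <cmath>

using namespace dealii;

// Turn h-refinement flags into p-refinement where the solution looks smooth:
// as the tests above suggest, the returned indicator behaves like exp(-k)
// with k the fitted Legendre coefficient decay rate, so small values mean a
// smooth local solution that is better served by a higher polynomial degree.
template <int dim>
void
hp_refine(Triangulation<dim> &  triangulation,
          hp::DoFHandler<dim> & dof_handler,
          const Vector<double> &solution,
          const unsigned int    max_fe_index)
{
  Vector<float> smoothness(triangulation.n_active_cells());
  SmoothnessEstimator::legendre_coefficient_decay(dof_handler,
                                                  solution,
                                                  smoothness);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      // same heuristic threshold as in tests/hp/laplace_mitchel2014_04_peak.cc
      const bool is_smooth =
        smoothness(cell->active_cell_index()) < std::exp(-1.);

      if (cell->refine_flag_set() && is_smooth &&
          cell->active_fe_index() < max_fe_index)
        {
          cell->clear_refine_flag();                               // skip h
          cell->set_active_fe_index(cell->active_fe_index() + 1);  // do p
        }
    }

  triangulation.execute_coarsening_and_refinement();
  // the caller still has to redistribute degrees of freedom and transfer
  // the solution to the new mesh afterwards
}

A caller would typically pass fe_collection.size() - 1 for max_fe_index, mirroring the fe.size() - 1 check in Problem4::substitute_h_for_p() above, so that cells already at the highest available degree fall back to h-refinement.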