* ---------------------------------------------------------------------
*
- * Author:
+ * Authors: Natasha Sharma, University of Texas at El Paso,
+ * Guido Kanschat, University of Heidelberg
+ * Timo Heister, Clemson University
+ * Wolfgang Bangerth, Colorado State University
+ *          Zhuoran Wang, Colorado State University
*/
+
+
+// @sect3{Include files}
+
+// The first few include files have already been used in the previous
+// example, so we will not explain their meaning here again. The principal
+// structure of the program is very similar to that of, for example, step-4
+// and so we include many of the same header files.
+
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
-#include <deal.II/fe/fe_interface_values.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping_q.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
-#include <deal.II/meshworker/mesh_loop.h>
-
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/data_out.h>
+// The two most interesting header files will be these two:
+#include <deal.II/fe/fe_interface_values.h>
+#include <deal.II/meshworker/mesh_loop.h>
+// The first of these is responsible for providing the class FEInterfaceValues
+// that can be used to evaluate quantities such as the jump or average
+// of shape functions (or their gradients) across interfaces between cells.
+// This class will be quite useful in evaluating the penalty terms that appear
+// in the C0IP formulation.
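+// In the usual notation for such interface terms, the jump of a quantity $v$
+// across a face shared by two cells $K^+$ and $K^-$ is
+// $[v] = v|_{K^+} - v|_{K^-}$, while the average is
+// $\{v\} = \frac{1}{2}\left(v|_{K^+} + v|_{K^-}\right)$.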
+
#include <fstream>
#include <iostream>
#include <cmath>
-namespace StepBiharmonic
+namespace Step71
{
using namespace dealii;
+ // In the following namespace, let us define the exact solution against
+ // which we will compare the numerically computed one. It has the form
+ // $u(x,y) = \sin(\pi x) \sin(\pi y)$ (only the 2d case is implemented),
+ // and the namespace also contains a class that corresponds to the right
+ // hand side that produces this solution.
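+  // For this choice of $u$, applying the biharmonic operator yields the
+  // corresponding right hand side
+  // $f(x,y) = \Delta^2 u = 4\pi^4 \sin(\pi x) \sin(\pi y)$.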
namespace ExactSolution
{
using numbers::PI;
- /**
- * An exact solution of the form
- * $ u(x,y) = \sin(\pi x) \sin(\pi y) $.
- *
- * Note that this solution has zero boundary values for the *value*
- * of the solution, but not for its Laplacian. Consequently, the
- * boundary contribution to the penalty terms is not zero.
- */
template <int dim>
class Solution : public Function<dim>
{
};
- /**
- * The corresponding right hand side.
- */
template <int dim>
class RightHandSide : public Function<dim>
{
- /*************************************************************/
// @sect3{The main class}
+ //
+ // The following is the principal class of this tutorial program. It has
+ // the structure of many of the other tutorial programs and there should
+ // really be nothing particularly surprising about its contents or
+ // the constructor that follows it.
template <int dim>
class BiharmonicProblem
{
void compute_errors();
void output_results(const unsigned int iteration) const;
- Triangulation<dim> triangulation;
- const MappingQ<dim> mapping;
- const FE_Q<dim> fe;
+ Triangulation<dim> triangulation;
+
+ MappingQ<dim> mapping;
+
+ FE_Q<dim> fe;
DoFHandler<dim> dof_handler;
AffineConstraints<double> constraints;
Vector<double> system_rhs;
};
+
+
template <int dim>
BiharmonicProblem<dim>::BiharmonicProblem(const unsigned int fe_degree)
: mapping(1)
+ // Next up are the functions that create the initial mesh (a once refined
+ // unit square) and set up the constraints, vectors, and matrices on
+ // each mesh. Again, both of these are essentially unchanged from many
+ // previous tutorial programs.
template <int dim>
void BiharmonicProblem<dim>::make_grid()
{
+  // @sect3{Assembling the linear system}
+ //
+ // The following pieces of code are more interesting. They all relate to the
+  // assembly of the linear system. While assembling the cell-interior terms
+ // is not of great difficulty -- that works in essence like the assembly
+ // of the corresponding terms of the Laplace equation, and you have seen
+ // how this works in step-4 or step-6, for example -- the difficulty
+ // is with the penalty terms in the formulation. These require the evaluation
+ // of gradients of shape functions at interfaces of cells. At the least,
+ // one would therefore need to use two FEFaceValues objects, but if one of the
+ // two sides is adaptively refined, then one actually needs an FEFaceValues
+  // and an FESubfaceValues object; one also needs to keep track of which
+ // shape functions live where, and finally we need to ensure that every
+ // face is visited only once. All of this is a substantial overhead to the
+ // logic we really want to implement (namely the penalty terms in the
+ // bilinear form). As a consequence, we will make use of the
+ // FEInterfaceValues class -- a helper class in deal.II that allows us
+ // to abstract away the two FEFaceValues or FESubfaceValues objects and
+ // directly access what we really care about: jumps, averages, etc.
+ //
+ // But this doesn't yet solve our problem of having to keep track of
+ // which faces we have already visited when we loop over all cells and
+ // all of their faces. To make this process simpler, we use the
+ // MeshWorker::mesh_loop() function that provides a simple interface
+ // for this task: Based on the ideas outlined in the WorkStream
+ // namespace documentation, MeshWorker::mesh_loop() requires three
+ // functions that do work on cells, interior faces, and boundary
+ // faces; these functions work on scratch objects for intermediate
+ // results, and then copy the result of their computations into
+ // copy data objects from where a copier function copies them into
+ // the global matrix and right hand side objects.
+ //
+ // The following structures then provide the scratch and copy objects
+ // that are necessary for this approach. You may look up the WorkStream
+ // namespace as well as the
+ // @ref threads "Parallel computing with multiple processors"
+ // module for more information on how they typically work.
template <int dim>
struct ScratchData
{
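+    // The members of this object are what the worker functions further down
+    // reference: an FEValues object for the cell integrals and an
+    // FEInterfaceValues object for the face integrals. The two constructors
+    // sketched here are an assumption about their exact form, modeled on how
+    // the scratch object is created in assemble_system() below; the second
+    // one exists because MeshWorker::mesh_loop() needs to be able to copy
+    // scratch objects.
+    ScratchData(const Mapping<dim> &      mapping,
+                const FiniteElement<dim> &fe,
+                const unsigned int        quadrature_degree,
+                const UpdateFlags         update_flags,
+                const UpdateFlags         interface_update_flags)
+      : fe_values(mapping, fe, QGauss<dim>(quadrature_degree), update_flags)
+      , fe_interface_values(mapping,
+                            fe,
+                            QGauss<dim - 1>(quadrature_degree),
+                            interface_update_flags)
+    {}
+
+    ScratchData(const ScratchData<dim> &scratch_data)
+      : fe_values(scratch_data.fe_values.get_mapping(),
+                  scratch_data.fe_values.get_fe(),
+                  scratch_data.fe_values.get_quadrature(),
+                  scratch_data.fe_values.get_update_flags())
+      , fe_interface_values(scratch_data.fe_values.get_mapping(),
+                            scratch_data.fe_values.get_fe(),
+                            scratch_data.fe_interface_values.get_quadrature(),
+                            scratch_data.fe_interface_values.get_update_flags())
+    {}
+
+    FEValues<dim>          fe_values;
+    FEInterfaceValues<dim> fe_interface_values;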
- struct CopyDataFace
+ struct CopyData
{
- FullMatrix<double> cell_matrix;
- std::vector<types::global_dof_index> joint_dof_indices;
- };
+ CopyData(const unsigned int dofs_per_cell)
+ : cell_matrix(dofs_per_cell, dofs_per_cell)
+ , cell_rhs(dofs_per_cell)
+ , local_dof_indices(dofs_per_cell)
+ {}
+ CopyData(const CopyData &) = default;
- struct CopyData
- {
- FullMatrix<double> cell_matrix;
- Vector<double> cell_rhs;
- std::vector<types::global_dof_index> local_dof_indices;
- std::vector<CopyDataFace> face_data;
- template <class Iterator>
- void reinit(const Iterator &cell, unsigned int dofs_per_cell)
+ struct FaceData
{
- cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
- cell_rhs.reinit(dofs_per_cell);
+ FullMatrix<double> cell_matrix;
+ std::vector<types::global_dof_index> joint_dof_indices;
+ };
- local_dof_indices.resize(dofs_per_cell);
- cell->get_dof_indices(local_dof_indices);
- }
+ FullMatrix<double> cell_matrix;
+ Vector<double> cell_rhs;
+ std::vector<types::global_dof_index> local_dof_indices;
+ std::vector<FaceData> face_data;
};
+ // The more interesting part is where we actually assemble the linear system.
+ // Fundamentally, this function has five parts:
+ // - The definition of the `cell_worker` "lambda function", a small
+ // function that is defined within the surrounding `assemble_system()`
+ // function and that will be responsible for computing the local
+ // integrals on an individual cell; it will work on a copy of the
+ // `ScratchData` class and put its results into the corresponding
+ // `CopyData` object.
+ // - The definition of the `face_worker` lambda function that does
+ // the integration of all terms that live on the interfaces between
+ // cells.
+ // - The definition of the `boundary_worker` function that does the
+ // same but for cell faces located on the boundary of the domain.
+ // - The definition of the `copier` function that is responsible
+ // for copying all of the data the previous three functions have
+ // put into copy objects for a single cell, into the global matrix
+ // and right hand side.
+ //
+ // The fifth part is the one where we bring all of this together.
+ //
+ // Let us go through each of these pieces necessary for the assembly
+  // in turn.
template <int dim>
void BiharmonicProblem<dim>::assemble_system()
{
- using Iterator = decltype(dof_handler.begin_active());
- const ExactSolution::RightHandSide<dim> right_hand_side;
-
+ using Iterator = typename DoFHandler<dim>::active_cell_iterator;
+
+ // The first piece is the `cell_worker` that does the assembly
+ // on the cell interiors. It is a (lambda) function that takes
+ // a cell (input), a scratch object, and a copy object (output)
+ // as arguments. It looks like the assembly functions of many
+    // of the other tutorial programs, or at least the body of the
+ // loop over all cells.
+ //
+ // The terms we integrate here are the cell contribution
+ // @f{align*}{
+ // A^K_{ij} = \int_K \nabla^2\varphi_i(x) : \nabla^2\varphi_j(x) dx
+ // @f}
+ // to the global matrix, and
+ // @f{align*}{
+    //     f^K_i = \int_K \varphi_i(x) f(x) dx
+ // @f}
+ // to the right hand side vector.
auto cell_worker = [&](const Iterator & cell,
ScratchData<dim> &scratch_data,
CopyData & copy_data) {
- const unsigned int n_dofs = scratch_data.fe_values.get_fe().dofs_per_cell;
- copy_data.reinit(cell, n_dofs);
+ copy_data.cell_matrix = 0;
+ copy_data.cell_rhs = 0;
+
scratch_data.fe_values.reinit(cell);
+ cell->get_dof_indices(copy_data.local_dof_indices);
- const auto &q_points = scratch_data.fe_values.get_quadrature_points();
+ const FEValues<dim> &fe_values = scratch_data.fe_values;
- const FEValues<dim> & fe_v = scratch_data.fe_values;
- const std::vector<double> &JxW = fe_v.get_JxW_values();
+ const ExactSolution::RightHandSide<dim> right_hand_side;
- const double nu = 1.0;
+ const unsigned int dofs_per_cell =
+ scratch_data.fe_values.get_fe().dofs_per_cell;
- for (unsigned int point = 0; point < fe_v.n_quadrature_points; ++point)
+ for (unsigned int point = 0; point < fe_values.n_quadrature_points;
+ ++point)
{
- for (unsigned int i = 0; i < n_dofs; ++i)
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
{
- for (unsigned int j = 0; j < n_dofs; ++j)
+ for (unsigned int j = 0; j < dofs_per_cell; ++j)
{
- // \int_Z \nu \nabla^2 u \cdot \nabla^2 v \, dx.
copy_data.cell_matrix(i, j) +=
- nu *
- scalar_product(fe_v.shape_hessian(i, point),
- fe_v.shape_hessian(j, point)) *
- JxW[point]; // dx
+ scalar_product(
+ fe_values.shape_hessian(i, point), // nabla^2 phi_i(x)
+ fe_values.shape_hessian(j, point)) * // nabla^2 phi_j(x)
+ fe_values.JxW(point); // dx
}
- copy_data.cell_rhs(i) += fe_v.shape_value(i, point) *
- right_hand_side.value(q_points[point]) *
- JxW[point]; // dx
+ copy_data.cell_rhs(i) +=
+ fe_values.shape_value(i, point) * // phi_i(x)
+ right_hand_side.value(
+ fe_values.quadrature_point(point)) * // f(x)
+ fe_values.JxW(point); // dx
}
}
};
+    // The next building block is the one that assembles penalty terms on each
+    // of the interior faces of the mesh. As described in the documentation of
+ // MeshWorker::mesh_loop(), this function receives arguments that denote
+ // a cell and its neighboring cell, as well as (for each of the two
+ // cells) the face (and potentially sub-face) we have to integrate
+ // over. Again, we also get a scratch object, and a copy object
+ // for putting the results in.
+ //
+    // The function itself has three parts. At the top, we initialize
+ // the FEInterfaceValues object and create a new `CopyData::FaceData`
+ // object to store our input in. This gets pushed to the end of the
+ // `copy_data.face_data` variable. We need to do this because
+ // the number of faces (or subfaces) over which we integrate for a
+ // given cell differs from cell to cell, and the sizes of these
+ // matrices also differ, depending on what degrees of freedom
+ // are adjacent to the face or subface.
+ //
+ // TODO: Complete once we've got all terms and factors pinned down.
    auto face_worker = [&](const Iterator &    cell,
                           const unsigned int &f,
                           const unsigned int &sf,
                           const Iterator &    ncell,
                           const unsigned int &nf,
                           const unsigned int &nsf,
                           ScratchData<dim> &  scratch_data,
                           CopyData &          copy_data) {
- FEInterfaceValues<dim> &fe_i = scratch_data.fe_interface_values;
- fe_i.reinit(cell, f, sf, ncell, nf, nsf);
- const auto &q_points = fe_i.get_quadrature_points();
+ FEInterfaceValues<dim> &fe_interface_values =
+ scratch_data.fe_interface_values;
+ fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf);
copy_data.face_data.emplace_back();
-      CopyDataFace &copy_data_face = copy_data.face_data.back();
+      CopyData::FaceData &copy_data_face = copy_data.face_data.back();
- const unsigned int n_dofs = fe_i.n_current_interface_dofs();
- copy_data_face.joint_dof_indices = fe_i.get_interface_dof_indices();
+ copy_data_face.joint_dof_indices =
+ fe_interface_values.get_interface_dof_indices();
- copy_data_face.cell_matrix.reinit(n_dofs, n_dofs);
+ const unsigned int n_interface_dofs =
+ fe_interface_values.n_current_interface_dofs();
+ copy_data_face.cell_matrix.reinit(n_interface_dofs, n_interface_dofs);
- const std::vector<double> & JxW = fe_i.get_JxW_values();
- const std::vector<Tensor<1, dim>> &normals = fe_i.get_normal_vectors();
+ // The second part deals with determining what the penalty
+ // parameter should be.
+ // TODO: Complete
// eta = 1/2 + 2C_2
// gamma = eta/|e|
}
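+      // The comments above give the recipe $\eta = 1/2 + 2C_2$ and
+      // $\gamma = \eta/|e|$. As a placeholder until the constant $C_2$ is
+      // pinned down (see the TODO above), we assume $C_2 = 1$ and take $|e|$
+      // to be the diameter of the current face; both choices are assumptions
+      // rather than final values.
+      const double C_2   = 1.0;
+      const double eta   = 0.5 + 2.0 * C_2;
+      const double gamma = eta / cell->face(f)->diameter();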
- for (unsigned int qpoint = 0; qpoint < q_points.size(); ++qpoint)
+ // Finally, and as usual, we loop over the quadrature points
+ // and indices `i` and `j` to add up the contributions of this
+ // face or sub-face. These are then stored in the `copy_data.face_data`
+ // object created above.
+ for (unsigned int point = 0;
+ point < fe_interface_values.n_quadrature_points;
+ ++point)
{
// \int_F -{grad^2 u n n } [grad v n]
// - {grad^2 v n n } [grad u n]
// + gamma [grad u n ][grad v n]
- const auto &n = normals[qpoint];
+ const auto &n = fe_interface_values.normal(point);
- for (unsigned int i = 0; i < n_dofs; ++i)
- for (unsigned int j = 0; j < n_dofs; ++j)
+ for (unsigned int i = 0; i < n_interface_dofs; ++i)
+ for (unsigned int j = 0; j < n_interface_dofs; ++j)
{
copy_data_face.cell_matrix(i, j) +=
- (-(fe_i.average_hessian(i, qpoint) * n *
- n) // - {grad^2 v n n }
- * (fe_i.jump_gradient(j, qpoint) * n) // [grad u n]
- - (fe_i.average_hessian(j, qpoint) * n *
+ (-(fe_interface_values.average_hessian(i, point) * n *
+ n) // - {grad^2 v n n }
+ * (fe_interface_values.jump_gradient(j, point) *
+ n) // [grad u n]
+ - (fe_interface_values.average_hessian(j, point) * n *
n) // - {grad^2 u n n }
- * (fe_i.jump_gradient(i, qpoint) * n) // [grad v n]
+ * (fe_interface_values.jump_gradient(i, point) *
+ n) // [grad v n]
// gamma [grad u n ][grad v n]:
- + gamma * (fe_i.jump_gradient(i, qpoint) * n) *
- (fe_i.jump_gradient(j, qpoint) * n)) *
- JxW[qpoint]; // dx
+ + gamma * (fe_interface_values.jump_gradient(i, point) * n) *
+ (fe_interface_values.jump_gradient(j, point) * n)) *
+ fe_interface_values.JxW(point); // dx
}
}
};
+ // The third piece is to do the same kind of assembly for faces that
+ // are at the boundary. The idea is the same as above, of course,
+ // with only the difference that there are now penalty terms that
+ // also go into the right hand side.
+ //
+ // TODO: Complete, same as above.
auto boundary_worker = [&](const Iterator & cell,
const unsigned int &face_no,
ScratchData<dim> & scratch_data,
CopyData & copy_data) {
- FEInterfaceValues<dim> &fe_i = scratch_data.fe_interface_values;
- fe_i.reinit(cell, face_no);
- const auto &q_points = fe_i.get_quadrature_points();
+ FEInterfaceValues<dim> &fe_interface_values =
+ scratch_data.fe_interface_values;
+ fe_interface_values.reinit(cell, face_no);
+ const auto &q_points = fe_interface_values.get_quadrature_points();
copy_data.face_data.emplace_back();
-      CopyDataFace &copy_data_face = copy_data.face_data.back();
+      CopyData::FaceData &copy_data_face = copy_data.face_data.back();
- const unsigned int n_dofs = fe_i.n_current_interface_dofs();
- copy_data_face.joint_dof_indices = fe_i.get_interface_dof_indices();
+ const unsigned int n_dofs =
+ fe_interface_values.n_current_interface_dofs();
+ copy_data_face.joint_dof_indices =
+ fe_interface_values.get_interface_dof_indices();
copy_data_face.cell_matrix.reinit(n_dofs, n_dofs);
- const std::vector<double> & JxW = fe_i.get_JxW_values();
- const std::vector<Tensor<1, dim>> &normals = fe_i.get_normal_vectors();
+ const std::vector<double> &JxW = fe_interface_values.get_JxW_values();
+ const std::vector<Tensor<1, dim>> &normals =
+ fe_interface_values.get_normal_vectors();
const ExactSolution::Solution<dim> exact_solution;
{
for (unsigned int j = 0; j < n_dofs; ++j)
copy_data_face.cell_matrix(i, j) +=
- (-(fe_i.average_hessian(i, qpoint) * n *
- n) // - {grad^2 v n n }
- * (fe_i.jump_gradient(j, qpoint) * n) // [grad u n]
+ (-(fe_interface_values.average_hessian(i, qpoint) * n *
+ n) // - {grad^2 v n n }
+ * (fe_interface_values.jump_gradient(j, qpoint) *
+ n) // [grad u n]
//
- - (fe_i.average_hessian(j, qpoint) * n *
+ - (fe_interface_values.average_hessian(j, qpoint) * n *
n) // - {grad^2 u n n }
- * (fe_i.jump_gradient(i, qpoint) * n) // [grad v n]
- //
+ * (fe_interface_values.jump_gradient(i, qpoint) *
+ n) // [grad v n]
+ //
+ 2.0 * gamma *
- (fe_i.jump_gradient(i, qpoint) * n) // 2 gamma [grad v n]
- * (fe_i.jump_gradient(j, qpoint) * n) // [grad u n]
+ (fe_interface_values.jump_gradient(i, qpoint) *
+ n) // 2 gamma [grad v n]
+ * (fe_interface_values.jump_gradient(j, qpoint) *
+ n) // [grad u n]
) *
JxW[qpoint]; // dx
copy_data.cell_rhs(i) +=
- (-(fe_i.average_hessian(i, qpoint) * n *
- n) * // - {grad^2 v n n }
- (exact_gradients[qpoint] * n) // (grad u_exact n)
- + 2.0 * gamma //
- * (fe_i.jump_gradient(i, qpoint) * n) // [grad v n]
- * (exact_gradients[qpoint] * n) // (grad u_exact n)
+ (-(fe_interface_values.average_hessian(i, qpoint) * n *
+ n) * // - {grad^2 v n n }
+ (exact_gradients[qpoint] * n) // (grad u_exact n)
+ + 2.0 * gamma //
+ * (fe_interface_values.jump_gradient(i, qpoint) *
+ n) // [grad v n]
+ * (exact_gradients[qpoint] * n) // (grad u_exact n)
) *
JxW[qpoint]; // dx
}
}
};
- auto copier = [&](const CopyData &c) {
- constraints.distribute_local_to_global(c.cell_matrix,
- c.cell_rhs,
- c.local_dof_indices,
+    // Part 4 is a small function that copies the data produced by the
+ // cell, interior, and boundary face assemblers above into the
+ // global matrix and right hand side vector. There really is not
+ // very much to do here: We distribute the cell matrix and right
+ // hand side contributions as we have done in almost all of the
+ // other tutorial programs using the constraints objects. We then
+ // also have to do the same for the face matrix contributions
+ // that have gained content for the faces (interior and boundary)
+ // and that the `face_worker` and `boundary_worker` have added
+ // to the `copy_data.face_data` array.
+    auto copier = [&](const CopyData &copy_data) {
+ constraints.distribute_local_to_global(copy_data.cell_matrix,
+ copy_data.cell_rhs,
+ copy_data.local_dof_indices,
system_matrix,
system_rhs);
- for (auto &cdf : c.face_data)
+ for (auto &cdf : copy_data.face_data)
{
constraints.distribute_local_to_global(cdf.cell_matrix,
                                                 cdf.joint_dof_indices,
                                                 system_matrix);
}
};
- const unsigned int n_gauss_points = dof_handler.get_fe().degree + 1;
- ScratchData<dim> scratch_data(mapping,
+ // Having set all of this up, what remains is to just create a scratch
+ // and copy data object and call the MeshWorker::mesh_loop() function
+ // that then goes over all cells and faces, calls the respective workers
+ // on them, and then the copier function that puts things into the
+ // global matrix and right hand side. As an additional benefit,
+ // MeshWorker::mesh_loop() does all of this in parallel, using
+ // as many processor cores as your machine happens to have.
+ const unsigned int n_gauss_points = dof_handler.get_fe().degree + 1;
+ ScratchData<dim> scratch_data(mapping,
fe,
n_gauss_points,
                                    update_values | update_gradients |
                                      update_hessians | update_quadrature_points |
                                      update_JxW_values,
                                    update_values | update_gradients |
                                      update_hessians | update_quadrature_points |
update_JxW_values | update_normal_vectors);
- CopyData copy_data;
+ CopyData copy_data(dof_handler.get_fe().dofs_per_cell);
MeshWorker::mesh_loop(dof_handler.begin_active(),
dof_handler.end(),
                          cell_worker,
                          copier,
                          scratch_data,
                          copy_data,
                          MeshWorker::assemble_own_cells |
                            MeshWorker::assemble_boundary_faces |
                            MeshWorker::assemble_own_interior_faces_once,
                          boundary_worker,
                          face_worker);
}
+
+  // @sect3{Solving the linear system and postprocessing}
+ //
+ // The show is essentially over at this point: The remaining functions are
+ // not overly interesting or novel. The first one simply uses a direct
+ // solver to solve the linear system (see also step-29):
template <int dim>
void BiharmonicProblem<dim>::solve()
{
SparseDirectUMFPACK A_direct;
A_direct.initialize(system_matrix);
A_direct.vmult(solution, system_rhs);
+
constraints.distribute(solution);
}
+ // The next function evaluates the error between the computed solution
+ // and the exact solution (which is known here because we have chosen
+  // the right hand side and boundary values in such a way that we know
+ // the corresponding solution). In the first two code blocks below,
+ // we compute the error in the $L_2$ norm and the $H^1$ semi-norm.
template <int dim>
void BiharmonicProblem<dim>::compute_errors()
{
- const unsigned int n_gauss_points =
- dof_handler.get_fe().tensor_degree() + 1;
-
{
Vector<float> norm_per_cell(triangulation.n_active_cells());
VectorTools::integrate_difference(mapping,
solution,
ExactSolution::Solution<dim>(),
norm_per_cell,
- QGauss<dim>(n_gauss_points + 1),
+ QGauss<dim>(fe.degree + 2),
VectorTools::L2_norm);
const double error_norm =
VectorTools::compute_global_error(triangulation,
solution,
ExactSolution::Solution<dim>(),
norm_per_cell,
- QGauss<dim>(n_gauss_points + 1),
+ QGauss<dim>(fe.degree + 2),
VectorTools::H1_seminorm);
const double error_norm =
VectorTools::compute_global_error(triangulation,
<< std::endl;
}
- // Now also compute the H2 seminorm error, integrating over the interiors
- // of the cells but not taking into account the interface jump terms.
- // This is *not* equivalent to the energy error for the problem.
+ // Now also compute an approximation to the $H^2$ seminorm error. The actual
+ // $H^2$ seminorm would require us to integrate second derivatives of the
+ // solution $u_h$, but given the Lagrange shape functions we use, $u_h$ of
+ // course has kinks at the interfaces between cells, and consequently second
+ // derivatives are singular at interfaces. As a consequence, we really only
+    // integrate over the interiors of the cells and ignore the interface
+ // contributions. This is *not* an equivalent norm to the energy norm for
+ // the problem, but still gives us an idea of how fast the error converges.
+ //
+ // We note that one could address this issue by defining a norm that
+ // is equivalent to the energy norm. This would involve adding up not
+ // only the integrals over cell interiors as we do below, but also adding
+ // penalty terms for the jump of the derivative of $u_h$ across interfaces,
+ // with an appropriate scaling of the two kinds of terms. We will leave
+ // this for later work.
{
const QGauss<dim> quadrature_formula(fe.degree + 2);
ExactSolution::Solution<dim> exact_solution;
exact_solution.hessian_list(fe_values.get_quadrature_points(),
exact_hessians);
- double diff = 0;
+ double local_error = 0;
for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
{
- diff +=
+ local_error +=
((exact_hessians[q_point] - hessians[q_point]).norm_square() *
fe_values.JxW(q_point));
}
- error_per_cell[cell->active_cell_index()] = std::sqrt(diff);
+ error_per_cell[cell->active_cell_index()] = std::sqrt(local_error);
}
+
const double error_norm = error_per_cell.l2_norm();
std::cout << " Error in the broken H2 seminorm: " << error_norm
<< std::endl;
}
+
+ // Equally uninteresting is the function that generates graphical output.
+ // It looks exactly like the one in step-6, for example.
template <int dim>
void
BiharmonicProblem<dim>::output_results(const unsigned int iteration) const
DataOut<dim> data_out;
data_out.attach_dof_handler(dof_handler);
- data_out.add_data_vector(solution, "u");
- Vector<double> exact = solution;
- unsigned int degree = fe.tensor_degree();
- const ExactSolution::Solution<dim> exact_solution;
- VectorTools::project(mapping,
- dof_handler,
- constraints,
- QGauss<dim>(degree + 1),
- exact_solution,
- exact);
- data_out.add_data_vector(exact, "exact");
-
+ data_out.add_data_vector(solution, "solution");
data_out.build_patches();
- std::ofstream output_vtk(
- ("output_" + Utilities::int_to_string(iteration, 6) + ".vtk").c_str());
- data_out.write_vtk(output_vtk);
+ std::ofstream output_vtu(
+ ("output_" + Utilities::int_to_string(iteration, 6) + ".vtu").c_str());
+ data_out.write_vtu(output_vtu);
}
+  // The same is true for the `run()` function: it looks just like the ones
+  // in previous tutorial programs.
template <int dim>
void BiharmonicProblem<dim>::run()
{
{
std::cout << "Cycle: " << cycle << " of " << n_cycles << std::endl;
-
-
triangulation.refine_global(1);
setup_system();
std::cout << std::endl;
}
}
-} // namespace StepBiharmonic
+} // namespace Step71
-int main(int argc, char *argv[])
+// @sect3{The main() function}
+//
+// Finally for the `main()` function. There is, again, not very much to see
+// here: It looks like the ones in previous tutorial programs. There
+// is a variable that allows selecting the polynomial degree of the element
+// we want to use for solving the equation. Because the C0IP formulation
+// we use requires the element degree to be at least two, we check with
+// an assertion that whatever one sets for the polynomial degree actually
+// makes sense.
+int main()
{
try
{
using namespace dealii;
- using namespace StepBiharmonic;
-
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
-
- unsigned int degree = 2; // minimum degree 2
+ using namespace Step71;
- // If provided on the command line, override the polynomial degree
- // by the one given there.
- if (argc > 1)
- degree = Utilities::string_to_int(argv[1]);
+ const unsigned int fe_degree = 2;
+ Assert(fe_degree >= 2,
+ ExcMessage("The C0IP formulation for the biharmonic problem "
+ "only works if one uses elements of polynomial "
+ "degree at least 2."));
- BiharmonicProblem<2> my_bi(degree);
- my_bi.run();
+ BiharmonicProblem<2> biharmonic_problem(fe_degree);
+ biharmonic_problem.run();
}
catch (std::exception &exc)
{