#include <string>
+// I prefer to use the ParameterHandler class in a slightly different way than
+// usual: The class Parameters creates, uses, and then destroys a
+// ParameterHandler inside the <code>read_parameter_file</code> method instead
+// of keeping it around. This is nice because now all of the run time
+// parameters are contained in a simple class that can be copied or passed
+// around very easily.
namespace CDR
{
using namespace dealii;
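+// A condensed sketch of that pattern (the entry name and member variable here
+// are placeholders, not the gallery's actual parameters; the real class needs
+// <deal.II/base/parameter_handler.h>):
+class Parameters
+{
+public:
+  unsigned int inner_iterations;
+
+  void read_parameter_file(const std::string &file_name)
+  {
+    ParameterHandler parameter_handler;
+    parameter_handler.declare_entry
+      ("inner_iterations", "100", Patterns::Integer(1),
+       "Maximum number of solver iterations.");
+    parameter_handler.parse_input(file_name);
+    inner_iterations = parameter_handler.get_integer("inner_iterations");
+    // The ParameterHandler is destroyed here; only plain data remains.
+  }
+};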
#include <functional>
+// One of the goals I had in writing this entry was to split up functions into
+// different compilation units instead of using one large file. This is the
+// header file for a pair of functions (only one of which I ultimately use)
+// which build the system matrix.
namespace CDR
{
using namespace dealii;
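+// A sketch of what the declaration of such a function might look like. The
+// parameter list here is an assumption for illustration, not necessarily the
+// gallery's exact interface; the <code>functional</code> header is included
+// above because the convection field is passed as a <code>std::function</code>.
+template<int dim, typename MatrixType>
+void create_system_matrix
+(const DoFHandler<dim>                                 &dof_handler,
+ const QGauss<dim>                                     &quad,
+ const std::function<Tensor<1, dim>(const Point<dim>)> &convection_function,
+ const Parameters                                      &parameters,
+ const double                                           time_step,
+ MatrixType                                            &system_matrix);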
namespace CDR
{
using namespace dealii;
+ // This is the actual implementation of the <code>create_system_matrix</code>
+ // function described in the header file. It is similar to the system matrix
+ // assembly routine in step-40.
template<int dim, typename UpdateFunction>
void internal_create_system_matrix
(const DoFHandler<dim> &dof_handler,
const auto convection_contribution = current_convection
*fe_values.shape_grad(j, q);
cell_matrix(i, j) += fe_values.JxW(q)*
- // mass and reaction part
+ // Here are the time step, mass, and reaction parts:
((1.0 + time_step/2.0*parameters.reaction_coefficient)
*fe_values.shape_value(i, q)*fe_values.shape_value(j, q)
+ time_step/2.0*
- // convection part
+ // and the convection part:
(fe_values.shape_value(i, q)*convection_contribution
- // Laplacian part
+ // and, finally, the diffusion part:
+ parameters.diffusion_coefficient
*(fe_values.shape_grad(i, q)*fe_values.shape_grad(j, q)))
);
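+ // Written out, with @f$k@f$ the time step, @f$\rho@f$ the reaction
+ // coefficient, @f$D@f$ the diffusion coefficient, and @f$\vec{w}@f$ the
+ // convection field, each local matrix entry accumulates
+ // @f[
+ //   \left(1 + \frac{k}{2}\rho\right)(\phi_i, \phi_j)
+ //   + \frac{k}{2}\left[(\phi_i, \vec{w}\cdot\nabla\phi_j)
+ //   + D\,(\nabla\phi_i, \nabla\phi_j)\right],
+ // @f]
+ // the left-hand side of a Crank-Nicolson step.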
#include <functional>
+// Similarly to <code>create_system_matrix</code>, I wrote a separate function
+// to compute the right hand side.
namespace CDR
{
using namespace dealii;
*fe_values.shape_grad(j, q);
cell_rhs(i) += fe_values.JxW(q)*
- // mass and reaction part
+ // Here are the mass and reaction parts:
(((1.0 - time_step/2.0*parameters.reaction_coefficient)
*fe_values.shape_value(i, q)*fe_values.shape_value(j, q)
- time_step/2.0*
- // convection part
+ // the convection part:
(fe_values.shape_value(i, q)*convection_contribution
- // Laplacian part
+ // the diffusion part:
+ parameters.diffusion_coefficient
*(fe_values.shape_grad(i, q)*fe_values.shape_grad(j, q))))
*current_fe_coefficients[j]
- // forcing parts
+ // and, finally, the forcing function part:
+ time_step/2.0*
(current_forcing + previous_forcing)
*fe_values.shape_value(i, q));
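+ // Written out in the same notation, this accumulates
+ // @f[
+ //   \left[\left(1 - \frac{k}{2}\rho\right)(\phi_i, \phi_j)
+ //   - \frac{k}{2}\left((\phi_i, \vec{w}\cdot\nabla\phi_j)
+ //   + D\,(\nabla\phi_i, \nabla\phi_j)\right)\right] u_j
+ //   + \frac{k}{2}\,(f^n + f^{n+1}, \phi_i),
+ // @f]
+ // the matching right-hand side of the Crank-Nicolson step, where @f$u_j@f$
+ // are the current solution coefficients.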
#define dealii__cdr_write_pvtu_output_h
#include <deal.II/dofs/dof_handler.h>
+// This is a small class which handles PVTU output.
namespace CDR
{
using namespace dealii;
#include <fstream>
#include <vector>
+// Here is the implementation of the important function. This is similar to
+// what is presented in step-40.
namespace CDR
{
using namespace dealii;
DataOutBase::VtkFlags flags;
flags.time = current_time;
+ // While the default setting is the best compression level, using
+ // <code>best_speed</code> makes this function much faster.
flags.compression_level = DataOutBase::VtkFlags::ZlibCompressionLevel::best_speed;
data_out.set_flags(flags);
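+ // The rest of the routine proceeds as in step-40: each process writes its
+ // own <code>.vtu</code> file and one process writes the master
+ // <code>.pvtu</code> record. A sketch of the per-process part (the file
+ // name is illustrative):
+data_out.build_patches();
+const unsigned int process_id
+  {Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)};
+std::ofstream output
+  ("solution-" + Utilities::int_to_string(process_id, 4) + ".vtu");
+data_out.write_vtu(output);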
#include <deal.II-cdr/system_matrix.h>
#include <deal.II-cdr/system_matrix.templates.h>
+// This file exists just to build explicit template instantiations of
+// <code>create_system_matrix</code>. Even though the solver is run in
+// parallel with Trilinos objects, other serial solvers can use the same
+// function without recompilation because everything is compiled here just
+// once.
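+// The mechanism is C++ explicit template instantiation. As a minimal,
+// self-contained illustration of the technique (unrelated to the actual CDR
+// signatures):
+template<int dim> unsigned int n_vertices_per_cell() { return 1u << dim; }
+template unsigned int n_vertices_per_cell<2>(); // compiled here, exactly once
+template unsigned int n_vertices_per_cell<3>();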
namespace CDR
{
using namespace dealii;
#include <deal.II-cdr/system_rhs.templates.h>
+// Like <code>system_matrix.cc</code>, this file just compiles explicit
+// template instantiations.
namespace CDR
{
using namespace dealii;
#include <deal.II-cdr/write_pvtu_output.templates.h>
+// Again, this file just compiles the constructor as well as the templated
+// functions.
namespace CDR
{
using namespace dealii;
#include <deal.II/numerics/error_estimator.h>
-// for distributed computations
+// These headers are for distributed computations:
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/distributed/tria.h>
constexpr int manifold_id {0};
-
+// This is the actual solver class which performs time iteration and calls the
+// appropriate library functions to do it.
template<int dim>
class CDRProblem
{
ConstraintMatrix constraints;
bool first_run;
+
+ // As is usual in parallel programs, I keep two copies of parts of the
+ // complete solution: <code>locally_relevant_solution</code> contains both
+ // the locally calculated solution as well as the layer of cells at its
+ // boundary (the @ref GlossGhostCells "ghost cells") while
+ // <code>completely_distributed_solution</code> only contains the parts of
+ // the solution computed on the current @ref GlossMPIProcess "MPI process".
TrilinosWrappers::MPI::Vector locally_relevant_solution;
TrilinosWrappers::MPI::Vector completely_distributed_solution;
TrilinosWrappers::MPI::Vector system_rhs;
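+ // In the setup function these are typically initialized as in step-40. A
+ // sketch, assuming <code>mpi_communicator</code> and the two
+ // <code>IndexSet</code> members are present:
+ //
+ //   locally_owned_dofs = dof_handler.locally_owned_dofs();
+ //   DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+ //   locally_relevant_solution.reinit
+ //     (locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
+ //   completely_distributed_solution.reinit(locally_owned_dofs, mpi_communicator);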
int main(int argc, char *argv[])
{
+ // One of the new features in C++11 is the <code>chrono</code> component of
+ // the standard library. This gives us an easy way to time how long the
+ // program takes to run.
auto t0 = std::chrono::high_resolution_clock::now();
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
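+ // At the end of <code>main</code> the elapsed wall time can then be
+ // reported like this (a sketch; the actual program may format its output
+ // differently):
+ //
+ //   auto t1 = std::chrono::high_resolution_clock::now();
+ //   std::cout << "time elapsed: "
+ //             << std::chrono::duration_cast<std::chrono::milliseconds>
+ //                (t1 - t0).count()
+ //             << " milliseconds." << std::endl;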