From 3ed3eb245bd84c2bda8960ca0e639bdf0e4d98be Mon Sep 17 00:00:00 2001 From: Peter Munch Date: Wed, 31 Mar 2021 12:39:35 +0200 Subject: [PATCH] Add step-76 --- doc/doxygen/references.bib | 14 + doc/doxygen/tutorial/tutorial.h.in | 5 + examples/step-67/doc/results.dox | 16 + examples/step-76/CMakeLists.txt | 39 + examples/step-76/doc/builds-on | 1 + examples/step-76/doc/intro.dox | 475 ++++++++ examples/step-76/doc/kind | 1 + examples/step-76/doc/results.dox | 165 +++ examples/step-76/doc/tooltip | 1 + examples/step-76/step-76.cc | 1642 ++++++++++++++++++++++++++++ 10 files changed, 2359 insertions(+) create mode 100644 examples/step-76/CMakeLists.txt create mode 100644 examples/step-76/doc/builds-on create mode 100644 examples/step-76/doc/intro.dox create mode 100644 examples/step-76/doc/kind create mode 100644 examples/step-76/doc/results.dox create mode 100644 examples/step-76/doc/tooltip create mode 100644 examples/step-76/step-76.cc diff --git a/doc/doxygen/references.bib b/doc/doxygen/references.bib index 55f5e7c278..1cdc3d2fee 100644 --- a/doc/doxygen/references.bib +++ b/doc/doxygen/references.bib @@ -901,6 +901,20 @@ year = {2008}, } +% ------------------------------------ +% Step 76 +% ------------------------------------ + +@misc{munch2020hyperdeal, + title={hyper.deal: An efficient, matrix-free finite-element library for high-dimensional partial differential equations}, + author={Peter Munch and Katharina Kormann and Martin Kronbichler}, + year={2020}, + eprint={2002.08110}, + archivePrefix={arXiv}, + primaryClass={cs.MS} +} + + % ------------------------------------ % References used elsewhere % ------------------------------------ diff --git a/doc/doxygen/tutorial/tutorial.h.in b/doc/doxygen/tutorial/tutorial.h.in index dd4ed6d4be..5529cedd50 100644 --- a/doc/doxygen/tutorial/tutorial.h.in +++ b/doc/doxygen/tutorial/tutorial.h.in @@ -603,6 +603,11 @@ *
Keywords: MeshWorker::mesh_loop(), FEInterfaceValues, ConvergenceTable * * + * + * step-76 + * Like step-67, but for demonstrating MPI-3.0 shared-memory features. + *
Keywords: MatrixFree, MatrixFree::cell_loop() + * * * * diff --git a/examples/step-67/doc/results.dox b/examples/step-67/doc/results.dox index 8e599d249e..17c3ac6037 100644 --- a/examples/step-67/doc/results.dox +++ b/examples/step-67/doc/results.dox @@ -803,3 +803,19 @@ compressible Navier--Stokes equations by adding viscous terms, as described in here despite the additional cost of elliptic terms, e.g. via an interior penalty method, one can switch the basis from FE_DGQ to FE_DGQHermite like in the step-59 tutorial program. + + +

+<h4>Using cell-centric loops and shared memory</h4>

+
+In this tutorial, we used face-centric loops: cell and face integrals are
+treated in separate loops, resulting in multiple write accesses into the
+result vector, which is relatively expensive on modern hardware since write
+operations generally also trigger an implicit read operation. Element-centric
+loops, on the other hand, process a cell and, in direct succession, all of
+its 2d faces. Although this kind of loop implies that fluxes have to be
+computed twice (once for each side of an interior face), the fact that the
+result vector has to be accessed only once, and that the resulting algorithm
+is free of race conditions and as such perfectly suitable for shared memory,
+may already give a performance boost. If you are interested in these
+advanced topics, you can take a look at step-76, where we take the present
+tutorial and modify it so that we can use these features.
diff --git a/examples/step-76/CMakeLists.txt b/examples/step-76/CMakeLists.txt
new file mode 100644
index 0000000000..af176f3eb5
--- /dev/null
+++ b/examples/step-76/CMakeLists.txt
@@ -0,0 +1,39 @@
+##
+# CMake script for the step-76 tutorial program:
+##
+
+# Set the name of the project and target:
+SET(TARGET "step-76")
+
+# Declare all source files the target consists of. Here, this is only
+# the one step-X.cc file, but as you expand your project you may wish
+# to add other source files as well. If your project becomes much larger,
+# you may want to either replace the following statement by something like
+#    FILE(GLOB_RECURSE TARGET_SRC  "source/*.cc")
+#    FILE(GLOB_RECURSE TARGET_INC  "include/*.h")
+#    SET(TARGET_SRC ${TARGET_SRC}  ${TARGET_INC})
+# or switch altogether to the large project CMakeLists.txt file discussed
+# in the "CMake in user projects" page accessible from the "User info"
+# page of the documentation.
+SET(TARGET_SRC
+  ${TARGET}.cc
+  )
+
+# Usually, you will not need to modify anything beyond this point...
+
+CMAKE_MINIMUM_REQUIRED(VERSION 3.1.0)
+
+FIND_PACKAGE(deal.II 9.3.0
+  HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR}
+  )
+IF(NOT ${deal.II_FOUND})
+  MESSAGE(FATAL_ERROR "\n"
+    "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n"
+    "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n"
+    "or set an environment variable \"DEAL_II_DIR\" that contains this path."
+    )
+ENDIF()
+
+DEAL_II_INITIALIZE_CACHED_VARIABLES()
+PROJECT(${TARGET})
+DEAL_II_INVOKE_AUTOPILOT()
diff --git a/examples/step-76/doc/builds-on b/examples/step-76/doc/builds-on
new file mode 100644
index 0000000000..ba853388e1
--- /dev/null
+++ b/examples/step-76/doc/builds-on
@@ -0,0 +1 @@
+step-67
diff --git a/examples/step-76/doc/intro.dox b/examples/step-76/doc/intro.dox
new file mode 100644
index 0000000000..38f2f7c9df
--- /dev/null
+++ b/examples/step-76/doc/intro.dox
@@ -0,0 +1,475 @@
+
+ + +This program was contributed by Martin Kronbichler, Peter Munch, and David +Schneider. Many of the features shown here have been added to deal.II during +and for the development of the deal.II-based, efficient, matrix-free +finite-element library for high-dimensional partial differential equations +hyper.deal (see https://github.com/hyperdeal/hyperdeal). For more details and +for applications of the presented features in slightly different contexts +(high-dimensional advection equation and Vlasov-Poisson equations) see the release +paper @cite munch2020hyperdeal. + +This work was partly supported by the German Research Foundation (DFG) through +the project "High-order discontinuous Galerkin for the exa-scale" (ExaDG) +within the priority program "Software for Exascale Computing" (SPPEXA) and +by the Bavarian government through the project "High-order matrix-free finite +element implementations with hybrid parallelization and improved data locality" +within the KONWIHR program. + + + +

+<h1>Introduction</h1>

+
+This tutorial program solves the Euler equations of fluid dynamics, using an
+explicit time integrator with the matrix-free framework applied to a
+high-order discontinuous Galerkin discretization in space. The numerical
+approach used here is identical to that used in step-67; however, we utilize
+different advanced MatrixFree techniques to reach an even higher throughput.
+
+The two main features of this tutorial are:
+- the usage of shared-memory features from MPI-3.0 and
+- the usage of cell-centric loops, which allow us to write to the global vector
+  only once and, therefore, are ideal for the usage of shared memory.
+
+Further topics we discuss in this tutorial are the usage and benefits of the
+template argument VectorizedArrayType (instead of simply using
+VectorizedArray) as well as the possibility to pass lambdas to
+MatrixFree loops.
+
+For details on the numerics, we refer to the documentation of step-67. We
+concentrate here only on the key differences.
+

+<h3>Shared-memory and hybrid parallelization with MPI-3.0</h3>

+ +

+<h4>Motivation</h4>

+
+There exist many shared-memory libraries that are based on threads, like TBB,
+OpenMP, or TaskFlow. Integrating such libraries into existing MPI programs
+allows one to use shared memory. However, these libraries come with an overhead
+for the programmer, since all parallelizable code sections have to be found and
+transformed according to the library used, a difficulty that is compounded if
+some third-party numerical library, like an iterative solver package, relies
+solely on MPI.
+
+Considering a purely MPI-parallelized FEM application, one can identify that
+the major time and memory benefit of using shared memory would come from
+accessing the part of the solution vector owned by the processes on the same
+compute node without the need to make explicit copies and buffer them.
+For this purpose, MPI-3.0 provides shared-memory features based on so-called
+windows, where processes can directly access the data of the neighbors on the
+same shared-memory domain.
+

+<h4>Basic MPI-3.0 commands</h4>

+
+A few relevant MPI-3.0 commands are worth discussing in detail.
+A new MPI communicator comm_sm, which consists of processes from
+the communicator comm that have access to the same shared memory,
+can be created via:
+
+@code
+MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &comm_sm);
+@endcode
+
+The following code snippet shows the simplified allocation routines of
+shared memory for the value type T and the size
+local_size, as well as how to query pointers to the data belonging
+to processes in the same shared-memory domain:
+
+@code
+MPI_Win          win;         // window
+T *              data_this;   // pointer to locally-owned data
+std::vector<T *> data_others; // pointers to shared data
+
+// configure shared memory
+MPI_Info info;
+MPI_Info_create(&info);
+MPI_Info_set(info, "alloc_shared_noncontig", "true");
+
+// allocate shared memory
+MPI_Win_allocate_shared(local_size * sizeof(T), sizeof(T), info, comm_sm, &data_this, &win);
+
+// get pointers to the shared data owned by the processes in the same sm domain
+data_others.resize(size_sm);
+int      disp_unit = 0; // displacement size - an output parameter we don't need right now
+MPI_Aint ssize     = 0; // window size - an output parameter we don't need right now
+for (int i = 0; i < size_sm; ++i)
+  MPI_Win_shared_query(win, i, &ssize, &disp_unit, &data_others[i]);
+
+Assert(data_this == data_others[rank_sm], ExcMessage("Something went wrong!"));
+@endcode
+
+Once the data is not needed anymore, the window has to be freed, which also
+frees the locally-owned data:
+
+@code
+MPI_Win_free(&win);
+@endcode
+
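+For completeness, the size size_sm of the shared-memory communicator and the
+rank rank_sm of the current process within it, which are used in the snippet
+above, can be obtained with the usual MPI calls:
+
+@code
+int rank_sm = 0; // rank within the shared-memory communicator
+int size_sm = 0; // number of processes in the shared-memory communicator
+MPI_Comm_rank(comm_sm, &rank_sm);
+MPI_Comm_size(comm_sm, &size_sm);
+@endcode
+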

+<h4>MPI-3.0 and LinearAlgebra::distributed::Vector</h4>

+
+The commands mentioned in the last section are integrated into
+LinearAlgebra::distributed::Vector and are used to allocate shared memory if
+an optional (second) communicator is provided to the reinit() functions.
+
+For example, a vector can be set up with a partitioner (containing the global
+communicator) and a sub-communicator (containing the processes on the same
+compute node):
+@code
+vec.reinit(partitioner, comm_sm);
+@endcode
+
+Locally owned values and ghost values can be processed as usual. However, now
+users also have read access to the values of the shared-memory neighbors via
+the function:
+@code
+const std::vector<ArrayView<const Number>> &
+LinearAlgebra::distributed::Vector<Number>::shared_vector_data() const;
+@endcode
+
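+Putting these pieces together, a minimal sketch (not taken from the tutorial
+program; it assumes that partitioner is an existing
+std::shared_ptr<const Utilities::MPI::Partitioner> describing the locally
+owned and ghosted degrees of freedom) could look as follows:
+
+@code
+// create a communicator consisting of all processes of MPI_COMM_WORLD that
+// have access to the same shared memory as the current process
+MPI_Comm comm_sm;
+MPI_Comm_split_type(MPI_COMM_WORLD,
+                    MPI_COMM_TYPE_SHARED,
+                    Utilities::MPI::this_mpi_process(MPI_COMM_WORLD),
+                    MPI_INFO_NULL,
+                    &comm_sm);
+
+// set up the vector so that its memory is allocated via an MPI-3.0
+// shared-memory window
+LinearAlgebra::distributed::Vector<Number> vec;
+vec.reinit(partitioner, comm_sm);
+
+// after a ghost-value update, the locally owned data of all processes in the
+// same shared-memory domain can be read without further communication
+vec.update_ghost_values();
+for (const ArrayView<const Number> &data_of_neighbor : vec.shared_vector_data())
+  {
+    // read access to the data of one shared-memory neighbor
+    (void)data_of_neighbor;
+  }
+
+MPI_Comm_free(&comm_sm);
+@endcode
+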

+<h4>MPI-3.0 and MatrixFree</h4>

+
+While LinearAlgebra::distributed::Vector provides the option to allocate
+shared memory and to access the values of shared memory of neighboring
+processes in a coordinated way, it does not actually exploit the benefits of
+using shared memory itself.
+
+The MatrixFree infrastructure, however, does:
+- On the one hand, within the matrix-free loops MatrixFree::loop(),
+  MatrixFree::cell_loop(), and MatrixFree::loop_cell_centric(), only ghost
+  values that need to be updated are updated. Ghost values from
+  shared-memory neighbors can be accessed directly, making buffering, i.e.,
+  copying of the values into the ghost region of a vector, possibly redundant.
+  To deal with possible race conditions, necessary synchronizations are
+  performed within MatrixFree. In the case that values have to be buffered,
+  they are copied directly from the neighboring shared-memory process,
+  bypassing more expensive MPI operations based on MPI_Isend and
+  MPI_Irecv.
+- On the other hand, classes like FEEvaluation and FEFaceEvaluation can read
+  directly from the shared memory, so buffering the values is indeed
+  not necessary in certain cases.
+
+To be able to use the shared-memory capabilities of MatrixFree, MatrixFree
+has to be appropriately configured by providing the user-created
+sub-communicator:
+
+@code
+typename MatrixFree<dim, Number>::AdditionalData additional_data;
+
+// set flags as usual (not shown)
+
+additional_data.communicator_sm = comm_sm;
+
+data.reinit(mapping, dof_handler, constraint, quadrature, additional_data);
+@endcode
+
+
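+Once MatrixFree has been configured in this way, vectors that are created
+through it pick up the matching partitioner together with the
+sub-communicator, so that, typically, no further changes are needed on the
+user side (a short illustration, using the variable names from the snippet
+above):
+
+@code
+LinearAlgebra::distributed::Vector<Number> vec;
+data.initialize_dof_vector(vec);
+@endcode
+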

+<h3>Cell-centric loops</h3>

+ +

+<h4>Motivation: FCL vs. CCL</h4>

+
+"Face-centric loops" (short FCL) visit cells and faces (inner and boundary
+ones) in separate loops. As a consequence, each entity is visited only once
+and fluxes between cells are evaluated only once. How to perform face-centric
+loops with the help of MatrixFree::loop() by providing three functions (one
+for the cell integrals, one for the inner, and one for the boundary faces) has
+been presented in step-59 and step-67.
+
+"Cell-centric loops" (short CCL; called ECL, for element-centric loops, in the
+hyper.deal release paper), in contrast, process a cell and, in direct
+succession, all of its faces (i.e., they visit all faces twice). Their benefit
+has become clear for modern CPU architectures in the literature
+@cite KronbichlerKormann2019, although this kind of loop implies that fluxes
+have to be computed twice (once for each side of an interior face). CCL has
+two primary advantages:
+- On the one hand, entries in the solution vector are written back to main
+  memory exactly once in the case of CCL, while in the case of FCL they are
+  written at least once and, despite cache-efficient scheduling of cell and
+  face loops, typically more often due to cache capacity misses.
+- On the other hand, since each entry of the solution vector is accessed
+  exactly once, no synchronization between threads is needed while accessing
+  the solution vector in the case of CCL. This absence of race conditions
+  during writing into the destination vector makes CCL particularly suitable
+  for shared-memory parallelization.
+
+One should also note that although fluxes are computed twice in the case of
+CCL, this does not automatically translate into doubling of the computation,
+since values already interpolated to the cell quadrature points can be
+interpolated to a face with a simple 1D interpolation.
+

+<h4>Cell-centric loops and MatrixFree</h4>

+ +For cell-centric loop implementations, the function MatrixFree::loop_cell_centric() +can be used, to which the user can pass a function that should be performed on +each cell. + +To derive an appropriate function, which can be passed in MatrixFree::loop_cell_centric(), +one might, in principle, transform/merge the following three functions, which can +be passed to a MatrixFree::loop(): + +@code +matrix_free.template loop( + [&](const auto &data, auto &dst, const auto &src, const auto range) { + // operation performed on cells + + FEEvaluation phi(data); + for (unsigned int cell = range.first; cell < range.second; ++cell) + { + phi.reinit(cell); + phi.gather_evaluate(src, cell_evaluation_flags); + + // some operations on the cell quadrature points + + phi.integrate_scatter(cell_evaluation_flags, dst); + } + }, + [&](const auto &data, auto &dst, const auto &src, const auto range) { + // operation performed inner faces + + FEFaceEvaluation phi_m(data, /*is_interior_face=*/true); + FEFaceEvaluation phi_p(data, /*is_interior_face=*/false); + + for (unsigned int face = range.first; face < range.second; ++face) + { + phi_m.reinit(face); + phi_m.gather_evaluate(src, face_evaluation_flags); + phi_p.reinit(face); + phi_p.gather_evaluate(src, face_evaluation_flags); + + // some operations on the face quadrature points + + phi_m.integrate_scatter(face_evaluation_flags, dst); + phi_p.integrate_scatter(face_evaluation_flags, dst); + } + }, + [&](const auto &data, auto &dst, const auto &src, const auto range) { + // operation performed boundary faces + + FEFaceEvaluation phi_m(data, /*is_interior_face=*/true); + + for (unsigned int face = range.first; face < range.second; ++face) + { + phi_m.reinit(face); + phi_m.gather_evaluate(src, face_evaluation_flags); + + // some operations on the face quadrature points + + phi_m.integrate_scatter(face_evaluation_flags, dst); + } + }, + dst, + src); +@endcode + +in the following way: + +@code +matrix_free.template loop_cell_centric( + [&](const auto &data, auto &dst, const auto &src, const auto range) { + FEEvaluation phi(data); + FEFaceEvaluation phi_m(data, /*is_interior_face=*/true); + FEFaceEvaluation phi_p(data, /*is_interior_face=*/false); + + for (unsigned int cell = range.first; cell < range.second; ++cell) + { + phi.reinit(cell); + phi.gather_evaluate(src, cell_evaluation_flags); + + // some operations on the cell quadrature points + + phi.integrate_scatter(cell_evaluation_flags, dst); + + // loop over all faces of cell + for (unsigned int face = 0; face < GeometryInfo::faces_per_cell; ++face) + { + if (data.get_faces_by_cells_boundary_id(cell, face)[0] == + numbers::internal_face_boundary_id) + { + // internal face + phi_m.reinit(cell, face); + phi_m.gather_evaluate(src, face_evaluation_flags); + phi_p.reinit(cell, face); + phi_p.gather_evaluate(src, face_evaluation_flags); + + // some operations on the face quadrature points + + phi_m.integrate_scatter(face_evaluation_flags, dst); + } + else + { + // boundary face + phi_m.reinit(cell, face); + phi_m.gather_evaluate(src, face_evaluation_flags); + + // some operations on the face quadrature points + + phi_m.integrate_scatter(face_evaluation_flags, dst); + } + } + } + }, + dst, + src); +@endcode + +It should be noted that FEFaceEvaluation is initialized now with two numbers, +the cell number and the local face number. 
The given example only
+highlights how to transform face-centric loops into cell-centric loops and
+is by no means efficient, since data is read and written multiple times
+from and to the global vector and computations are performed
+redundantly. Below, we will discuss advanced techniques that target these
+issues.
+
+To be able to use MatrixFree::loop_cell_centric(), the following flags of
+MatrixFree::AdditionalData have to be enabled:
+
+@code
+typename MatrixFree<dim, Number>::AdditionalData additional_data;
+
+// set flags as usual (not shown)
+
+additional_data.hold_all_faces_to_owned_cells       = true;
+additional_data.mapping_update_flags_faces_by_cells =
+  additional_data.mapping_update_flags_inner_faces |
+  additional_data.mapping_update_flags_boundary_faces;
+
+data.reinit(mapping, dof_handler, constraint, quadrature, additional_data);
+@endcode
+
+In particular, these flags ensure that the internal data structures are set up
+for all faces of the cells.
+
+Currently, cell-centric loops in deal.II only work for uniformly refined
+meshes and if no constraints are applied (which is the standard setting in
+which DG methods are used).
+
+
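+In analogy to MatrixFree::loop(), one can also tell
+MatrixFree::loop_cell_centric() how much data of neighboring cells the face
+integrals actually access, so that the ghost-value exchange can be restricted
+to what is really needed. As a sketch of the call used later in this program
+(template arguments abbreviated and the lambda body omitted), passing
+DataAccessOnFaces::values states that only the values, and no derivatives, of
+neighboring cells are needed:
+
+@code
+matrix_free.template loop_cell_centric<VectorType, VectorType>(
+  [&](const auto &data, auto &dst, const auto &src, const auto range) {
+    // cell and face evaluations as shown above (omitted)
+  },
+  dst,
+  src,
+  /*zero_dst_vector=*/true,
+  MatrixFree<dim, Number>::DataAccessOnFaces::values);
+@endcode
+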

+<h3>Providing lambdas to MatrixFree loops</h3>

+
+The examples given above have already used lambdas, which have been provided
+to matrix-free loops. The following short examples show how to transform
+a version in which a class and a pointer to one of its methods are used into
+a variant in which lambdas are utilized.
+
+In the following code, a class and a pointer to one of its methods, which
+should be interpreted as a cell integral, are passed to
+MatrixFree::cell_loop():
+
+@code
+void
+local_apply_cell(const MatrixFree<dim, Number> &               data,
+                 VectorType &                                  dst,
+                 const VectorType &                            src,
+                 const std::pair<unsigned int, unsigned int> & range) const
+{
+  FEEvaluation phi(data);
+  for (unsigned int cell = range.first; cell < range.second; ++cell)
+    {
+      phi.reinit(cell);
+      phi.gather_evaluate(src, cell_evaluation_flags);
+
+      // some operations on the quadrature points
+
+      phi.integrate_scatter(cell_evaluation_flags, dst);
+    }
+}
+@endcode
+
+@code
+matrix_free.cell_loop(&Operator::local_apply_cell, this, dst, src);
+@endcode
+
+However, it is also possible to achieve the same result by passing an
+anonymous function in the form of a lambda:
+
+@code
+matrix_free.template cell_loop<VectorType, VectorType>(
+  [&](const auto &data, auto &dst, const auto &src, const auto range) {
+    FEEvaluation phi(data);
+    for (unsigned int cell = range.first; cell < range.second; ++cell)
+      {
+        phi.reinit(cell);
+        phi.gather_evaluate(src, cell_evaluation_flags);
+
+        // some operations on the quadrature points
+
+        phi.integrate_scatter(cell_evaluation_flags, dst);
+      }
+  },
+  dst,
+  src);
+@endcode
+

+<h3>VectorizedArrayType</h3>

+
+The class VectorizedArray<Number> is a key component to achieve the high
+node-level performance of the matrix-free algorithms in deal.II.
+It is a wrapper class around a short vector of $n$ entries of type Number and
+maps arithmetic operations to appropriate single-instruction/multiple-data
+(SIMD) concepts by intrinsic functions. The length of the vector can be
+queried by VectorizedArray::size() and its underlying number type by
+VectorizedArray::value_type.
+
+In the default case (VectorizedArray<Number>), the vector length is
+set at compile time of the library to
+match the highest value supported by the given processor architecture.
+However, a second optional template argument can also be specified as
+VectorizedArray<Number, size>, where size explicitly controls the
+vector length within the capabilities of a particular instruction
+set. A full list of supported vector lengths is presented in the following
+table:
+
+<table align="center" class="doxtable">
+  <tr>
+   <th>double</th>
+   <th>float</th>
+   <th>ISA</th>
+  </tr>
+  <tr>
+   <td>VectorizedArray<double, 1></td>
+   <td>VectorizedArray<float, 1></td>
+   <td>(auto-vectorization)</td>
+  </tr>
+  <tr>
+   <td>VectorizedArray<double, 2></td>
+   <td>VectorizedArray<float, 4></td>
+   <td>SSE2/AltiVec</td>
+  </tr>
+  <tr>
+   <td>VectorizedArray<double, 4></td>
+   <td>VectorizedArray<float, 8></td>
+   <td>AVX/AVX2</td>
+  </tr>
+  <tr>
+   <td>VectorizedArray<double, 8></td>
+   <td>VectorizedArray<float, 16></td>
+   <td>AVX-512</td>
+  </tr>
+</table>
+
+This allows users to select the vector length/ISA and, as a consequence, the
+number of cells to be processed at once in matrix-free operator evaluations,
+possibly reducing the pressure on the caches, a severe issue for very high
+degrees (and dimensions).
+
+A possible further reason to reduce the number of filled lanes
+is to simplify debugging: instead of having to look at, e.g., 8
+cells, one can concentrate on a single cell.
+
+The interface of VectorizedArray also enables the replacement by any type with
+a matching interface. Specifically, this prepares deal.II for the std::simd
+class that is planned to become part of the C++23 standard. The following table
+compares the deal.II-specific SIMD classes and the equivalent C++23 classes:
+
+<table align="center" class="doxtable">
+  <tr>
+   <th>VectorizedArray (deal.II)</th>
+   <th>std::simd (C++23)</th>
+  </tr>
+  <tr>
+   <td>VectorizedArray<Number></td>
+   <td>std::experimental::native_simd<Number></td>
+  </tr>
+  <tr>
+   <td>VectorizedArray<Number, size></td>
+   <td>std::experimental::fixed_size_simd<Number, size></td>
+  </tr>
+</table>
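+As a small usage sketch (not part of the tutorial program, and assuming that
+deal.II has been configured for a vector length of at least four doubles on
+the given hardware), the explicitly sized variant can be used as follows:
+
+@code
+using MyVectorizedArrayType = VectorizedArray<double, 4>;
+
+static_assert(MyVectorizedArrayType::size() == 4, "four lanes expected");
+static_assert(std::is_same<MyVectorizedArrayType::value_type, double>::value,
+              "underlying number type is double");
+
+MyVectorizedArrayType a = 1.0; // broadcast 1.0 to all four lanes
+for (unsigned int v = 0; v < MyVectorizedArrayType::size(); ++v)
+  a[v] += v;                   // per-lane access
+
+const MyVectorizedArrayType b = a * a + 3.0; // SIMD arithmetic on all lanes
+@endcode
+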
diff --git a/examples/step-76/doc/kind b/examples/step-76/doc/kind new file mode 100644 index 0000000000..e62f4e7222 --- /dev/null +++ b/examples/step-76/doc/kind @@ -0,0 +1 @@ +fluids diff --git a/examples/step-76/doc/results.dox b/examples/step-76/doc/results.dox new file mode 100644 index 0000000000..2e5e9ed41d --- /dev/null +++ b/examples/step-76/doc/results.dox @@ -0,0 +1,165 @@ +

+<h1>Results</h1>

+ +Running the program with the default settings on a machine with 40 processes +produces the following output: + +@code +Running with 40 MPI processes +Vectorization over 8 doubles = 512 bits (AVX512) +Number of degrees of freedom: 27.648.000 ( = 5 [vars] x 25.600 [cells] x 216 [dofs/cell/var] ) +Time step size: 0.000295952, minimal h: 0.0075, initial transport scaling: 0.00441179 +Time: 0, dt: 0.0003, norm rho: 5.385e-16, rho * u: 1.916e-16, energy: 1.547e-15 ++--------------------------------------+------------------+------------+------------------+ +| Total wallclock time elapsed | 17.52s 10 | 17.52s | 17.52s 11 | +| | | | +| Section | no. calls | min time rank | avg time | max time rank | ++--------------------------------------+------------------+------------+------------------+ +| compute errors | 1 | 0.009594s 16 | 0.009705s | 0.009819s 8 | +| compute transport speed | 22 | 0.1366s 0 | 0.1367s | 0.1368s 18 | +| output | 1 | 1.233s 0 | 1.233s | 1.233s 32 | +| rk time stepping total | 100 | 8.746s 35 | 8.746s | 8.746s 0 | +| rk_stage - integrals L_h | 500 | 8.742s 36 | 8.742s | 8.743s 2 | ++--------------------------------------+------------------+------------+------------------+ +@endcode + +and the following visual output: + + + + + + + + + + +
+(Figure: visual output of the computed solution.)
+ +As a reference, the results of step-67 using FCL are: + +@code +Running with 40 MPI processes +Vectorization over 8 doubles = 512 bits (AVX512) +Number of degrees of freedom: 27.648.000 ( = 5 [vars] x 25.600 [cells] x 216 [dofs/cell/var] ) +Time step size: 0.000295952, minimal h: 0.0075, initial transport scaling: 0.00441179 +Time: 0, dt: 0.0003, norm rho: 5.385e-16, rho * u: 1.916e-16, energy: 1.547e-15 ++-------------------------------------------+------------------+------------+------------------+ +| Total wallclock time elapsed | 13.33s 0 | 13.34s | 13.35s 34 | +| | | | +| Section | no. calls | min time rank | avg time | max time rank | ++-------------------------------------------+------------------+------------+------------------+ +| compute errors | 1 | 0.007977s 10 | 0.008053s | 0.008161s 30 | +| compute transport speed | 22 | 0.1228s 34 | 0.2227s | 0.3845s 0 | +| output | 1 | 1.255s 3 | 1.257s | 1.259s 27 | +| rk time stepping total | 100 | 11.15s 0 | 11.32s | 11.42s 34 | +| rk_stage - integrals L_h | 500 | 8.719s 10 | 8.932s | 9.196s 0 | +| rk_stage - inv mass + vec upd | 500 | 1.944s 0 | 2.377s | 2.55s 10 | ++-------------------------------------------+------------------+------------+------------------+ +@endcode + +By the modifications shown in this tutorial, we were able to achieve a speedup of +27% for the Runge-Kutta stages. + +

+<h3>Possibilities for extensions</h3>

+ +The algorithms are easily extendable to higher dimensions: a high-dimensional +advection operator based on cell-centric loops +is part of the hyper.deal library. An extension of cell-centric loops +to locally-refined meshes is more involved. + +

+<h4>Extension to the compressible Navier-Stokes equations</h4>

+
+The solver presented in this tutorial program can also be extended to the
+compressible Navier–Stokes equations by adding viscous terms, as also
+suggested in step-67. To keep as much of the performance obtained here as
+possible despite the additional cost of elliptic terms, e.g. via an interior
+penalty method, that tutorial has proposed to switch the basis from FE_DGQ to
+FE_DGQHermite like in the step-59 tutorial program. The reasoning behind this
+switch is that in the case of FE_DGQ all values of neighboring cells (i.e.,
+$k+1$ layers) are needed, whilst in the case of FE_DGQHermite only 2 layers
+are needed, making the latter significantly more suitable for higher degrees.
+The additional layers have to be loaded from main memory during the flux
+computation on the one hand, and have to be communicated on the other hand.
+Using the shared-memory capabilities introduced in this tutorial, the second
+point can be eliminated on a single compute node or its influence can be
+reduced in a hybrid context.
+
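+In terms of code, such a switch would mainly affect the finite element used to
+build the FESystem; a minimal sketch (assuming that the viscous terms and the
+additional face-data access are handled as discussed in step-59 and step-67)
+is:
+
+@code
+// instead of the nodal FE_DGQ basis used in this program,
+//   FESystem<dim> fe(FE_DGQ<dim>(fe_degree), dim + 2);
+// use the Hermite-like basis, of which only two layers of degrees of freedom
+// per coordinate direction contribute to the values and first derivatives on
+// a face:
+FESystem<dim> fe(FE_DGQHermite<dim>(fe_degree), dim + 2);
+@endcode
+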

+<h4>Block Gauss-Seidel-like preconditioners</h4>

+
+Cell-centric loops could be used to create block Gauss-Seidel preconditioners
+that are multiplicative within one process and additive over processes. In
+contrast to Jacobi-type preconditioners, these preconditioners use, during the
+flux computation, values from neighboring cells that have already been
+updated. The following pseudo-code visualizes how this could in principle be
+achieved:
+
+@code
+// vector monitoring whether cells have been updated or not
+Vector visit_flags(data.n_cell_batches () + data.n_ghost_cell_batches ());
+
+// element centric loop with a modified kernel
+data.template loop_cell_centric(
+  [&](const auto &data, auto &dst, const auto &src, const auto cell_range) {
+
+    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
+      {
+        // cell integral as usual (not shown)
+
+        for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
+          {
+            const auto boundary_id = data.get_faces_by_cells_boundary_id(cell, face)[0];
+
+            if (boundary_id == numbers::internal_face_boundary_id)
+              {
+                phi_p.reinit(cell, face);
+
+                const auto flags = phi_p.read_cell_data(visit_flags);
+                const auto all_neighbors_have_been_updated =
+                  *std::min_element(flags.begin(),
+                                    flags.begin() +
+                                      data.n_active_entries_per_cell_batch(cell)) == 1;
+
+                if(all_neighbors_have_been_updated)
+                  phi_p.gather_evaluate(dst, EvaluationFlags::values);
+                else
+                  phi_p.gather_evaluate(src, EvaluationFlags::values);
+
+                // continue as usual (not shown)
+              }
+            else
+              {
+                // boundary integral as usual (not shown)
+              }
+          }
+
+        // continue as above and apply your favorite algorithm to invert
+        // the cell-local operator (not shown)
+
+        // mark the cells of this batch as updated
+        phi.set_cell_data(visit_flags, VectorizedArrayType(1.0));
+      }
+  },
+  dst,
+  src,
+  true,
+  MatrixFree::DataAccessOnFaces::values);
+@endcode
+
+For this purpose, one can exploit the cell-data vector capabilities of
+MatrixFree and the range-based iteration capabilities of VectorizedArray.
+
+Please note that in the given example we process VectorizedArrayType::size()
+blocks at once, since each lane corresponds to one block. We consider blocks
+as updated if all blocks processed by a vector register have been updated. In
+the case of Cartesian meshes this is a reasonable approach; however, for
+general unstructured meshes this conservative approach might lead to a
+decrease in the efficiency of the preconditioner. A reduction of the number of
+cells processed in parallel by explicitly reducing the number of lanes used by
+VectorizedArrayType might increase the quality of the preconditioner, but at
+the cost that each iteration might be more expensive. This dilemma leads us to
+a further "possibility for extension": vectorization within an element.
diff --git a/examples/step-76/doc/tooltip b/examples/step-76/doc/tooltip
new file mode 100644
index 0000000000..3fe0c3bfa6
--- /dev/null
+++ b/examples/step-76/doc/tooltip
@@ -0,0 +1 @@
+An alternative, matrix-free implementation of an explicit time integrator for the Euler equations, using cell-centric loops and MPI-3.0 shared memory.
diff --git a/examples/step-76/step-76.cc b/examples/step-76/step-76.cc
new file mode 100644
index 0000000000..bbc7986216
--- /dev/null
+++ b/examples/step-76/step-76.cc
@@ -0,0 +1,1642 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2020 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * The full text of the license can be found in the file LICENSE.md at + * the top level directory of deal.II. + * + * --------------------------------------------------------------------- + + * + * Author: Martin Kronbichler, Peter Munch, David Schneider, 2020 + */ + +// @sect3{Parameters and utility functions} + +// The same includes as in step-67: +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include + +// A new include for categorizing of cells according to their boundary IDs: +#include + + + +namespace Euler_DG +{ + using namespace dealii; + + // The same input parameters as in step-67: + constexpr unsigned int testcase = 1; + constexpr unsigned int dimension = 2; + constexpr unsigned int n_global_refinements = 2; + constexpr unsigned int fe_degree = 5; + constexpr unsigned int n_q_points_1d = fe_degree + 2; + + // This parameter specifies the size of the shared-memory group. Currently, + // only the values 1 and numbers::invalid_unsigned_int is possible, leading + // to the options that the memory features can be turned off or all processes + // having access to the same shared-memory domain are grouped together. + constexpr unsigned int group_size = numbers::invalid_unsigned_int; + + using Number = double; + + // Here, the type of the data structure is chosen for vectorization. In the + // default case, VectorizedArray is used, i.e., the highest + // instruction-set-architecture extension available on the given hardware with + // the maximum number of vector lanes is used. However, one might reduce + // the number of filled lanes, e.g., by writing + // using VectorizedArrayType = VectorizedArray to only + // process 4 cells. + using VectorizedArrayType = VectorizedArray; + + // The following parameters have not changed: + constexpr double gamma = 1.4; + constexpr double final_time = testcase == 0 ? 10 : 2.0; + constexpr double output_tick = testcase == 0 ? 1 : 0.05; + + const double courant_number = 0.15 / std::pow(fe_degree, 1.5); + + // Specify max number of time steps useful for performance studies. 
+ constexpr unsigned int max_time_steps = numbers::invalid_unsigned_int; + + // Runge-Kutta-related functions copied from step-67 and slightly modified + // with the purpose to minimize global vector access: + enum LowStorageRungeKuttaScheme + { + stage_3_order_3, + stage_5_order_4, + stage_7_order_4, + stage_9_order_5, + }; + constexpr LowStorageRungeKuttaScheme lsrk_scheme = stage_5_order_4; + + + + class LowStorageRungeKuttaIntegrator + { + public: + LowStorageRungeKuttaIntegrator(const LowStorageRungeKuttaScheme scheme) + { + TimeStepping::runge_kutta_method lsrk; + switch (scheme) + { + case stage_3_order_3: + lsrk = TimeStepping::LOW_STORAGE_RK_STAGE3_ORDER3; + break; + case stage_5_order_4: + lsrk = TimeStepping::LOW_STORAGE_RK_STAGE5_ORDER4; + break; + case stage_7_order_4: + lsrk = TimeStepping::LOW_STORAGE_RK_STAGE7_ORDER4; + break; + case stage_9_order_5: + lsrk = TimeStepping::LOW_STORAGE_RK_STAGE9_ORDER5; + break; + + default: + AssertThrow(false, ExcNotImplemented()); + } + TimeStepping::LowStorageRungeKutta< + LinearAlgebra::distributed::Vector> + rk_integrator(lsrk); + std::vector ci; // not used + rk_integrator.get_coefficients(ai, bi, ci); + } + + unsigned int n_stages() const + { + return bi.size(); + } + + template + void perform_time_step(const Operator &pde_operator, + const double current_time, + const double time_step, + VectorType & solution, + VectorType & vec_ri, + VectorType & vec_ki) const + { + AssertDimension(ai.size() + 1, bi.size()); + + vec_ki.swap(solution); + + double sum_previous_bi = 0; + for (unsigned int stage = 0; stage < bi.size(); ++stage) + { + const double c_i = stage == 0 ? 0 : sum_previous_bi + ai[stage - 1]; + + pde_operator.perform_stage(stage, + current_time + c_i * time_step, + bi[stage] * time_step, + (stage == bi.size() - 1 ? + 0 : + ai[stage] * time_step), + (stage % 2 == 0 ? vec_ki : vec_ri), + (stage % 2 == 0 ? vec_ri : vec_ki), + solution); + + if (stage > 0) + sum_previous_bi += bi[stage - 1]; + } + } + + private: + std::vector bi; + std::vector ai; + }; + + + // Euler-specific utility functions from step-67: + enum EulerNumericalFlux + { + lax_friedrichs_modified, + harten_lax_vanleer, + }; + constexpr EulerNumericalFlux numerical_flux_type = lax_friedrichs_modified; + + + + template + class ExactSolution : public Function + { + public: + ExactSolution(const double time) + : Function(dim + 2, time) + {} + + virtual double value(const Point & p, + const unsigned int component = 0) const override; + }; + + + + template + double ExactSolution::value(const Point & x, + const unsigned int component) const + { + const double t = this->get_time(); + + switch (testcase) + { + case 0: + { + Assert(dim == 2, ExcNotImplemented()); + const double beta = 5; + + Point x0; + x0[0] = 5.; + const double radius_sqr = + (x - x0).norm_square() - 2. * (x[0] - x0[0]) * t + t * t; + const double factor = + beta / (numbers::PI * 2) * std::exp(1. - radius_sqr); + const double density_log = std::log2( + std::abs(1. - (gamma - 1.) / gamma * 0.25 * factor * factor)); + const double density = std::exp2(density_log * (1. / (gamma - 1.))); + const double u = 1. - factor * (x[1] - x0[1]); + const double v = factor * (x[0] - t - x0[0]); + + if (component == 0) + return density; + else if (component == 1) + return density * u; + else if (component == 2) + return density * v; + else + { + const double pressure = + std::exp2(density_log * (gamma / (gamma - 1.))); + return pressure / (gamma - 1.) 
+ + 0.5 * (density * u * u + density * v * v); + } + } + + case 1: + { + if (component == 0) + return 1.; + else if (component == 1) + return 0.4; + else if (component == dim + 1) + return 3.097857142857143; + else + return 0.; + } + + default: + Assert(false, ExcNotImplemented()); + return 0.; + } + } + + + + template + inline DEAL_II_ALWAYS_INLINE // + Tensor<1, dim, Number> + euler_velocity(const Tensor<1, dim + 2, Number> &conserved_variables) + { + const Number inverse_density = Number(1.) / conserved_variables[0]; + + Tensor<1, dim, Number> velocity; + for (unsigned int d = 0; d < dim; ++d) + velocity[d] = conserved_variables[1 + d] * inverse_density; + + return velocity; + } + + template + inline DEAL_II_ALWAYS_INLINE // + Number + euler_pressure(const Tensor<1, dim + 2, Number> &conserved_variables) + { + const Tensor<1, dim, Number> velocity = + euler_velocity(conserved_variables); + + Number rho_u_dot_u = conserved_variables[1] * velocity[0]; + for (unsigned int d = 1; d < dim; ++d) + rho_u_dot_u += conserved_variables[1 + d] * velocity[d]; + + return (gamma - 1.) * (conserved_variables[dim + 1] - 0.5 * rho_u_dot_u); + } + + template + inline DEAL_II_ALWAYS_INLINE // + Tensor<1, dim + 2, Tensor<1, dim, Number>> + euler_flux(const Tensor<1, dim + 2, Number> &conserved_variables) + { + const Tensor<1, dim, Number> velocity = + euler_velocity(conserved_variables); + const Number pressure = euler_pressure(conserved_variables); + + Tensor<1, dim + 2, Tensor<1, dim, Number>> flux; + for (unsigned int d = 0; d < dim; ++d) + { + flux[0][d] = conserved_variables[1 + d]; + for (unsigned int e = 0; e < dim; ++e) + flux[e + 1][d] = conserved_variables[e + 1] * velocity[d]; + flux[d + 1][d] += pressure; + flux[dim + 1][d] = + velocity[d] * (conserved_variables[dim + 1] + pressure); + } + + return flux; + } + + template + inline DEAL_II_ALWAYS_INLINE // + Tensor<1, n_components, Number> + operator*(const Tensor<1, n_components, Tensor<1, dim, Number>> &matrix, + const Tensor<1, dim, Number> & vector) + { + Tensor<1, n_components, Number> result; + for (unsigned int d = 0; d < n_components; ++d) + result[d] = matrix[d] * vector; + return result; + } + + template + inline DEAL_II_ALWAYS_INLINE // + Tensor<1, dim + 2, Number> + euler_numerical_flux(const Tensor<1, dim + 2, Number> &u_m, + const Tensor<1, dim + 2, Number> &u_p, + const Tensor<1, dim, Number> & normal) + { + const auto velocity_m = euler_velocity(u_m); + const auto velocity_p = euler_velocity(u_p); + + const auto pressure_m = euler_pressure(u_m); + const auto pressure_p = euler_pressure(u_p); + + const auto flux_m = euler_flux(u_m); + const auto flux_p = euler_flux(u_p); + + switch (numerical_flux_type) + { + case lax_friedrichs_modified: + { + const auto lambda = + 0.5 * std::sqrt(std::max(velocity_p.norm_square() + + gamma * pressure_p * (1. / u_p[0]), + velocity_m.norm_square() + + gamma * pressure_m * (1. / u_m[0]))); + + return 0.5 * (flux_m * normal + flux_p * normal) + + 0.5 * lambda * (u_m - u_p); + } + + case harten_lax_vanleer: + { + const auto avg_velocity_normal = + 0.5 * ((velocity_m + velocity_p) * normal); + const auto avg_c = std::sqrt(std::abs( + 0.5 * gamma * + (pressure_p * (1. / u_p[0]) + pressure_m * (1. / u_m[0])))); + const Number s_pos = + std::max(Number(), avg_velocity_normal + avg_c); + const Number s_neg = + std::min(Number(), avg_velocity_normal - avg_c); + const Number inverse_s = Number(1.) 
/ (s_pos - s_neg); + + return inverse_s * + ((s_pos * (flux_m * normal) - s_neg * (flux_p * normal)) - + s_pos * s_neg * (u_m - u_p)); + } + + default: + { + Assert(false, ExcNotImplemented()); + return {}; + } + } + } + + + + // General-purpose utility functions from step-67: + template + VectorizedArrayType + evaluate_function(const Function & function, + const Point &p_vectorized, + const unsigned int component) + { + VectorizedArrayType result; + for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v) + { + Point p; + for (unsigned int d = 0; d < dim; ++d) + p[d] = p_vectorized[d][v]; + result[v] = function.value(p, component); + } + return result; + } + + + template + Tensor<1, n_components, VectorizedArrayType> + evaluate_function(const Function & function, + const Point &p_vectorized) + { + AssertDimension(function.n_components, n_components); + Tensor<1, n_components, VectorizedArrayType> result; + for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v) + { + Point p; + for (unsigned int d = 0; d < dim; ++d) + p[d] = p_vectorized[d][v]; + for (unsigned int d = 0; d < n_components; ++d) + result[d][v] = function.value(p, d); + } + return result; + } + + + // @sect3{Euler operator using a cell-centric loop and MPI-3.0 shared memory} + + // Euler operator from step-67 with some changes as detailed below: + template + class EulerOperator + { + public: + static constexpr unsigned int n_quadrature_points_1d = n_points_1d; + + EulerOperator(TimerOutput &timer_output); + + ~EulerOperator(); + + void reinit(const Mapping & mapping, + const DoFHandler &dof_handler); + + void set_inflow_boundary(const types::boundary_id boundary_id, + std::unique_ptr> inflow_function); + + void set_subsonic_outflow_boundary( + const types::boundary_id boundary_id, + std::unique_ptr> outflow_energy); + + void set_wall_boundary(const types::boundary_id boundary_id); + + void set_body_force(std::unique_ptr> body_force); + + void + perform_stage(const unsigned int stage, + const Number cur_time, + const Number bi, + const Number ai, + const LinearAlgebra::distributed::Vector ¤t_ri, + LinearAlgebra::distributed::Vector & vec_ki, + LinearAlgebra::distributed::Vector &solution) const; + + void project(const Function & function, + LinearAlgebra::distributed::Vector &solution) const; + + std::array compute_errors( + const Function & function, + const LinearAlgebra::distributed::Vector &solution) const; + + double compute_cell_transport_speed( + const LinearAlgebra::distributed::Vector &solution) const; + + void + initialize_vector(LinearAlgebra::distributed::Vector &vector) const; + + private: + // Instance of SubCommunicatorWrapper containing the sub-communicator, which + // we need to pass to MatrixFree::reinit() to be able to exploit MPI-3.0 + // shared-memory capabilities: + MPI_Comm subcommunicator; + + MatrixFree data; + + TimerOutput &timer; + + std::map>> + inflow_boundaries; + std::map>> + subsonic_outflow_boundaries; + std::set wall_boundaries; + std::unique_ptr> body_force; + }; + + + + // New constructor, which creates a sub-communicator. The user can specify + // the size of the sub-communicator via the global parameter group_size. If + // the size is set to -1, all MPI processes of a + // shared-memory domain are combined to a group. The specified size is + // decisive for the benefit of the shared-memory capabilities of MatrixFree + // and, therefore, setting the size to -1 is a + // reasonable choice. 
By setting, the size to 1 users explicitly + // disable the MPI-3.0 shared-memory features of MatrixFree and rely + // completely on MPI-2.0 features, like MPI_Isend and + // MPI_Irecv. + template + EulerOperator::EulerOperator(TimerOutput &timer) + : timer(timer) + { +#if DEAL_II_MPI_VERSION_GTE(3, 0) + if (group_size == 1) + { + this->subcommunicator = MPI_COMM_SELF; + } + else if (group_size == numbers::invalid_unsigned_int) + { + const auto rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + + MPI_Comm_split_type(MPI_COMM_WORLD, + MPI_COMM_TYPE_SHARED, + rank, + MPI_INFO_NULL, + &subcommunicator); + } + else + { + Assert(false, ExcNotImplemented()); + } +#else + (void)subcommunicator; + (void)group_size; + this->subcommunicator = MPI_COMM_SELF; +#endif + } + + + // New destructor responsible for freeing of the sub-communicator. + template + EulerOperator::~EulerOperator() + { +#ifdef DEAL_II_WITH_MPI + if (this->subcommunicator != MPI_COMM_SELF) + MPI_Comm_free(&subcommunicator); +#endif + } + + + // Modified reinit() function to setup the internal data structures in + // MatrixFree in a way that it is usable by the cell-centric loops and + // the MPI-3.0 shared-memory capabilities are used: + template + void EulerOperator::reinit( + const Mapping & mapping, + const DoFHandler &dof_handler) + { + const std::vector *> dof_handlers = {&dof_handler}; + const AffineConstraints dummy; + const std::vector *> constraints = {&dummy}; + const std::vector> quadratures = {QGauss<1>(n_q_points_1d), + QGauss<1>(fe_degree + 1)}; + + typename MatrixFree::AdditionalData + additional_data; + additional_data.mapping_update_flags = + (update_gradients | update_JxW_values | update_quadrature_points | + update_values); + additional_data.mapping_update_flags_inner_faces = + (update_JxW_values | update_quadrature_points | update_normal_vectors | + update_values); + additional_data.mapping_update_flags_boundary_faces = + (update_JxW_values | update_quadrature_points | update_normal_vectors | + update_values); + additional_data.tasks_parallel_scheme = + MatrixFree::AdditionalData::none; + + // Categorize cells so that all lanes have the same boundary IDs for each + // face. This is strictly not necessary, however, allows to write simpler + // code in EulerOperator::perform_stage() without masking, since it is + // guaranteed that all cells grouped together (in a VectorizedArray) + // have to perform exactly the same operation also on the faces. + MatrixFreeTools::categorize_by_boundary_ids(dof_handler.get_triangulation(), + additional_data); + + // Enable MPI-3.0 shared-memory capabilities within MatrixFree by providing + // the sub-communicator: + additional_data.communicator_sm = subcommunicator; + + data.reinit( + mapping, dof_handlers, constraints, quadratures, additional_data); + } + + + // The following function does an entire stage of a Runge--Kutta update and is + // - alongside the slightly modified setup - the heart of this tutorial + // compared to step-67. + // + // In contrast to step-67, we are not executing the advection step + // (using MatrixFree::loop()) and the inverse mass-matrix step + // (using MatrixFree::cell_loop()) in sequence, but evaluate everything in + // one go inside of MatrixFree::loop_cell_centric(). This function expects + // a single function that is executed on each locally-owned (macro) cell as + // parameter so that we need to loop over all faces of that cell and perform + // needed integration steps on our own. 
+ // + // The following function contains to a large extent copies of the following + // functions from step-67 so that comments related the evaluation of the weak + // form are skipped here: + // - EulerDG::EulerOperator::local_apply_cell + // - EulerDG::EulerOperator::local_apply_face + // - EulerDG::EulerOperator::local_apply_boundary_face + // - EulerDG::EulerOperator::local_apply_inverse_mass_matrix + template + void EulerOperator::perform_stage( + const unsigned int stage, + const Number current_time, + const Number bi, + const Number ai, + const LinearAlgebra::distributed::Vector ¤t_ri, + LinearAlgebra::distributed::Vector & vec_ki, + LinearAlgebra::distributed::Vector & solution) const + { + for (auto &i : inflow_boundaries) + i.second->set_time(current_time); + for (auto &i : subsonic_outflow_boundaries) + i.second->set_time(current_time); + + // Run a cell-centric loop by calling MatrixFree::loop_cell_centric() and + // providing a lambda containing the effects of the cell, face and + // boundary-face integrals: + data.template loop_cell_centric, + LinearAlgebra::distributed::Vector>( + [&](const auto &data, auto &dst, const auto &src, const auto cell_range) { + using FECellIntegral = FEEvaluation; + using FEFaceIntegral = FEFaceEvaluation; + + FECellIntegral phi(data); + FECellIntegral phi_temp(data); + FEFaceIntegral phi_m(data, true); + FEFaceIntegral phi_p(data, false); + + Tensor<1, dim, VectorizedArrayType> constant_body_force; + const Functions::ConstantFunction *constant_function = + dynamic_cast *>(body_force.get()); + + if (constant_function) + constant_body_force = + evaluate_function( + *constant_function, Point()); + + const dealii::internal::EvaluatorTensorProduct< + dealii::internal::EvaluatorVariant::evaluate_evenodd, + dim, + n_points_1d, + n_points_1d, + VectorizedArrayType> + eval(AlignedVector(), + data.get_shape_info().data[0].shape_gradients_collocation_eo, + AlignedVector()); + + AlignedVector buffer(phi.static_n_q_points * + phi.n_components); + + // Loop over all cell batches: + for (unsigned int cell = cell_range.first; cell < cell_range.second; + ++cell) + { + phi.reinit(cell); + + if (ai != Number()) + phi_temp.reinit(cell); + + // Read values from global vector and compute the values at the + // quadrature points: + if (ai != Number() && stage == 0) + { + phi.read_dof_values(src); + + for (unsigned int i = 0; + i < phi.static_dofs_per_component * (dim + 2); + ++i) + phi_temp.begin_dof_values()[i] = phi.begin_dof_values()[i]; + + phi.evaluate(EvaluationFlags::values); + } + else + { + phi.gather_evaluate(src, EvaluationFlags::values); + } + + // Buffer the computed values at the quadrature points, since + // these are overridden by FEEvaluation::submit_value() in the next + // step, however, are needed later on for the face integrals: + for (unsigned int i = 0; i < phi.static_n_q_points * (dim + 2); ++i) + buffer[i] = phi.begin_values()[i]; + + // Apply the cell integral at the cell quadrature points. See also + // the function EulerOperator::local_apply_cell() from + // step-67: + for (unsigned int q = 0; q < phi.n_q_points; ++q) + { + const auto w_q = phi.get_value(q); + phi.submit_gradient(euler_flux(w_q), q); + if (body_force.get() != nullptr) + { + const Tensor<1, dim, VectorizedArrayType> force = + constant_function ? 
+ constant_body_force : + evaluate_function( + *body_force, phi.quadrature_point(q)); + + Tensor<1, dim + 2, VectorizedArrayType> forcing; + for (unsigned int d = 0; d < dim; ++d) + forcing[d + 1] = w_q[0] * force[d]; + for (unsigned int d = 0; d < dim; ++d) + forcing[dim + 1] += force[d] * w_q[d + 1]; + + phi.submit_value(forcing, q); + } + } + + // Test with the gradient of the test functions in the quadrature + // points. We skip the interpolation back to the support points + // of the element, since we first collect all contributions in the + // cell quadrature points and only perform the interpolation back + // as the final step. + { + auto *values_ptr = phi.begin_values(); + auto *gradient_ptr = phi.begin_gradients(); + + for (unsigned int c = 0; c < dim + 2; ++c) + { + if (dim >= 1 && body_force.get() == nullptr) + eval.template gradients<0, false, false>( + gradient_ptr + phi.static_n_q_points * 0, values_ptr); + else if (dim >= 1) + eval.template gradients<0, false, true>( + gradient_ptr + phi.static_n_q_points * 0, values_ptr); + if (dim >= 2) + eval.template gradients<1, false, true>( + gradient_ptr + phi.static_n_q_points * 1, values_ptr); + if (dim >= 3) + eval.template gradients<2, false, true>( + gradient_ptr + phi.static_n_q_points * 2, values_ptr); + + values_ptr += phi.static_n_q_points; + gradient_ptr += phi.static_n_q_points * dim; + } + } + + // Loop over all faces of the current cell: + for (unsigned int face = 0; + face < GeometryInfo::faces_per_cell; + ++face) + { + // Determine the boundary ID of the current face. Since we have + // set up MatrixFree in a way that all filled lanes have + // guaranteed the same boundary ID, we can select the + // boundary ID of the first lane. + const auto boundary_ids = + data.get_faces_by_cells_boundary_id(cell, face); + + Assert(std::equal(boundary_ids.begin(), + boundary_ids.begin() + + data.n_active_entries_per_cell_batch(cell), + boundary_ids.begin()), + ExcMessage("Boundary IDs of lanes differ.")); + + const auto boundary_id = boundary_ids[0]; + + phi_m.reinit(cell, face); + + // Interpolate the values from the cell quadrature points to the + // quadrature points of the current face via a simple 1D + // interpolation: + internal::FEFaceNormalEvaluationImpl:: + template interpolate_quadrature( + dim + 2, + data.get_shape_info(), + buffer.data(), + phi_m.begin_values(), + false, + face); + + // Check if the face is an internal or a boundary face and + // select a different code path based on this information: + if (boundary_id == numbers::internal_face_boundary_id) + { + // Process and internal face. The following lines of code + // are a copy of the function + // EulerDG::EulerOperator::local_apply_face + // from step-67: + phi_p.reinit(cell, face); + phi_p.gather_evaluate(src, EvaluationFlags::values); + + for (unsigned int q = 0; q < phi_m.n_q_points; ++q) + { + const auto numerical_flux = + euler_numerical_flux(phi_m.get_value(q), + phi_p.get_value(q), + phi_m.get_normal_vector(q)); + phi_m.submit_value(-numerical_flux, q); + } + } + else + { + // Process a boundary face. 
These following lines of code + // are a copy of the function + // EulerDG::EulerOperator::local_apply_boundary_face + // from step-67: + for (unsigned int q = 0; q < phi_m.n_q_points; ++q) + { + const auto w_m = phi_m.get_value(q); + const auto normal = phi_m.get_normal_vector(q); + + auto rho_u_dot_n = w_m[1] * normal[0]; + for (unsigned int d = 1; d < dim; ++d) + rho_u_dot_n += w_m[1 + d] * normal[d]; + + bool at_outflow = false; + + Tensor<1, dim + 2, VectorizedArrayType> w_p; + + if (wall_boundaries.find(boundary_id) != + wall_boundaries.end()) + { + w_p[0] = w_m[0]; + for (unsigned int d = 0; d < dim; ++d) + w_p[d + 1] = + w_m[d + 1] - 2. * rho_u_dot_n * normal[d]; + w_p[dim + 1] = w_m[dim + 1]; + } + else if (inflow_boundaries.find(boundary_id) != + inflow_boundaries.end()) + w_p = evaluate_function( + *inflow_boundaries.find(boundary_id)->second, + phi_m.quadrature_point(q)); + else if (subsonic_outflow_boundaries.find( + boundary_id) != + subsonic_outflow_boundaries.end()) + { + w_p = w_m; + w_p[dim + 1] = + evaluate_function(*subsonic_outflow_boundaries + .find(boundary_id) + ->second, + phi_m.quadrature_point(q), + dim + 1); + at_outflow = true; + } + else + AssertThrow(false, + ExcMessage( + "Unknown boundary id, did " + "you set a boundary condition for " + "this part of the domain boundary?")); + + auto flux = euler_numerical_flux(w_m, w_p, normal); + + if (at_outflow) + for (unsigned int v = 0; + v < VectorizedArrayType::size(); + ++v) + { + if (rho_u_dot_n[v] < -1e-12) + for (unsigned int d = 0; d < dim; ++d) + flux[d + 1][v] = 0.; + } + + phi_m.submit_value(-flux, q); + } + } + + // Evaluate local integrals related to cell by quadrature and + // add into cell contribution via a simple 1D interpolation: + internal::FEFaceNormalEvaluationImpl:: + template interpolate_quadrature( + dim + 2, + data.get_shape_info(), + phi_m.begin_values(), + phi.begin_values(), + false, + face); + } + + // Apply inverse mass matrix in the cell quadrature points. 
See + // also the function + // EulerDG::EulerOperator::local_apply_inverse_mass_matrix() + // from step-67: + for (unsigned int q = 0; q < phi.static_n_q_points; ++q) + { + const auto factor = VectorizedArrayType(1.0) / phi.JxW(q); + for (unsigned int c = 0; c < dim + 2; ++c) + phi.begin_values()[c * phi.static_n_q_points + q] = + phi.begin_values()[c * phi.static_n_q_points + q] * factor; + } + + // Transform values from collocation space to the original + // Gauss-Lobatto space: + internal::FEEvaluationImplBasisChange< + dealii::internal::EvaluatorVariant::evaluate_evenodd, + internal::EvaluatorQuantity::hessian, + dim, + degree + 1, + n_points_1d, + VectorizedArrayType, + VectorizedArrayType>::do_backward(dim + 2, + data.get_shape_info() + .data[0] + .inverse_shape_values_eo, + false, + phi.begin_values(), + phi.begin_dof_values()); + + // Perform Runge-Kutta update and write results back to global + // vectors: + if (ai == Number()) + { + for (unsigned int q = 0; q < phi.static_dofs_per_cell; ++q) + phi.begin_dof_values()[q] = bi * phi.begin_dof_values()[q]; + phi.distribute_local_to_global(solution); + } + else + { + if (stage != 0) + phi_temp.read_dof_values(solution); + + for (unsigned int q = 0; q < phi.static_dofs_per_cell; ++q) + { + const auto K_i = phi.begin_dof_values()[q]; + + phi.begin_dof_values()[q] = + phi_temp.begin_dof_values()[q] + (ai * K_i); + + phi_temp.begin_dof_values()[q] += bi * K_i; + } + phi.set_dof_values(dst); + phi_temp.set_dof_values(solution); + } + } + }, + vec_ki, + current_ri, + true, + MatrixFree::DataAccessOnFaces::values); + } + + + + // From here, the code of step-67 has not changed. + template + void EulerOperator::initialize_vector( + LinearAlgebra::distributed::Vector &vector) const + { + data.initialize_dof_vector(vector); + } + + + + template + void EulerOperator::set_inflow_boundary( + const types::boundary_id boundary_id, + std::unique_ptr> inflow_function) + { + AssertThrow(subsonic_outflow_boundaries.find(boundary_id) == + subsonic_outflow_boundaries.end() && + wall_boundaries.find(boundary_id) == wall_boundaries.end(), + ExcMessage("You already set the boundary with id " + + std::to_string(static_cast(boundary_id)) + + " to another type of boundary before now setting " + + "it as inflow")); + AssertThrow(inflow_function->n_components == dim + 2, + ExcMessage("Expected function with dim+2 components")); + + inflow_boundaries[boundary_id] = std::move(inflow_function); + } + + + + template + void EulerOperator::set_subsonic_outflow_boundary( + const types::boundary_id boundary_id, + std::unique_ptr> outflow_function) + { + AssertThrow(inflow_boundaries.find(boundary_id) == + inflow_boundaries.end() && + wall_boundaries.find(boundary_id) == wall_boundaries.end(), + ExcMessage("You already set the boundary with id " + + std::to_string(static_cast(boundary_id)) + + " to another type of boundary before now setting " + + "it as subsonic outflow")); + AssertThrow(outflow_function->n_components == dim + 2, + ExcMessage("Expected function with dim+2 components")); + + subsonic_outflow_boundaries[boundary_id] = std::move(outflow_function); + } + + + + template + void EulerOperator::set_wall_boundary( + const types::boundary_id boundary_id) + { + AssertThrow(inflow_boundaries.find(boundary_id) == + inflow_boundaries.end() && + subsonic_outflow_boundaries.find(boundary_id) == + subsonic_outflow_boundaries.end(), + ExcMessage("You already set the boundary with id " + + std::to_string(static_cast(boundary_id)) + + " to another type of boundary before 


  // From here, the code of step-67 has not changed.
  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::initialize_vector(
    LinearAlgebra::distributed::Vector<Number> &vector) const
  {
    data.initialize_dof_vector(vector);
  }



  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_inflow_boundary(
    const types::boundary_id       boundary_id,
    std::unique_ptr<Function<dim>> inflow_function)
  {
    AssertThrow(subsonic_outflow_boundaries.find(boundary_id) ==
                    subsonic_outflow_boundaries.end() &&
                  wall_boundaries.find(boundary_id) == wall_boundaries.end(),
                ExcMessage("You already set the boundary with id " +
                           std::to_string(static_cast<int>(boundary_id)) +
                           " to another type of boundary before now setting " +
                           "it as inflow"));
    AssertThrow(inflow_function->n_components == dim + 2,
                ExcMessage("Expected function with dim+2 components"));

    inflow_boundaries[boundary_id] = std::move(inflow_function);
  }



  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_subsonic_outflow_boundary(
    const types::boundary_id       boundary_id,
    std::unique_ptr<Function<dim>> outflow_function)
  {
    AssertThrow(inflow_boundaries.find(boundary_id) ==
                    inflow_boundaries.end() &&
                  wall_boundaries.find(boundary_id) == wall_boundaries.end(),
                ExcMessage("You already set the boundary with id " +
                           std::to_string(static_cast<int>(boundary_id)) +
                           " to another type of boundary before now setting " +
                           "it as subsonic outflow"));
    AssertThrow(outflow_function->n_components == dim + 2,
                ExcMessage("Expected function with dim+2 components"));

    subsonic_outflow_boundaries[boundary_id] = std::move(outflow_function);
  }



  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_wall_boundary(
    const types::boundary_id boundary_id)
  {
    AssertThrow(inflow_boundaries.find(boundary_id) ==
                    inflow_boundaries.end() &&
                  subsonic_outflow_boundaries.find(boundary_id) ==
                    subsonic_outflow_boundaries.end(),
                ExcMessage("You already set the boundary with id " +
                           std::to_string(static_cast<int>(boundary_id)) +
                           " to another type of boundary before now setting " +
                           "it as wall boundary"));

    wall_boundaries.insert(boundary_id);
  }



  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::set_body_force(
    std::unique_ptr<Function<dim>> body_force)
  {
    AssertDimension(body_force->n_components, dim);

    this->body_force = std::move(body_force);
  }



  template <int dim, int degree, int n_points_1d>
  void EulerOperator<dim, degree, n_points_1d>::project(
    const Function<dim> &                       function,
    LinearAlgebra::distributed::Vector<Number> &solution) const
  {
    FEEvaluation<dim, degree, degree + 1, dim + 2, Number, VectorizedArrayType>
      phi(data, 0, 1);
    MatrixFreeOperators::CellwiseInverseMassMatrix<dim,
                                                   degree,
                                                   dim + 2,
                                                   Number,
                                                   VectorizedArrayType>
      inverse(phi);
    solution.zero_out_ghost_values();
    for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
      {
        phi.reinit(cell);
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          phi.submit_dof_value(evaluate_function(function,
                                                 phi.quadrature_point(q)),
                               q);
        inverse.transform_from_q_points_to_basis(dim + 2,
                                                 phi.begin_dof_values(),
                                                 phi.begin_dof_values());
        phi.set_dof_values(solution);
      }
  }



  template <int dim, int degree, int n_points_1d>
  std::array<double, 3> EulerOperator<dim, degree, n_points_1d>::compute_errors(
    const Function<dim> &                             function,
    const LinearAlgebra::distributed::Vector<Number> &solution) const
  {
    TimerOutput::Scope t(timer, "compute errors");
    double             errors_squared[3] = {};
    FEEvaluation<dim, degree, n_points_1d, dim + 2, Number, VectorizedArrayType>
      phi(data, 0, 0);

    for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
      {
        phi.reinit(cell);
        phi.gather_evaluate(solution, EvaluationFlags::values);
        VectorizedArrayType local_errors_squared[3] = {};
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          {
            const auto error =
              evaluate_function(function, phi.quadrature_point(q)) -
              phi.get_value(q);
            const auto JxW = phi.JxW(q);

            local_errors_squared[0] += error[0] * error[0] * JxW;
            for (unsigned int d = 0; d < dim; ++d)
              local_errors_squared[1] += (error[d + 1] * error[d + 1]) * JxW;
            local_errors_squared[2] += (error[dim + 1] * error[dim + 1]) * JxW;
          }
        for (unsigned int v = 0; v < data.n_active_entries_per_cell_batch(cell);
             ++v)
          for (unsigned int d = 0; d < 3; ++d)
            errors_squared[d] += local_errors_squared[d][v];
      }

    Utilities::MPI::sum(errors_squared, MPI_COMM_WORLD, errors_squared);

    std::array<double, 3> errors;
    for (unsigned int d = 0; d < 3; ++d)
      errors[d] = std::sqrt(errors_squared[d]);

    return errors;
  }
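

  // A remark on the transport-speed estimate computed by the next function
  // (which is taken over from step-67 unchanged): the loop with five
  // iterations further down is a power iteration on J^{-T} J^{-1}, where J
  // denotes the Jacobian of the mapping at the quadrature point, so that
  // max_eigenvalue approximates the largest singular value of J^{-1}. This
  // factor converts the speed of sound to reference-cell coordinates, where
  // it is added to the transformed convective speed to obtain the maximal
  // transport speed per cell that enters the time step selection in run().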


  template <int dim, int degree, int n_points_1d>
  double EulerOperator<dim, degree, n_points_1d>::compute_cell_transport_speed(
    const LinearAlgebra::distributed::Vector<Number> &solution) const
  {
    TimerOutput::Scope t(timer, "compute transport speed");
    Number             max_transport = 0;
    FEEvaluation<dim, degree, degree + 1, dim + 2, Number, VectorizedArrayType>
      phi(data, 0, 1);

    for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
      {
        phi.reinit(cell);
        phi.gather_evaluate(solution, EvaluationFlags::values);
        VectorizedArrayType local_max = 0.;
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          {
            const auto solution = phi.get_value(q);
            const auto velocity = euler_velocity<dim>(solution);
            const auto pressure = euler_pressure<dim>(solution);

            const auto inverse_jacobian = phi.inverse_jacobian(q);
            const auto convective_speed = inverse_jacobian * velocity;
            VectorizedArrayType convective_limit = 0.;
            for (unsigned int d = 0; d < dim; ++d)
              convective_limit =
                std::max(convective_limit, std::abs(convective_speed[d]));

            const auto speed_of_sound =
              std::sqrt(gamma * pressure * (1. / solution[0]));

            Tensor<1, dim, VectorizedArrayType> eigenvector;
            for (unsigned int d = 0; d < dim; ++d)
              eigenvector[d] = 1.;
            for (unsigned int i = 0; i < 5; ++i)
              {
                eigenvector = transpose(inverse_jacobian) *
                              (inverse_jacobian * eigenvector);
                VectorizedArrayType eigenvector_norm = 0.;
                for (unsigned int d = 0; d < dim; ++d)
                  eigenvector_norm =
                    std::max(eigenvector_norm, std::abs(eigenvector[d]));
                eigenvector /= eigenvector_norm;
              }
            const auto jac_times_ev   = inverse_jacobian * eigenvector;
            const auto max_eigenvalue = std::sqrt(
              (jac_times_ev * jac_times_ev) / (eigenvector * eigenvector));
            local_max =
              std::max(local_max,
                       max_eigenvalue * speed_of_sound + convective_limit);
          }

        for (unsigned int v = 0; v < data.n_active_entries_per_cell_batch(cell);
             ++v)
          for (unsigned int d = 0; d < 3; ++d)
            max_transport = std::max(max_transport, local_max[v]);
      }

    max_transport = Utilities::MPI::max(max_transport, MPI_COMM_WORLD);

    return max_transport;
  }



  template <int dim>
  class EulerProblem
  {
  public:
    EulerProblem();

    void run();

  private:
    void make_grid_and_dofs();

    void output_results(const unsigned int result_number);

    LinearAlgebra::distributed::Vector<Number> solution;

    ConditionalOStream pcout;

#ifdef DEAL_II_WITH_P4EST
    parallel::distributed::Triangulation<dim> triangulation;
#else
    Triangulation<dim> triangulation;
#endif

    FESystem<dim>        fe;
    MappingQGeneric<dim> mapping;
    DoFHandler<dim>      dof_handler;

    TimerOutput timer;

    EulerOperator<dim, fe_degree, n_q_points_1d> euler_operator;

    double time, time_step;

    class Postprocessor : public DataPostprocessor<dim>
    {
    public:
      Postprocessor();

      virtual void evaluate_vector_field(
        const DataPostprocessorInputs::Vector<dim> &inputs,
        std::vector<Vector<double>> &computed_quantities) const override;

      virtual std::vector<std::string> get_names() const override;

      virtual std::vector<
        DataComponentInterpretation::DataComponentInterpretation>
      get_data_component_interpretation() const override;

      virtual UpdateFlags get_needed_update_flags() const override;

    private:
      const bool do_schlieren_plot;
    };
  };



  template <int dim>
  EulerProblem<dim>::Postprocessor::Postprocessor()
    : do_schlieren_plot(dim == 2)
  {}


  template <int dim>
  void EulerProblem<dim>::Postprocessor::evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &inputs,
    std::vector<Vector<double>> &               computed_quantities) const
  {
    const unsigned int n_evaluation_points = inputs.solution_values.size();

    if (do_schlieren_plot == true)
      Assert(inputs.solution_gradients.size() == n_evaluation_points,
             ExcInternalError());

    Assert(computed_quantities.size() == n_evaluation_points,
           ExcInternalError());
    Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
    Assert(computed_quantities[0].size() ==
             dim + 2 + (do_schlieren_plot == true ? 1 : 0),
           ExcInternalError());

    for (unsigned int q = 0; q < n_evaluation_points; ++q)
      {
        Tensor<1, dim + 2> solution;
        for (unsigned int d = 0; d < dim + 2; ++d)
          solution[d] = inputs.solution_values[q](d);

        const double         density  = solution[0];
        const Tensor<1, dim> velocity = euler_velocity<dim>(solution);
        const double         pressure = euler_pressure<dim>(solution);

        for (unsigned int d = 0; d < dim; ++d)
          computed_quantities[q](d) = velocity[d];
        computed_quantities[q](dim)     = pressure;
        computed_quantities[q](dim + 1) = std::sqrt(gamma * pressure / density);

        if (do_schlieren_plot == true)
          computed_quantities[q](dim + 2) =
            inputs.solution_gradients[q][0] * inputs.solution_gradients[q][0];
      }
  }



  template <int dim>
  std::vector<std::string> EulerProblem<dim>::Postprocessor::get_names() const
  {
    std::vector<std::string> names;
    for (unsigned int d = 0; d < dim; ++d)
      names.emplace_back("velocity");
    names.emplace_back("pressure");
    names.emplace_back("speed_of_sound");

    if (do_schlieren_plot == true)
      names.emplace_back("schlieren_plot");

    return names;
  }



  template <int dim>
  std::vector<DataComponentInterpretation::DataComponentInterpretation>
  EulerProblem<dim>::Postprocessor::get_data_component_interpretation() const
  {
    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      interpretation;
    for (unsigned int d = 0; d < dim; ++d)
      interpretation.push_back(
        DataComponentInterpretation::component_is_part_of_vector);
    interpretation.push_back(DataComponentInterpretation::component_is_scalar);
    interpretation.push_back(DataComponentInterpretation::component_is_scalar);

    if (do_schlieren_plot == true)
      interpretation.push_back(
        DataComponentInterpretation::component_is_scalar);

    return interpretation;
  }



  template <int dim>
  UpdateFlags EulerProblem<dim>::Postprocessor::get_needed_update_flags() const
  {
    if (do_schlieren_plot == true)
      return update_values | update_gradients;
    else
      return update_values;
  }



  template <int dim>
  EulerProblem<dim>::EulerProblem()
    : pcout(std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
#ifdef DEAL_II_WITH_P4EST
    , triangulation(MPI_COMM_WORLD)
#endif
    , fe(FE_DGQ<dim>(fe_degree), dim + 2)
    , mapping(fe_degree)
    , dof_handler(triangulation)
    , timer(pcout, TimerOutput::never, TimerOutput::wall_times)
    , euler_operator(timer)
    , time(0)
    , time_step(0)
  {}


  template <int dim>
  void EulerProblem<dim>::make_grid_and_dofs()
  {
    switch (testcase)
      {
        case 0:
          {
            Point<dim> lower_left;
            for (unsigned int d = 1; d < dim; ++d)
              lower_left[d] = -5;

            Point<dim> upper_right;
            upper_right[0] = 10;
            for (unsigned int d = 1; d < dim; ++d)
              upper_right[d] = 5;

            GridGenerator::hyper_rectangle(triangulation,
                                           lower_left,
                                           upper_right);
            triangulation.refine_global(2);

            euler_operator.set_inflow_boundary(
              0, std::make_unique<ExactSolution<dim>>(0));

            break;
          }

        case 1:
          {
            GridGenerator::channel_with_cylinder(
              triangulation, 0.03, 1, 0, true);

            euler_operator.set_inflow_boundary(
              0, std::make_unique<ExactSolution<dim>>(0));
            euler_operator.set_subsonic_outflow_boundary(
              1, std::make_unique<ExactSolution<dim>>(0));

            euler_operator.set_wall_boundary(2);
            euler_operator.set_wall_boundary(3);

            if (dim == 3)
              euler_operator.set_body_force(
                std::make_unique<Functions::ConstantFunction<dim>>(
                  std::vector<double>({0., 0., -0.2})));

            break;
          }

        default:
          Assert(false, ExcNotImplemented());
      }

    triangulation.refine_global(n_global_refinements);

    dof_handler.distribute_dofs(fe);

    euler_operator.reinit(mapping, dof_handler);
    euler_operator.initialize_vector(solution);

    std::locale s = pcout.get_stream().getloc();
    pcout.get_stream().imbue(std::locale(""));
    pcout << "Number of degrees of freedom: " << dof_handler.n_dofs()
          << " ( = " << (dim + 2) << " [vars] x "
          << triangulation.n_global_active_cells() << " [cells] x "
          << Utilities::pow(fe_degree + 1, dim) << " [dofs/cell/var] )"
          << std::endl;
    pcout.get_stream().imbue(s);
  }



  template <int dim>
  void EulerProblem<dim>::output_results(const unsigned int result_number)
  {
    const std::array<double, 3> errors =
      euler_operator.compute_errors(ExactSolution<dim>(time), solution);
    const std::string quantity_name = testcase == 0 ? "error" : "norm";

    pcout << "Time:" << std::setw(8) << std::setprecision(3) << time
          << ", dt: " << std::setw(8) << std::setprecision(2) << time_step
          << ", " << quantity_name << " rho: " << std::setprecision(4)
          << std::setw(10) << errors[0] << ", rho * u: " << std::setprecision(4)
          << std::setw(10) << errors[1] << ", energy:" << std::setprecision(4)
          << std::setw(10) << errors[2] << std::endl;

    {
      TimerOutput::Scope t(timer, "output");

      Postprocessor postprocessor;
      DataOut<dim>  data_out;

      DataOutBase::VtkFlags flags;
      flags.write_higher_order_cells = true;
      data_out.set_flags(flags);

      data_out.attach_dof_handler(dof_handler);
      {
        std::vector<std::string> names;
        names.emplace_back("density");
        for (unsigned int d = 0; d < dim; ++d)
          names.emplace_back("momentum");
        names.emplace_back("energy");

        std::vector<DataComponentInterpretation::DataComponentInterpretation>
          interpretation;
        interpretation.push_back(
          DataComponentInterpretation::component_is_scalar);
        for (unsigned int d = 0; d < dim; ++d)
          interpretation.push_back(
            DataComponentInterpretation::component_is_part_of_vector);
        interpretation.push_back(
          DataComponentInterpretation::component_is_scalar);

        data_out.add_data_vector(dof_handler, solution, names, interpretation);
      }
      data_out.add_data_vector(solution, postprocessor);

      LinearAlgebra::distributed::Vector<Number> reference;
      if (testcase == 0 && dim == 2)
        {
          reference.reinit(solution);
          euler_operator.project(ExactSolution<dim>(time), reference);
          reference.sadd(-1., 1, solution);
          std::vector<std::string> names;
          names.emplace_back("error_density");
          for (unsigned int d = 0; d < dim; ++d)
            names.emplace_back("error_momentum");
          names.emplace_back("error_energy");

          std::vector<DataComponentInterpretation::DataComponentInterpretation>
            interpretation;
          interpretation.push_back(
            DataComponentInterpretation::component_is_scalar);
          for (unsigned int d = 0; d < dim; ++d)
            interpretation.push_back(
              DataComponentInterpretation::component_is_part_of_vector);
          interpretation.push_back(
            DataComponentInterpretation::component_is_scalar);

          data_out.add_data_vector(dof_handler,
                                   reference,
                                   names,
                                   interpretation);
        }

      Vector<double> mpi_owner(triangulation.n_active_cells());
      mpi_owner = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
      data_out.add_data_vector(mpi_owner, "owner");

      data_out.build_patches(mapping,
                             fe.degree,
                             DataOut<dim>::curved_inner_cells);

      const std::string filename =
        "solution_" + Utilities::int_to_string(result_number, 3) + ".vtu";
      data_out.write_vtu_in_parallel(filename, MPI_COMM_WORLD);
    }
  }
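

  // One more note before the main driver: the time step size used in run()
  // below follows step-67, namely the Courant number times the number of
  // Runge-Kutta stages divided by the maximal cell transport speed computed
  // above, re-evaluated every five time steps to follow changes in the
  // solution.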
"doubles" : "floats") + << " = " << n_vect_bits << " bits (" + << Utilities::System::get_current_vectorization_level() << ")" + << std::endl; + } + + make_grid_and_dofs(); + + const LowStorageRungeKuttaIntegrator integrator(lsrk_scheme); + + LinearAlgebra::distributed::Vector rk_register_1; + LinearAlgebra::distributed::Vector rk_register_2; + rk_register_1.reinit(solution); + rk_register_2.reinit(solution); + + euler_operator.project(ExactSolution(time), solution); + + + double min_vertex_distance = std::numeric_limits::max(); + for (const auto &cell : triangulation.active_cell_iterators()) + if (cell->is_locally_owned()) + min_vertex_distance = + std::min(min_vertex_distance, cell->minimum_vertex_distance()); + min_vertex_distance = + Utilities::MPI::min(min_vertex_distance, MPI_COMM_WORLD); + + time_step = courant_number * integrator.n_stages() / + euler_operator.compute_cell_transport_speed(solution); + pcout << "Time step size: " << time_step + << ", minimal h: " << min_vertex_distance + << ", initial transport scaling: " + << 1. / euler_operator.compute_cell_transport_speed(solution) + << std::endl + << std::endl; + + output_results(0); + + unsigned int timestep_number = 0; + + while (time < final_time - 1e-12 && timestep_number < max_time_steps) + { + ++timestep_number; + if (timestep_number % 5 == 0) + time_step = + courant_number * integrator.n_stages() / + Utilities::truncate_to_n_digits( + euler_operator.compute_cell_transport_speed(solution), 3); + + { + TimerOutput::Scope t(timer, "rk time stepping total"); + integrator.perform_time_step(euler_operator, + time, + time_step, + solution, + rk_register_1, + rk_register_2); + } + + time += time_step; + + if (static_cast(time / output_tick) != + static_cast((time - time_step) / output_tick) || + time >= final_time - 1e-12) + output_results( + static_cast(std::round(time / output_tick))); + } + + timer.print_wall_time_statistics(MPI_COMM_WORLD); + pcout << std::endl; + } + +} // namespace Euler_DG + + +int main(int argc, char **argv) +{ + using namespace Euler_DG; + using namespace dealii; + + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + try + { + deallog.depth_console(0); + + EulerProblem euler_problem; + euler_problem.run(); + } + catch (std::exception &exc) + { + std::cerr << std::endl + << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + + return 1; + } + catch (...) + { + std::cerr << std::endl + << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Unknown exception!" << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + + return 0; +} -- 2.39.5