journal = {{SIAM} Journal on Scientific Computing}
}
+% ------------------------------------
+% Step 68
+% ------------------------------------
+
+@article{Blais2013,
+ title={Dealing with more than two materials in the FVCF--ENIP method},
+ author={Blais, Bruno and Braeunig, Jean-Philippe and Chauveheid, Daniel and Ghidaglia, Jean-Michel and Loub{\`e}re, Rapha{\"e}l},
+ journal={European Journal of Mechanics-B/Fluids},
+ volume={42},
+ pages={1--9},
+ year={2013},
+ publisher={Elsevier}
+}
+
+@article{Blais2019,
+ title={Experimental Methods in Chemical Engineering: Discrete Element Method—DEM},
+ author={Blais, Bruno and Vidal, David and Bertrand, Francois and Patience, Gregory S and Chaouki, Jamal},
+ journal={The Canadian Journal of Chemical Engineering},
+ volume={97},
+ number={7},
+ pages={1964--1973},
+ year={2019},
+ publisher={Wiley Online Library}
+}
+
+@article{Gassmoller2019,
+ title={Evaluating the accuracy of hybrid finite element/particle-in-cell methods for modelling incompressible Stokes flow},
+ author={Gassm{\"o}ller, Rene and Lokavarapu, Harsha and Bangerth, Wolfgang and Puckett, Elbridge Gerry},
+ journal={Geophysical Journal International},
+ volume={219},
+ number={3},
+ pages={1915--1938},
+ year={2019},
+ publisher={Oxford University Press}
+}
+
% ------------------------------------
% Step 69
* <td>step-21</td>
* <td> The time dependent two-phase flow in
* porous media. Extensions of mixed Laplace discretizations. More
- * complicated block solvers. Simple explicit (forward Euler) time
+ * complicated block solvers. Simple explicit (forward Euler) time
* stepping.
* <br/> Keywords: TensorFunction, FE_RaviartThomas,
* VectorTools::project(), DiscreteTime
* <tr valign="top">
* <td>step-23</td>
* <td> Finally a "real" time dependent problem, the wave equation.
- * Fractional time stepping (explicit, fully implicit and Crank-Nicholson
+ * Fractional time stepping (explicit, fully implicit and Crank-Nicolson
* method).
* <br/> Keywords: MatrixCreator, VectorTools::project()
* </td></tr>
*
* <tr valign="top">
+ * <td>step-68</td>
+ * <td> Simulation of the motion of massless tracer particles in a vortical flow.
+ * Parallel simulation of the advection of particles with load balancing.
+ * </td></tr>
+ *
+ * <tr valign="top">
* <td>step-69</td>
* <td> Hyperbolic conservation laws: a first-order guaranteed maximum
- * wavespeed method for the compressible Euler equations. Explicit time
+ * wavespeed method for the compressible Euler equations. Explicit time
* stepping.
* </td></tr>
*
* </td>
* </tr>
*
+ * <tr valign="top">
+ * <td> Particles
+ * </td>
+ * <td>
+ * step-19,
+ * step-68
+ * </td>
+ * </tr>
+ *
* </table>
* <h4><b>Linear solvers</b></h4>
* <table align="center" width="90%">
--- /dev/null
+New: Tutorial example (step-68) showcasing parallel simulation of the advection
+of particles including load balancing.
+<br>
+(Bruno Blais, Toni El Geitani Nehme, Rene Gassmöller, Peter Munch, 2020/05/23)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12)
-FIND_PACKAGE(deal.II 9.2.0 QUIET
+FIND_PACKAGE(deal.II 9.3.0 QUIET
HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR}
)
IF(NOT ${deal.II_FOUND})
<br>
<i>
-
+This program was contributed by
Bruno Blais (Polytechnique Montréal),
-Toni El Geitani Nehme (Polytechnique Montreal),
+Toni El Geitani Nehme (Polytechnique Montréal),
Rene Gassmöller (University of California Davis),
-and Peter Munch (Technical University of Munich and Helmholtz-Zentrum Geesthacht)
+and Peter Munch (Technical University of Munich and Helmholtz-Zentrum Geesthacht).
+Bruno Blais was supported by NSERC Discovery grant
+RGPIN-2020-04510, by Compute Canada and Calcul Québec.
</i>
<h1>Introduction</h1>
number of applications. Particles are routinely used
as massless tracers to visualize the dynamics of a transient flow. They
can also play an intrinsic role as part of a more complex finite element
- model, as is the case for the Particle-In-Cell (PIC) method (Gassmöller et al. 2018)
+ model, as is the case for the Particle-In-Cell (PIC) method @cite GLHPW2018
or they can even be used to simulate the motion of granular matter, as in
- the Discrete Element Method (DEM) (Blais et al. 2019). In the case
+ the Discrete Element Method (DEM) @cite Blais2019. In the case
of DEM, the resulting model is not related to the finite element method anymore,
but instead leads to a system of ordinary differential equations that describes
the motion of the particles and the dynamics of their collisions. All of
these models can be built using deal.II's particle handling capabilities.
-In the present step, we use particles as massless tracer to illustrate
+In the present step, we use particles as massless tracers to illustrate
the dynamics of a vortical flow. Since the particles are massless tracers,
the position of each particle $i$ is described by the
following ordinary differential equation (ODE):
\frac{d \textbf{x}_i}{dt} = \textbf{u}(\textbf{x}_i)
@f]
-where $\textbf{x}_i$ is the postion of particle $i$ and $\textbf{u}(\textbf{x}_i)$ the flow velocity at its position.
+where $\textbf{x}_i$ is the position of particle $i$ and $\textbf{u}(\textbf{x}_i)$ the flow velocity at its position.
In the present step, this ODE is solved using the explicit Euler method. The resulting scheme is:
@f[
\textbf{x}_{i}^{n+1} = \textbf{x}_{i}^{n} + \Delta t \; \textbf{u}(\textbf{x}_{i}^{n})
of particle $i$ at times $t+\Delta t$ and $t$, respectively, and where $\Delta t$
is the time step. In the present step, the velocity at the location of particles
is obtained in two different fashions:
-- By evaluating the velocity function at the location of the particles.
+- By evaluating the velocity function at the location of the particles;
- By evaluating the velocity function on a background triangulation and, using
a finite element support, interpolating at the position of the particle.
The first approach is not practical, since the velocity profile
is generally not known analytically. The second approach, based on interpolating a solution
at the position of the particles, mimics exactly what would be done in a
-realistic computational fluid dynamic simulation. In this step, we illustrate both strategies.
-
+realistic computational fluid dynamics simulation, and this follows the way we have also evaluated
+the finite element solution at particle locations in step-19. In this step, we illustrate both strategies.
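
For the analytical case, a single Euler step per particle is only a few lines
of code. The following is a condensed version of what the
<code>euler_step_analytical()</code> function does later in this program
(<code>velocity</code> is the analytically known velocity Function object and
<code>dt</code> the time step):
@code
// Advance one particle with an explicit Euler step using the
// analytically known velocity field.
Vector<double> particle_velocity(dim);
Point<dim>     particle_location = particle->get_location();
velocity.vector_value(particle_location, particle_velocity);
for (int d = 0; d < dim; ++d)
  particle_location[d] += particle_velocity[d] * dt;
particle->set_location(particle_location);
@endcode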
We note that much greater accuracy could be obtained by using a fourth
order Runge-Kutta method or another appropriate scheme for the time integration
-of the motion of the particles. Implementing a more advanced advection scheme
+of the motion of the particles. Implementing a more advanced time-integration scheme
would be a straightforward extension of this step.
<h3>Particles in deal.II</h3>
a ParticleHandler, particles have access to a Particles::PropertyPool. This PropertyPool is
an array which can be used to store an arbitrary number of properties
associated with the particles. Consequently, users can build their own
-particle solver and attribute the desired properties to the particles (e.g. mass, charge,
+particle solver and attribute the desired properties to the particles (e.g., mass, charge,
diameter, temperature, etc.). In the present tutorial, this is used to
store the value of the fluid velocity and the process id to which the particles
belong.
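
As a rough sketch of how this is used in the present program: we reserve
<code>1 + dim</code> properties per particle when initializing the
Particles::ParticleHandler, and later write into them through an ArrayView.
The names mirror the code shown further below; <code>process_id</code> is a
placeholder for the id of the owning process:
@code
// Reserve 1 + dim properties per particle: dim velocity components
// plus one scalar identifying the owning process.
particle_handler.initialize(background_triangulation, mapping, 1 + dim);

// Each particle then exposes its properties as an ArrayView that
// can be read and written like an ordinary array:
ArrayView<double> properties = particle->get_properties();
for (int d = 0; d < dim; ++d)
  properties[d] = particle_velocity[d];
properties[dim] = process_id;
@endcode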
- Exchanging the particles that leave local domains between the processors;
- Load balancing the simulation so that every processor has a similar computational load.
These challenges and their solution in deal.II have been discussed in more detail in
-Gassmöller et al. (2018), but we will summarize them below.
+@cite GLHPW2018, but we will summarize them below.
+
+There are of course also questions on simply setting up a code that uses particles. These have largely already been
+addressed in step-19. Some more advanced techniques will also be discussed in step-70.
<h4>Parallel particle generation</h4>
Generating distributed particles in a scalable way is not straightforward since
the processor to which they belong must first be identified before the cell in
-which they are located is found. Deal.II provides numerous capabilities to
+which they are located is found. deal.II provides numerous capabilities to
generate particles through the Particles::Generator namespace. Some of these
particle generators create particles only on the locally owned subdomain. For
example, Particles::Generators::regular_reference_locations() creates particles
limited number of time-steps and, consequently, does not constitute a large
portion of the computational cost. For these occasions, deal.II provides
convenient Particles::Generators that can globally insert the particles even if
-they are not located on the subdomain from which they are created. The
+they are not located in a cell owned by the parallel process on which the call to create them is initiated. The
generators first locate on which subdomain the particles are situated, identify
in which cell they are located and exchange the necessary information among
the processors to ensure that the particle is generated with the right
properties. Consequently, this type of particle generation can be communication
intensive. The Particles::Generators::dof_support_points and the
Particles::Generators::quadrature_points generate particles using a
-triangulation and the points of an associated dof_handler or quadrature
+triangulation and the points of an associated DoFHandler or a quadrature,
respectively. The triangulation that is used to generate the particles can be
-the same triangulation as used for the background mesh, in which case these
+the same triangulation that is used for the background mesh, in which case these
functions are very similar to the
Particles::Generators::regular_reference_locations() function described in the
previous paragraph. However, the triangulation used to generate particles can
which is useful to generate particles in particular shapes (as in this
example), or to transfer information between two different computational grids
(as in step-70). Furthermore, the Particles::ParticleHandler class provides the
-Particles::ParticleHandler::insert_global_particles function which enables the
+Particles::ParticleHandler::insert_global_particles() function, which enables the
global insertion of particles from a vector of arbitrary points and a global
vector of bounding boxes. In the present step, we use the
-Particles::Generators::quadrature_points on a non-matching triangulation to
-insert the particle in the shape of a disk.
+Particles::Generators::quadrature_points() function on a non-matching triangulation to
+insert particles located at positions in the shape of a disk.
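
Condensed to the essential call, this insertion (carried out later in this
program) reads as follows, where <code>particle_triangulation</code> is the
non-matching triangulation, <code>global_bounding_boxes</code> describes which
process owns which part of the background mesh, and <code>properties</code>
holds the initial particle properties:
@code
// Insert one particle at the single Gauss point (the centroid) of
// every locally owned cell of the particle triangulation.
Particles::Generators::quadrature_points(particle_triangulation,
                                         QGauss<dim>(1),
                                         global_bounding_boxes,
                                         particle_handler,
                                         mapping,
                                         properties);
@endcode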
<h4>Particle exchange</h4>
the locally owned subdomain and need to be transferred to their new owner
processes. This situation can arise in two very different ways: First, if the
previous owning process knows the new owner of the particles that were lost
-(for example because the particles moved into ghost cells of a distributed
+(for example because the particles moved from the locally owned cell of one processor
+into adjacent ghost cells of a distributed
triangulation) then the transfer can be handled efficiently as a point-to-point
communication between each process and the new owners. This transfer happens
automatically whenever particles are sorted into their new cells. Secondly,
the previous owner may not know to which process the particle has moved. In
this case the particle is discarded by default, as a global search for the
-owner can be expensive. Step-19 shows how such a discarded particle can still
+owner can be expensive. step-19 shows how such a discarded particle can still
be collected, interpreted, and potentially reinserted by the user. In the
present example we prevent the second case by imposing a CFL criterion on the
timestep to ensure particles will at most move into the ghost layer of the
process. However, if some cells own many more particles than other cells, or if
the particles of one cell are much more computationally expensive than the
particles in other cells, then this problem no longer scales efficiently (for a
-discussion of what we consider "scalable" programs, see @ref
-GlossParallelScaling "this glossary entry"). Thus, we have to apply a form of
+discussion of what we consider "scalable" programs, see
+@ref GlossParallelScaling "this glossary entry"). Thus, we have to apply a form of
"load balancing", which means we estimate the computational load that is
associated with each cell and its particles. Repartitioning the mesh then
accounts for this combined computational load instead of the simplified
-assumption of the number of cells.
+assumption of the number of cells @cite GLHPW2018.
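
In deal.II, this estimate is provided by connecting a callback to the cell
weighting signal of the triangulation before repartitioning. The following is a
condensed version of the connection established later in this program, where
the <code>cell_weight()</code> member function returns the extra weight due to
the particles in a cell:
@code
// Let the triangulation ask us for the computational weight of each
// cell whenever it repartitions; we forward to the cell_weight()
// member function defined in the program below.
background_triangulation.signals.cell_weight.connect(
  [&](const typename parallel::distributed::Triangulation<dim>::cell_iterator
        &cell,
      const typename parallel::distributed::Triangulation<dim>::CellStatus
        status) -> unsigned int { return this->cell_weight(cell, status); });
@endcode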
In this section we only discussed the particle-specific challenges in distributed
computation. Parallel challenges that particles share with
<h3>The testcase</h3>
In the present step, we use particles as massless tracers to illustrate
-the dynamics of a particular vortical flow : the Rayleigh-Kothe Vortex. This flow pattern
+the dynamics of a particular vortical flow: the Rayleigh-Kothe vortex. This flow pattern
is generally used as a complex test case for interface tracking methods
-(e.g. volume-of-fluid and level set approches) since
-it leads to strong rotation and elongation of the fluid (Blais et al. 2013).
+(e.g., volume-of-fluid and level set approaches) since
+it leads to strong rotation and elongation of the fluid @cite Blais2013.
The stream function $\Psi$ of this Rayleigh-Kothe vortex is defined as:
@f[
\Psi = \frac{1}{\pi} \sin^2 (\pi x) \sin^2 (\pi y) \cos \left( \pi \frac{t}{T} \right)
@f]
-where $T$ is the period of the flow. The velocity profile in 2D ($\textbf{u}=[u,v]^T$) is :
+where $T$ is half the period of the flow. The velocity profile in 2D ($\textbf{u}=[u,v]^T$) is:
@f{eqnarray*}
u &=& - \frac{\partial\Psi}{\partial y} = -2 \sin^2 (\pi x) \sin (\pi y) \cos (\pi y) \cos \left( \pi \frac{t}{T} \right)\\
v &=& \frac{\partial\Psi}{\partial x} = 2 \cos(\pi x) \sin(\pi x) \sin^2 (\pi y) \cos \left( \pi \frac{t}{T} \right)
@f}
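These two components are implemented in the <code>Vortex</code> function object
that the program defines below; as a condensed sketch of its
<code>vector_value()</code> function (with <code>point</code>, <code>t</code>,
and <code>T</code> as defined there):
@code
// Evaluate the two velocity components of the vortex at position
// `point` and time `t`, where T is half the period of the flow.
const double px = numbers::PI * point(0);
const double py = numbers::PI * point(1);
const double pt = numbers::PI / T * t;

values[0] = -2 * std::cos(pt) * std::pow(std::sin(px), 2) * std::sin(py) *
            std::cos(py);
values[1] = 2 * std::cos(pt) * std::pow(std::sin(py), 2) * std::sin(px) *
            std::cos(px);
@endcode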
+The velocity profile is illustrated in the following animation:
+@htmlonly
+<p align="center">
+ <iframe width="560" height="500" src="https://www.youtube.com/embed/m6hQm7etji8"
+ frameborder="0"
+ allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
+ allowfullscreen></iframe>
+ </p>
+@endhtmlonly
+
It can be seen that this velocity reverses periodically due to the term
$\cos \left( \pi \frac{t}{T} \right)$ and that material will end up at its
starting position after every period of length $t=2T$. We will run this tutorial
solution as the velocity for each particle. Therefore in this model there is no
error in the assigned velocity to the particles, and any deviation of particle
positions from the analytical position at a given time results from the error
-in solving the equation of motion for the particle. In the second model the
+in solving the equation of motion for the particle inexactly, using a time stepping method. In the second model, the
analytical velocity field is first interpolated to a finite-element vector
space (to simulate the case that the velocity was obtained from solving a
-finite-element problem). This finite-element "solution" is then evaluated at
+finite-element problem, in the same way as the ODE for each particle in step-19 depends on a finite element
+solution). This finite-element "solution" is then evaluated at
the locations of the particles to solve their equation of motion. The
difference between the two cases allows us to assess whether the chosen
finite-element space is sufficiently accurate to advect the particles with the
optimal convergence rate of the chosen particle advection scheme, a question
that is important in practice to determine the accuracy of the combined
-algorithm (see e.g. Gassmöller et al., 2019).
-
-
-
-<h3>References</h3>
-
-<ul>
-<li> Blais, Bruno, et al. (2019) "Experimental Methods in Chemical Engineering: Discrete
- Element Method—DEM." The Canadian Journal of Chemical Engineering 97.7 : 1964-1973.
-
-<li> Gassmöller, Rene, et al. (2019). "Evaluating the accuracy of hybrid finite
- element/particle-in-cell methods for modelling incompressible Stokes flow".
- Geophysical Journal International, 219.3, 1915-1938.
-
-<li>Gassmöller, Rene, et al. (2018). "Flexible and Scalable Particle‐in‐Cell Methods With
- Adaptive Mesh Refinement for Geodynamic Computations." Geochemistry, Geophysics,
- Geosystems 19.9 : 3596-3604.
-
- <li>Blais, Bruno, et al. (2013) "Dealing with more than two materials in the FVCF–ENIP method."
- European Journal of Mechanics-B/Fluids 42 1-9.
- </ul>
+algorithm (see, e.g., @cite Gassmoller2019).
Repartitioning triangulation after particle generation
Writing particle output file: interpolated-particles-0
Writing background field file: background-0
-Writing particle output file: ainterpolated-particles-10
+Writing particle output file: interpolated-particles-10
Writing background field file: background-10
Writing particle output file: interpolated-particles-20
Writing background field file: background-20
<h3>Possibilities for extensions</h3>
-This steps highlights some of the main capabilities for handling particles in deal.II, notably their
+This program highlights some of the main capabilities for handling particles in deal.II, notably their
capacity to be used in distributed parallel simulations. However, this step could
-be exteded in numerous manners:
+be extended in numerous manners:
- High-order time integration (for example using a Runge-Kutta 4 method) could be
used to increase the accuracy and allow for an increased time-step size (a sketch follows at the end of this list).
- The full equation of motion (with inertia) could be solved for the particles. In
this case the particles would need to have additional properties such as their mass,
-and their diameter.
+as in step-19, and if one wanted to also consider interactions with the fluid, their diameter.
- Coupling to a flow solver. This step could be straightforwardly coupled to any parallel
-steps in which the Stokes (step-32, step-70) or the Navier-Stokes equations are solved (e.g. step-57).
+program in which the Stokes (step-32, step-70) or the Navier-Stokes equations are solved (e.g., step-57).
- Computing the difference in final particle positions between the two models
would allow quantifying the influence of the interpolation error on particle motion.
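
For the first of these extensions, a classical fourth-order Runge-Kutta step
for $d\textbf{x}/dt = \textbf{u}(\textbf{x},t)$ could look like the following
sketch, which re-uses the time-dependent velocity Function object of the
analytical model (<code>t</code> and <code>dt</code> denote the current time
and the step size):
@code
// One classical RK4 step for a single particle: evaluate the velocity
// at four intermediate stages and combine them with the usual weights.
Point<dim>     x = particle->get_location();
Vector<double> k1(dim), k2(dim), k3(dim), k4(dim);

velocity.set_time(t);
velocity.vector_value(x, k1);

Point<dim> x2 = x;
for (int d = 0; d < dim; ++d)
  x2[d] += 0.5 * dt * k1[d];
velocity.set_time(t + 0.5 * dt);
velocity.vector_value(x2, k2);

Point<dim> x3 = x;
for (int d = 0; d < dim; ++d)
  x3[d] += 0.5 * dt * k2[d];
velocity.vector_value(x3, k3);

Point<dim> x4 = x;
for (int d = 0; d < dim; ++d)
  x4[d] += dt * k3[d];
velocity.set_time(t + dt);
velocity.vector_value(x4, k4);

for (int d = 0; d < dim; ++d)
  x[d] += dt / 6. * (k1[d] + 2. * k2[d] + 2. * k3[d] + k4[d]);
particle->set_location(x);
@endcode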
// @sect3{Include files}
-// The majority of the include files are generic
-
#include <deal.II/base/bounding_box.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/discrete_time.h>
// a collection of points with some attached properties (e.g., an id) floating
// on a parallel::distributed::Triangulation. The methods and classes in the
// namespace Particles allow one to easily implement Particle-In-Cell methods
-// and particle tracing on distributed triangulations
+// and particle tracing on distributed triangulations:
#include <deal.II/particles/particle_handler.h>
// We import the particle generators,
// which allow us to insert the particles. In the present step, the particles
-// are globally inserted using a non-matching hyper-shell triangulation
+// are globally inserted using a non-matching hyper-shell triangulation:
#include <deal.II/particles/generators.h>
// Since the particles do not form a triangulation, they have their
// own specific DataOut class which will enable us to write them
-// to commonly used parallel vtu format
// to the commonly used parallel vtu format:
#include <deal.II/particles/data_out.h>
#include <cmath>
// members of this class and the corresponding entries in the
// ParameterHandler. Thanks to the use of the
// ParameterHandler::add_parameter() method, this connection is trivial, but
- // requires all members of this class to be writeable.
+ // requires all members of this class to be writable.
class ParticleTrackingParameters : public ParameterAcceptor
{
public:
// @sect3{Velocity profile}
- // The velocity profile is provided as a Function object. We provide the
- // velocity profile. In the present step, this function is hard-coded within
+ // The velocity profile is provided as a Function object.
+ // This function is hard-coded within
// the example.
template <int dim>
class Vortex : public Function<dim>
Vector<double> & values) const
{
const double T = 4;
- // Since the velocity profile is time dependant, the present time in the
- // simulation must be gathered from the Function object.
+ // The velocity profile for the Rayleigh-Kothe vortex is time-dependent.
+ // Consequently, the current time in the
+ // simulation (t) must be gathered from the Function object.
const double t = this->get_time();
const double px = numbers::PI * point(0);
}
}
- // @sect3{The <code>PatricleTracking</code> class declaration}
+ // @sect3{The <code>ParticleTracking</code> class declaration}
// We are now ready to introduce the main class of our tutorial program.
template <int dim>
// initialize the degrees of freedom on the background grid
void setup_background_dofs();
+ // In one of the test cases, the function is mapped to the background grid
+ // and a finite element interpolation is used to calculate the velocity
+ // at the particle location. This function calculates the value of the
+ // function at the support points of the triangulation.
void interpolate_function_to_field();
// The next two functions are responsible for carrying out a step of the explicit
// @sect4{Constructor}
- // Constructors and destructors are rather trivial. They are very similar
+ // The constructors and destructors are rather trivial. They are very similar
// to what is done in step-40. We set the processors we want to work on
// to all machines available (MPI_COMM_WORLD) and
// initialize the <code>pcout</code> variable to only allow processor zero
const typename parallel::distributed::Triangulation<dim>::CellStatus status)
const
{
- // Assign no weight to cells we do not own.
+ // We do not assign any weight to cells we do not own (i.e., ghost or artificial cells).
if (!cell->is_locally_owned())
return 0;
// This determines how important particle work is compared to cell
// work (by default every cell has a weight of 1000).
// We set the weight per particle much higher to indicate that
- // the particle load is the only one that is important to distribute
- // in this example. The optimal value of this number depends on the
+ // the particle load is the only one that is important to distribute the
+ // cells in this example. The optimal value of this number depends on the
// application and can range from 0 (cheap particle operations,
// expensive cell operations) to much larger than 1000 (expensive
- // particle operations, cheap cell operations, like in this example).
+ // particle operations, cheap cell operations, as is presumed in this
+ // example).
const unsigned int particle_weight = 10000;
// This example does not use adaptive refinement, therefore every cell
// 2. How to pack the particles before shipping data around
// 3. How to unpack the particles after repartitioning
//
- // Attach the correct functions to the signals inside
- // parallel::distributed::Triangulation, which will be called every time the
- // repartition() function is called.
- // These connections only need to be created once, so we might as well
- // have set them up in the constructor of this class, but for the purpose
- // of this example we want to group the particle related instructions.
+ // We attach the correct functions to the signals inside
+ // parallel::distributed::Triangulation. These signals will be called every
+ // time the repartition() function is called. These connections only need to
+ // be created once, so we might as well have set them up in the constructor
+ // of this class, but for the purpose of this example we want to group the
+ // particle related instructions.
background_triangulation.signals.cell_weight.connect(
[&](
const typename parallel::distributed::Triangulation<dim>::cell_iterator
&particle_handler,
false));
- // Establish the background triangulation where the particles are living
- // and the number of properties of the particles
+ // This initializes the background triangulation where the particles are
+ // living and the number of properties of the particles.
particle_handler.initialize(background_triangulation, mapping, 1 + dim);
// We create a particle triangulation which is solely used to generate
// the points which will be used to insert the particles. This
// triangulation is a hyper_shell which is offset from the
- // center of the simulation domain.
+ // center of the simulation domain. This will be used to generate a
+ // disk filled with particles which will allow an easy monitoring
+ // of the motion due to the vortex.
Point<dim> center;
center[0] = 0.5;
center[1] = 0.75;
// The quadrature points particle generator generates particles only
// on locally owned active cells. We therefore count how many of those
// are present in the triangulation, as this will be required to
- // initialize the properties
+ // initialize the properties.
unsigned int n_locally_owned_cells = 0;
for (const auto &cell : particle_triangulation.active_cell_iterators())
if (cell->is_locally_owned())
n_locally_owned_cells++;
- // We generate an empty vector of properties. We will fix the properties
- // of the particles once they are generated.
+ // We generate an empty vector of properties. We will attribute the
+ // properties to the particles once they are generated.
std::vector<std::vector<double>> properties(n_locally_owned_cells,
std::vector<double>(dim + 1,
0.));
// We generate the particles at the position of a single
- // points quadrature
+ // point quadrature. Consequently, one particle will be generated
+ // at the centroid of each cell.
Particles::Generators::quadrature_points(particle_triangulation,
QGauss<dim>(1),
global_bounding_boxes,
particle_handler,
mapping,
properties);
- // Displaying the total number of generated particles in the domain
pcout << "Number of particles inserted: "
<< particle_handler.n_global_particles() << std::endl;
}
// @sect4{Background DOFs and interpolation}
- // Sets up the background degree of freedom used for the velocity
- // interpolation And allocate the field vector where the entire
- // solution of the velocity field is stored
+ // This function sets up the background degrees of freedom used for the
+ // velocity interpolation and allocates the field vector where the entire
+ // solution of the velocity field is stored.
template <int dim>
void ParticleTracking<dim>::setup_background_dofs()
{
mpi_communicator);
}
- // Interpolates the Vortex velocity field to the field vector
+ // This function takes care of interpolating the
+ // vortex velocity field to the field vector. This is achieved rather easily
+ // by using the <code>VectorTools::interpolate</code> function.
template <int dim>
void ParticleTracking<dim>::interpolate_function_to_field()
{
// @sect4{Time integration of the trajectories}
// We integrate the particle trajectories
- // using an analytically defined velocity field. This is a relatively trivial
- // usage of the particles.
+ // using an analytically defined velocity field. This demonstrates a
+ // relatively trivial usage of the particles.
template <int dim>
void ParticleTracking<dim>::euler_step_analytical(double dt)
{
particle != particle_handler.end();
++particle)
{
- // Get the velocity using the current location of particle
+ // We calculate the velocity of the particles using their current
+ // location.
Point<dim> particle_location = particle->get_location();
velocity.vector_value(particle_location, particle_velocity);
- // Updating the position of the particles and Setting the old position
- // equal to the new position of the particle
+ // This updates the position of the particles and sets the old position
+ // equal to the new position of the particle.
for (int d = 0; d < dim; ++d)
particle_location[d] += particle_velocity[d] * dt;
particle->set_location(particle_location);
- // Store the processor id and the particle velocity in the particle
- // properties
+ // We store the processor id (a scalar) and the particle velocity (a
+ // vector) in the particle properties. In this example, this is done
+ // purely for visualization purposes.
ArrayView<double> properties = particle->get_properties();
for (int d = 0; d < dim; ++d)
properties[d] = particle_velocity[d];
typename DoFHandler<dim>::cell_iterator(*cell, &fluid_dh);
dh_cell->get_dof_indices(dof_indices);
- // Gather the velocity information in a local vector to prevent
+ // This gathers the velocity information in a local vector to prevent
// dynamically re-accessing everything when there are multiple particles
- // in a cell
+ // in a cell.
for (unsigned int j = 0; j < fluid_fe.dofs_per_cell; ++j)
{
dof_data_per_cell[j] = field_relevant(dof_indices[j]);
}
- // Compute the velocity at the particle locations by evaluating
+ // This computes the velocity at the particle locations by evaluating
// the finite element solution at the position of the particles.
// This is essentially an optimized version of the particle advection
// functionality in step-19, but instead of creating quadrature
particle_location[d] += particle_velocity[d] * dt;
particle->set_location(particle_location);
- // Store the particle velocity and the processor id in the particle
- // properties
+ // Again, we store the particle velocity and the processor id in the
+ // particle properties for visualization purposes.
ArrayView<double> properties = particle->get_properties();
for (int d = 0; d < dim; ++d)
properties[d] = particle_velocity[d];
// @sect4{Data output}
// These two functions take care of writing both the particles
- // and the background mesh to vtu with a pvtu record
-
+ // and the background mesh to vtu with a pvtu record. This ensures
+ // that the simulation results can be visualized when the simulation is
+ // launched in parallel.
template <int dim>
void ParticleTracking<dim>::output_particles(unsigned int it)
{
setup_background_dofs();
- // Set the initial property of the particles by doing an
- // explicit Euler iteration with a time-step of 0.
+ // We set the initial property of the particles by doing an
+ // explicit Euler iteration with a time-step of 0 both in the case
+ // of the analytical and the interpolated approach.
if (interpolated_velocity)
{
interpolate_function_to_field();
output_particles(discrete_time.get_step_number());
output_background(discrete_time.get_step_number());
- // Looping over time in order to move the particles
+ // The particles are advected by looping over time.
while (!discrete_time.is_at_end())
{
discrete_time.advance_time();
else
euler_step_analytical(discrete_time.get_previous_step_size());
+ // After the particles have been moved, it is necessary to identify
+ // in which cell they now reside. This is achieved by calling
+ // <code>sort_particles_into_subdomains_and_cells</code>
particle_handler.sort_particles_into_subdomains_and_cells();
if ((discrete_time.get_step_number() % par.output_frequency) == 0)