From db2486dcece3b0fc643454738d7d7402623acac2 Mon Sep 17 00:00:00 2001
From: Bruno
Date: Thu, 4 Feb 2021 08:58:34 -0500
Subject: [PATCH] WIP Add step-68 as a test

---
 tests/particles/step-68.cc                    | 788 ++++++++++++++++++
 .../step-68.with_p4est=true.mpirun=2.output   |  64 ++
 .../particles/step-68.with_p4est=true.output  |  64 ++
 3 files changed, 916 insertions(+)
 create mode 100644 tests/particles/step-68.cc
 create mode 100644 tests/particles/step-68.with_p4est=true.mpirun=2.output
 create mode 100644 tests/particles/step-68.with_p4est=true.output

diff --git a/tests/particles/step-68.cc b/tests/particles/step-68.cc
new file mode 100644
index 0000000000..9343555003
--- /dev/null
+++ b/tests/particles/step-68.cc
@@ -0,0 +1,788 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2020 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Authors: Bruno Blais, Toni El Geitani Nehme, Rene Gassmoeller, Peter Munch
+ */
+
+// @sect3{Include files}
+
+#include <deal.II/base/bounding_box.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/discrete_time.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/parameter_acceptor.h>
+#include <deal.II/base/timer.h>
+
+#include <deal.II/distributed/cell_weights.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/mapping_q1.h>
+
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/vector_tools.h>
+
+// From the following include file we import the ParticleHandler class
+// that allows you to manage
+// a collection of particles (objects of type Particles::Particle),
+// representing a collection of points with some attached properties
+// (e.g., an id) floating on a parallel::distributed::Triangulation. The
+// methods and classes in the namespace Particles allow one to easily
+// implement Particle-In-Cell methods and particle tracing on distributed
+// triangulations:
+#include <deal.II/particles/particle_handler.h>
+
+// We import the particle generators,
+// which allow us to insert the particles. In the present step, the
+// particles are globally inserted using a non-matching hyper-shell
+// triangulation:
+#include <deal.II/particles/generators.h>
+
+// Since the particles do not form a triangulation, they have their
+// own specific DataOut class which will enable us to write them
+// to the commonly used parallel vtu format (or any number of other file
+// formats):
+#include <deal.II/particles/data_out.h>
+
+#include <cmath>
+#include <iostream>
+
+#include "../tests.h"
+
+
+
+namespace Step68
+{
+  using namespace dealii;
+
+  // @sect3{Velocity profile}
+
+  // The velocity profile is provided as a Function object.
+  // This function is hard-coded within
+  // the example.
+  template <int dim>
+  class Vortex : public Function<dim>
+  {
+  public:
+    Vortex()
+      : Function<dim>(dim)
+    {}
+
+
+    virtual void
+    vector_value(const Point<dim> &point,
+                 Vector<double> &  values) const override;
+  };
+
+
+  // The velocity profile for the Rayleigh-Kothe vortex is time-dependent.
+  // Consequently, the current time in the
+  // simulation (t) must be gathered from the Function object.
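+  //
+  // Written out, the two-dimensional velocity field implemented below reads
+  //
+  //   u_x(x, y, t) = -2 cos(pi t / T) sin^2(pi x) sin(pi y) cos(pi y),
+  //   u_y(x, y, t) =  2 cos(pi t / T) sin^2(pi y) sin(pi x) cos(pi x),
+  //
+  // with T = 4. The factor cos(pi t / T) reverses the flow at t = T / 2, so
+  // that the particles travel back toward their initial positions as t
+  // approaches T.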
+  template <int dim>
+  void
+  Vortex<dim>::vector_value(const Point<dim> &point,
+                            Vector<double> &  values) const
+  {
+    const double T = 4;
+    const double t = this->get_time();
+
+    const double px = numbers::PI * point(0);
+    const double py = numbers::PI * point(1);
+    const double pt = numbers::PI / T * t;
+
+    values[0] = -2 * std::cos(pt) * std::pow(std::sin(px), 2) *
+                std::sin(py) * std::cos(py);
+    values[1] = 2 * std::cos(pt) * std::pow(std::sin(py), 2) *
+                std::sin(px) * std::cos(px);
+    if (dim == 3)
+      {
+        values[2] = 0;
+      }
+  }
+
+
+
+  // @sect3{The ParticleTracking class declaration}
+
+  // We are now ready to introduce the main class of our tutorial program.
+  template <int dim>
+  class ParticleTracking
+  {
+  public:
+    ParticleTracking(const bool interpolated_velocity);
+    void
+    run();
+
+  private:
+    // This function is responsible for the initial
+    // generation of the particles on top of the background grid.
+    void
+    generate_particles();
+
+    // When the velocity profile is interpolated to the position of the
+    // particles, it must first be stored using degrees of freedom.
+    // Consequently, as is the case for other parallel cases (e.g. step-40),
+    // we initialize the degrees of freedom on the background grid.
+    void
+    setup_background_dofs();
+
+    // In one of the test cases, the function is mapped to the background
+    // grid and a finite element interpolation is used to calculate the
+    // velocity at the particle location. This function calculates the value
+    // of the function at the support points of the triangulation.
+    void
+    interpolate_function_to_field();
+
+    // The next two functions are responsible for carrying out a step of the
+    // explicit Euler time integration for the cases where the velocity
+    // field is interpolated at the positions of the particles or calculated
+    // analytically, respectively.
+    void
+    euler_step_interpolated(const double dt);
+    void
+    euler_step_analytical(const double dt);
+
+    // The `cell_weight()` function indicates to the triangulation how much
+    // computational work is expected to happen on this cell, and
+    // consequently how the domain needs to be partitioned so that every MPI
+    // rank receives a roughly equal amount of work (potentially not an
+    // equal number of cells). While the function is called from the
+    // outside, it is connected to the corresponding signal from inside this
+    // class, therefore it can be `private`.
+    unsigned int
+    cell_weight(
+      const typename parallel::distributed::Triangulation<dim>::cell_iterator
+        &cell,
+      const typename parallel::distributed::Triangulation<dim>::CellStatus
+        status) const;
+
+    // The following two functions are responsible for outputting the
+    // simulation results for the particles and for the velocity profile on
+    // the background mesh, respectively.
+    void
+    output_particles(const unsigned int it);
+    void
+    output_background(const unsigned int it);
+
+    // This function writes the particle locations to the deal.II log so
+    // that the test output can be compared.
+    void
+    log_particles();
+
+    MPI_Comm                                  mpi_communicator;
+    parallel::distributed::Triangulation<dim> background_triangulation;
+    Particles::ParticleHandler<dim>           particle_handler;
+
+    DoFHandler<dim>                            fluid_dh;
+    FESystem<dim>                              fluid_fe;
+    MappingQ1<dim>                             mapping;
+    LinearAlgebra::distributed::Vector<double> velocity_field;
+
+    Vortex<dim> velocity;
+
+    bool interpolated_velocity;
+
+    // Simulation parameters
+    std::string output_directory = "./";
+
+    static const unsigned int velocity_degree       = 1;
+    static constexpr double   time_step             = 0.002;
+    static constexpr double   final_time            = 4.0;
+    static const unsigned int output_frequency      = 1000;
+    static const unsigned int repartition_frequency = 1000;
+
+    // We allow every grid to be refined independently.
+    // In this tutorial, no physics is resolved on the fluid grid, and its
+    // velocity is calculated analytically.
+    static const unsigned int fluid_refinement              = 3;
+    static const unsigned int particle_insertion_refinement = 1;
+  };
+
+
+
+  // @sect3{The ParticleTracking class implementation}
+
+  // @sect4{Constructor}
+
+  // The constructors and destructors are rather trivial. They are very
+  // similar to what is done in step-40. We set the communicator on which we
+  // want to work to all machines available (`MPI_COMM_WORLD`); in this
+  // test, output is written to the deallog by processor zero only.
+
+  template <int dim>
+  ParticleTracking<dim>::ParticleTracking(const bool interpolated_velocity)
+    : mpi_communicator(MPI_COMM_WORLD)
+    , background_triangulation(mpi_communicator)
+    , fluid_dh(background_triangulation)
+    , fluid_fe(FE_Q<dim>(velocity_degree), dim)
+    , interpolated_velocity(interpolated_velocity)
+
+  {}
+
+
+
+  // @sect4{Cell weight}
+
+  // This function is the key component that allows us to dynamically
+  // balance the computational load for this example. The function
+  // attributes a weight to every cell that represents the computational
+  // work on this cell. Here the majority of work is expected to happen on
+  // the particles, therefore the return value of this function
+  // (representing "work for this cell") is calculated based on the number
+  // of particles in the current cell. The function is
+  // connected to the cell_weight() signal inside the triangulation, and
+  // will be called once per cell, whenever the triangulation repartitions
+  // the domain between ranks (the connection is created inside the
+  // generate_particles() function of this class).
+  template <int dim>
+  unsigned int
+  ParticleTracking<dim>::cell_weight(
+    const typename parallel::distributed::Triangulation<dim>::cell_iterator
+      &cell,
+    const typename parallel::distributed::Triangulation<dim>::CellStatus
+      status) const
+  {
+    // We do not assign any weight to cells we do not own (i.e., artificial
+    // or ghost cells).
+    if (!cell->is_locally_owned())
+      return 0;
+
+    // This determines how important particle work is compared to cell
+    // work (by default every cell has a weight of 1000).
+    // We set the weight per particle much higher to indicate that
+    // the particle load is the only one that is important to distribute the
+    // cells in this example. The optimal value of this number depends on
+    // the application and can range from 0 (cheap particle operations,
+    // expensive cell operations) to much larger than 1000 (expensive
+    // particle operations, cheap cell operations, as presumed in this
+    // example).
+    const unsigned int particle_weight = 10000;
+
+    // This example does not use adaptive refinement, therefore every cell
+    // should have the status `CELL_PERSIST`. However this function can also
+    // be used to distribute load during refinement, therefore we consider
+    // refined or coarsened cells as well.
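+    //
+    // Both branches below return n_particles_in_cell * particle_weight: for
+    // example, an owned cell containing three particles is reported with a
+    // weight of 3 * 10000 = 30000, i.e., thirty times the default weight of
+    // an empty cell.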
+    if (status == parallel::distributed::Triangulation<dim>::CELL_PERSIST ||
+        status == parallel::distributed::Triangulation<dim>::CELL_REFINE)
+      {
+        const unsigned int n_particles_in_cell =
+          particle_handler.n_particles_in_cell(cell);
+        return n_particles_in_cell * particle_weight;
+      }
+    else if (status == parallel::distributed::Triangulation<dim>::CELL_COARSEN)
+      {
+        unsigned int n_particles_in_cell = 0;
+
+        for (unsigned int child_index = 0; child_index < cell->n_children();
+             ++child_index)
+          n_particles_in_cell +=
+            particle_handler.n_particles_in_cell(cell->child(child_index));
+
+        return n_particles_in_cell * particle_weight;
+      }
+
+    Assert(false, ExcInternalError());
+    return 0;
+  }
+
+
+
+  // @sect4{Particles generation}
+
+  // This function generates the tracer particles and the background
+  // triangulation on which these particles evolve.
+  template <int dim>
+  void
+  ParticleTracking<dim>::generate_particles()
+  {
+    // We create a hyper cube triangulation which we globally refine. This
+    // triangulation covers the full trajectory of the particles.
+    GridGenerator::hyper_cube(background_triangulation, 0, 1);
+    background_triangulation.refine_global(fluid_refinement);
+
+    // In order to consider the particles when repartitioning the
+    // triangulation, the algorithm needs to know three things:
+    //
+    // 1. How much weight to assign to each cell (how many particles are in
+    //    there);
+    // 2. How to pack the particles before shipping data around;
+    // 3. How to unpack the particles after repartitioning.
+    //
+    // We attach the correct functions to the signals inside
+    // parallel::distributed::Triangulation. These signals will be called
+    // every time the repartition() function is called. These connections
+    // only need to be created once, so we might as well have set them up in
+    // the constructor of this class, but for the purpose of this example we
+    // want to group the particle related instructions.
+    background_triangulation.signals.cell_weight.connect(
+      [&](
+        const typename parallel::distributed::Triangulation<dim>::cell_iterator
+          &cell,
+        const typename parallel::distributed::Triangulation<dim>::CellStatus
+          status) -> unsigned int { return this->cell_weight(cell, status); });
+
+    background_triangulation.signals.pre_distributed_repartition.connect(
+      [this]() { this->particle_handler.register_store_callback_function(); });
+
+    background_triangulation.signals.post_distributed_repartition.connect(
+      [&]() { this->particle_handler.register_load_callback_function(false); });
+
+    // This initializes the background triangulation where the particles are
+    // living and the number of properties of the particles.
+    particle_handler.initialize(background_triangulation, mapping, 1 + dim);
+
+    // We create a particle triangulation which is solely used to generate
+    // the points which will be used to insert the particles. This
+    // triangulation is a hyper shell which is offset from the
+    // center of the simulation domain. This will be used to generate a
+    // disk filled with particles which will allow an easy monitoring
+    // of the motion due to the vortex.
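+    //
+    // In 2D, the hyper shell below consists of 6 coarse cells; one global
+    // refinement turns these into 24 cells, and since a single particle is
+    // later inserted at the centroid of each cell, this is where the
+    // "Number of particles inserted: 24" line of the expected test output
+    // comes from.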
+    Point<dim> center;
+    center[0] = 0.5;
+    center[1] = 0.75;
+    if (dim == 3)
+      center[2] = 0.5;
+
+    const double outer_radius = 0.15;
+    const double inner_radius = 0.01;
+
+    parallel::distributed::Triangulation<dim> particle_triangulation(
+      MPI_COMM_WORLD);
+
+    GridGenerator::hyper_shell(
+      particle_triangulation, center, inner_radius, outer_radius, 6);
+    particle_triangulation.refine_global(particle_insertion_refinement);
+
+    // We generate the necessary bounding boxes for the particle generator.
+    // These bounding boxes are required to quickly identify in which
+    // process's subdomain the inserted particle lies, and which cell owns it.
+    const auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+      background_triangulation, IteratorFilters::LocallyOwnedCell());
+    const auto global_bounding_boxes =
+      Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
+    // We generate an empty vector of properties. We will attribute the
+    // properties to the particles once they are generated.
+    std::vector<std::vector<double>> properties(
+      particle_triangulation.n_locally_owned_active_cells(),
+      std::vector<double>(dim + 1, 0.));
+
+    // We generate the particles at the position of a single
+    // point quadrature. Consequently, one particle will be generated
+    // at the centroid of each cell.
+    Particles::Generators::quadrature_points(particle_triangulation,
+                                             QMidpoint<dim>(),
+                                             global_bounding_boxes,
+                                             particle_handler,
+                                             mapping,
+                                             properties);
+
+    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+      deallog << "Number of particles inserted: "
+              << particle_handler.n_global_particles() << std::endl;
+  }
+
+
+
+  // @sect4{Background DOFs and interpolation}
+
+  // This function sets up the background degrees of freedom used for the
+  // velocity interpolation and allocates the field vector where the entire
+  // solution of the velocity field is stored.
+  template <int dim>
+  void
+  ParticleTracking<dim>::setup_background_dofs()
+  {
+    fluid_dh.distribute_dofs(fluid_fe);
+    const IndexSet locally_owned_dofs = fluid_dh.locally_owned_dofs();
+    IndexSet       locally_relevant_dofs;
+    DoFTools::extract_locally_relevant_dofs(fluid_dh, locally_relevant_dofs);
+
+    velocity_field.reinit(locally_owned_dofs,
+                          locally_relevant_dofs,
+                          mpi_communicator);
+  }
+
+
+
+  // This function takes care of interpolating the
+  // vortex velocity field to the field vector. This is achieved rather
+  // easily by using the VectorTools::interpolate() function.
+  template <int dim>
+  void
+  ParticleTracking<dim>::interpolate_function_to_field()
+  {
+    velocity_field.zero_out_ghosts();
+    VectorTools::interpolate(mapping, fluid_dh, velocity, velocity_field);
+    velocity_field.update_ghost_values();
+  }
+
+
+
+  // @sect4{Time integration of the trajectories}
+
+  // We integrate the particle trajectories
+  // using an analytically defined velocity field. This demonstrates a
+  // relatively trivial usage of the particles.
+  template <int dim>
+  void
+  ParticleTracking<dim>::euler_step_analytical(const double dt)
+  {
+    const unsigned int this_mpi_rank =
+      Utilities::MPI::this_mpi_process(mpi_communicator);
+    Vector<double> particle_velocity(dim);
+
+    // Looping over all particles in the domain using a particle iterator:
+    for (auto &particle : particle_handler)
+      {
+        // We calculate the velocity of the particles using their current
+        // location.
+        Point<dim> particle_location = particle.get_location();
+        velocity.vector_value(particle_location, particle_velocity);
+
+        // This updates the position of the particles and sets the old
+        // position equal to the new position of the particle.
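+        // The update is a forward (explicit) Euler step:
+        //   x_{n+1} = x_n + u(x_n, t_n) * dt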
+        for (int d = 0; d < dim; ++d)
+          particle_location[d] += particle_velocity[d] * dt;
+
+        particle.set_location(particle_location);
+
+        // We store the processor id (a scalar) and the particle velocity (a
+        // vector) in the particle properties. In this example, this is done
+        // purely for visualization purposes.
+        ArrayView<double> properties = particle.get_properties();
+        for (int d = 0; d < dim; ++d)
+          properties[d] = particle_velocity[d];
+        properties[dim] = this_mpi_rank;
+      }
+  }
+
+
+
+  // In contrast to the previous function, in this function we
+  // integrate the particle trajectories by interpolating the value of
+  // the velocity field at the degrees of freedom to the position of
+  // the particles.
+  template <int dim>
+  void
+  ParticleTracking<dim>::euler_step_interpolated(const double dt)
+  {
+    Vector<double> local_dof_values(fluid_fe.dofs_per_cell);
+
+    // We loop over all the local particles. Although this could be achieved
+    // directly by looping over all the cells, this would force us
+    // to loop over numerous cells which do not contain particles.
+    // Rather, we loop over all the particles: we get a reference
+    // to the cell in which each particle lies and then loop over all
+    // particles within that cell. This enables us to gather the values of
+    // the velocity out of the `velocity_field` vector once and use them for
+    // all particles that lie within the cell.
+    auto particle = particle_handler.begin();
+    while (particle != particle_handler.end())
+      {
+        const auto cell =
+          particle->get_surrounding_cell(background_triangulation);
+        const auto dh_cell =
+          typename DoFHandler<dim>::cell_iterator(*cell, &fluid_dh);
+
+        dh_cell->get_dof_values(velocity_field, local_dof_values);
+
+        // Next, compute the velocity at the particle locations by
+        // evaluating the finite element solution at the position of the
+        // particles. This is essentially an optimized version of the
+        // particle advection functionality in step-19, but instead of
+        // creating quadrature objects and FEValues objects for each cell,
+        // we do the evaluation by hand, which is somewhat more efficient
+        // and only matters for this tutorial, because the particle work is
+        // the dominant cost of the whole program.
+        const auto pic = particle_handler.particles_in_cell(cell);
+        Assert(pic.begin() == particle, ExcInternalError());
+        for (auto &p : pic)
+          {
+            const Point<dim> reference_location = p.get_reference_location();
+            Tensor<1, dim>   particle_velocity;
+            for (unsigned int j = 0; j < fluid_fe.dofs_per_cell; ++j)
+              {
+                const auto comp_j = fluid_fe.system_to_component_index(j);
+
+                particle_velocity[comp_j.first] +=
+                  fluid_fe.shape_value(j, reference_location) *
+                  local_dof_values[j];
+              }
+
+            Point<dim> particle_location = particle->get_location();
+            for (int d = 0; d < dim; ++d)
+              particle_location[d] += particle_velocity[d] * dt;
+            p.set_location(particle_location);
+
+            // Again, we store the particle velocity and the processor id in
+            // the particle properties for visualization purposes.
+            ArrayView<double> properties = p.get_properties();
+            for (int d = 0; d < dim; ++d)
+              properties[d] = particle_velocity[d];
+
+            properties[dim] =
+              Utilities::MPI::this_mpi_process(mpi_communicator);
+
+            ++particle;
+          }
+      }
+  }
+
+
+
+  // @sect4{Data output}
+
+  // The next two functions take care of writing both the particles
+  // and the background mesh to vtu with a pvtu record. This ensures
+  // that the simulation results can be visualized when the simulation is
+  // launched in parallel.
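+  //
+  // The write_vtu_with_pvtu_record() calls used below write .vtu files from
+  // the MPI ranks plus a .pvtu record that groups them; the trailing
+  // argument (6) sets the number of digits used for the output counter in
+  // the generated file names.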
+  template <int dim>
+  void
+  ParticleTracking<dim>::output_particles(const unsigned int it)
+  {
+    Particles::DataOut<dim> particle_output;
+
+    std::vector<std::string> solution_names(dim, "velocity");
+    solution_names.push_back("process_id");
+
+    std::vector<DataComponentInterpretation::DataComponentInterpretation>
+      data_component_interpretation(
+        dim, DataComponentInterpretation::component_is_part_of_vector);
+    data_component_interpretation.push_back(
+      DataComponentInterpretation::component_is_scalar);
+
+    particle_output.build_patches(particle_handler,
+                                  solution_names,
+                                  data_component_interpretation);
+    const std::string output_folder(output_directory);
+    const std::string file_name(interpolated_velocity ?
+                                  "interpolated-particles" :
+                                  "analytical-particles");
+
+    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+      deallog << "Writing particle output file: " << file_name << "-" << it
+              << std::endl;
+
+    particle_output.write_vtu_with_pvtu_record(
+      output_folder, file_name, it, mpi_communicator, 6);
+  }
+
+
+
+  template <int dim>
+  void
+  ParticleTracking<dim>::output_background(const unsigned int it)
+  {
+    std::vector<std::string> solution_names(dim, "velocity");
+    std::vector<DataComponentInterpretation::DataComponentInterpretation>
+      data_component_interpretation(
+        dim, DataComponentInterpretation::component_is_part_of_vector);
+
+    DataOut<dim> data_out;
+
+    // Attach the solution data to the data_out object.
+    data_out.attach_dof_handler(fluid_dh);
+    data_out.add_data_vector(velocity_field,
+                             solution_names,
+                             DataOut<dim>::type_dof_data,
+                             data_component_interpretation);
+    Vector<float> subdomain(background_triangulation.n_active_cells());
+    for (unsigned int i = 0; i < subdomain.size(); ++i)
+      subdomain(i) = background_triangulation.locally_owned_subdomain();
+    data_out.add_data_vector(subdomain, "subdomain");
+
+    data_out.build_patches(mapping);
+
+    const std::string output_folder(output_directory);
+    const std::string file_name("background");
+
+    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+      deallog << "Writing background field file: " << file_name << "-" << it
+              << std::endl;
+
+    data_out.write_vtu_with_pvtu_record(
+      output_folder, file_name, it, mpi_communicator, 6);
+  }
+
+
+
+  // @sect4{Running the simulation}
+
+  // This function orchestrates the entire simulation. It is very similar
+  // to the other time-dependent tutorial programs -- take step-21 or
+  // step-26 as an example. Note that we use the DiscreteTime class to
+  // monitor the time, the time step and the step number. This function is
+  // relatively straightforward.
+  template <int dim>
+  void
+  ParticleTracking<dim>::run()
+  {
+    DiscreteTime discrete_time(0, final_time, time_step);
+
+    generate_particles();
+
+    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+      deallog << "Repartitioning triangulation after particle generation"
+              << std::endl;
+    background_triangulation.repartition();
+
+    // We set the initial properties of the particles by doing an
+    // explicit Euler iteration with a time step of 0 both in the case
+    // of the analytical and the interpolated approach.
+    if (interpolated_velocity)
+      {
+        setup_background_dofs();
+        interpolate_function_to_field();
+        euler_step_interpolated(0.);
+      }
+    else
+      euler_step_analytical(0.);
+
+    output_particles(discrete_time.get_step_number());
+    if (interpolated_velocity)
+      output_background(discrete_time.get_step_number());
+
+    // The particles are advected by looping over time.
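+    //
+    // With final_time = 4.0 and time_step = 0.002, the loop below performs
+    // 2000 steps; since output_frequency = 1000, output is written at steps
+    // 1000 and 2000, in addition to the initial output at step 0 written
+    // above. This matches the three output files per run recorded in the
+    // expected test output.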
+    while (!discrete_time.is_at_end())
+      {
+        discrete_time.advance_time();
+        velocity.set_time(discrete_time.get_previous_time());
+
+        if ((discrete_time.get_step_number() % repartition_frequency) == 0)
+          {
+            background_triangulation.repartition();
+            if (interpolated_velocity)
+              setup_background_dofs();
+          }
+
+        if (interpolated_velocity)
+          {
+            interpolate_function_to_field();
+            euler_step_interpolated(discrete_time.get_previous_step_size());
+          }
+        else
+          euler_step_analytical(discrete_time.get_previous_step_size());
+
+        // After the particles have been moved, it is necessary to identify
+        // in which cell they now reside. This is achieved by calling
+        // sort_particles_into_subdomains_and_cells().
+        particle_handler.sort_particles_into_subdomains_and_cells();
+
+        if ((discrete_time.get_step_number() % output_frequency) == 0)
+          {
+            output_particles(discrete_time.get_step_number());
+            if (interpolated_velocity)
+              output_background(discrete_time.get_step_number());
+          }
+      }
+
+    log_particles();
+  }
+
+
+
+  template <int dim>
+  void
+  ParticleTracking<dim>::log_particles()
+  {
+    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+      deallog << "Particles location" << std::endl;
+
+    for (unsigned int proc = 0;
+         proc < Utilities::MPI::n_mpi_processes(mpi_communicator);
+         ++proc)
+      {
+        if (Utilities::MPI::this_mpi_process(mpi_communicator) == proc)
+          {
+            for (const auto &part : particle_handler)
+              deallog << part.get_location() << std::endl;
+          }
+        MPI_Barrier(mpi_communicator);
+      }
+  }
+
+
+
+} // namespace Step68
+
+
+
+// @sect3{The main() function}
+
+// The remainder of the code, the `main()` function, is standard.
+// Note that we run the particle tracking once with the analytical velocity
+// and once with the interpolated velocity, and produce both results.
+int
+main(int argc, char *argv[])
+{
+  using namespace Step68;
+  using namespace dealii;
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  initlog();
+  deallog.depth_console(1);
+
+  try
+    {
+      {
+        Step68::ParticleTracking<2> particle_tracking(false);
+        particle_tracking.run();
+      }
+      {
+        Step68::ParticleTracking<2> particle_tracking(true);
+        particle_tracking.run();
+      }
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!"
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
diff --git a/tests/particles/step-68.with_p4est=true.mpirun=2.output b/tests/particles/step-68.with_p4est=true.mpirun=2.output
new file mode 100644
index 0000000000..6788faebb4
--- /dev/null
+++ b/tests/particles/step-68.with_p4est=true.mpirun=2.output
@@ -0,0 +1,64 @@
+
+DEAL::Number of particles inserted: 24
+DEAL::Repartitioning triangulation after particle generation
+DEAL::Writing particle output file: analytical-particles-0
+DEAL::Writing particle output file: analytical-particles-1000
+DEAL::Writing particle output file: analytical-particles-2000
+DEAL::Particles location
+DEAL::0.406412 0.734191
+DEAL::0.431983 0.684517
+DEAL::0.475595 0.747746
+DEAL::0.479874 0.652444
+DEAL::0.486369 0.728375
+DEAL::0.406474 0.787612
+DEAL::0.433208 0.833310
+DEAL::0.480928 0.859777
+DEAL::0.475771 0.769133
+DEAL::0.486655 0.787079
+DEAL::0.505864 0.716196
+DEAL::0.529232 0.714603
+DEAL::0.549978 0.724169
+DEAL::0.540635 0.648620
+DEAL::0.561942 0.742488
+DEAL::0.595944 0.672859
+DEAL::0.627051 0.720283
+DEAL::0.505916 0.796997
+DEAL::0.538993 0.858949
+DEAL::0.528847 0.796079
+DEAL::0.549447 0.784270
+DEAL::0.561753 0.764590
+DEAL::0.592056 0.829624
+DEAL::0.624984 0.778592
+DEAL::Number of particles inserted: 24
+DEAL::Repartitioning triangulation after particle generation
+DEAL::Writing particle output file: interpolated-particles-0
+DEAL::Writing background field file: background-0
+DEAL::Writing particle output file: interpolated-particles-1000
+DEAL::Writing background field file: background-1000
+DEAL::Writing particle output file: interpolated-particles-2000
+DEAL::Writing background field file: background-2000
+DEAL::Particles location
+DEAL::0.400130 0.731196
+DEAL::0.429540 0.682369
+DEAL::0.469064 0.746158
+DEAL::0.481181 0.651665
+DEAL::0.480772 0.726826
+DEAL::0.405024 0.786934
+DEAL::0.430296 0.832760
+DEAL::0.478813 0.859498
+DEAL::0.474379 0.768388
+DEAL::0.484713 0.786526
+DEAL::0.501301 0.714975
+DEAL::0.524473 0.713858
+DEAL::0.544304 0.724010
+DEAL::0.555643 0.742845
+DEAL::0.541116 0.647681
+DEAL::0.592865 0.672731
+DEAL::0.621474 0.721859
+DEAL::0.503905 0.796598
+DEAL::0.536869 0.858844
+DEAL::0.526482 0.795790
+DEAL::0.547001 0.784040
+DEAL::0.559416 0.764401
+DEAL::0.589307 0.829611
+DEAL::0.622637 0.778386
diff --git a/tests/particles/step-68.with_p4est=true.output b/tests/particles/step-68.with_p4est=true.output
new file mode 100644
index 0000000000..6788faebb4
--- /dev/null
+++ b/tests/particles/step-68.with_p4est=true.output
@@ -0,0 +1,64 @@
+
+DEAL::Number of particles inserted: 24
+DEAL::Repartitioning triangulation after particle generation
+DEAL::Writing particle output file: analytical-particles-0
+DEAL::Writing particle output file: analytical-particles-1000
+DEAL::Writing particle output file: analytical-particles-2000
+DEAL::Particles location
+DEAL::0.406412 0.734191
+DEAL::0.431983 0.684517
+DEAL::0.475595 0.747746
+DEAL::0.479874 0.652444
+DEAL::0.486369 0.728375
+DEAL::0.406474 0.787612
+DEAL::0.433208 0.833310
+DEAL::0.480928 0.859777
+DEAL::0.475771 0.769133
+DEAL::0.486655 0.787079
+DEAL::0.505864 0.716196
+DEAL::0.529232 0.714603
+DEAL::0.549978 0.724169
+DEAL::0.540635 0.648620
+DEAL::0.561942 0.742488
+DEAL::0.595944 0.672859
+DEAL::0.627051 0.720283
+DEAL::0.505916 0.796997
+DEAL::0.538993 0.858949
+DEAL::0.528847 0.796079
+DEAL::0.549447 0.784270
+DEAL::0.561753 0.764590
+DEAL::0.592056 0.829624
+DEAL::0.624984 0.778592
+DEAL::Number of particles inserted: 24
+DEAL::Repartitioning triangulation after particle generation
+DEAL::Writing particle output file: interpolated-particles-0
+DEAL::Writing background field file: background-0
+DEAL::Writing particle output file: interpolated-particles-1000
+DEAL::Writing background field file: background-1000
+DEAL::Writing particle output file: interpolated-particles-2000
+DEAL::Writing background field file: background-2000
+DEAL::Particles location
+DEAL::0.400130 0.731196
+DEAL::0.429540 0.682369
+DEAL::0.469064 0.746158
+DEAL::0.481181 0.651665
+DEAL::0.480772 0.726826
+DEAL::0.405024 0.786934
+DEAL::0.430296 0.832760
+DEAL::0.478813 0.859498
+DEAL::0.474379 0.768388
+DEAL::0.484713 0.786526
+DEAL::0.501301 0.714975
+DEAL::0.524473 0.713858
+DEAL::0.544304 0.724010
+DEAL::0.555643 0.742845
+DEAL::0.541116 0.647681
+DEAL::0.592865 0.672731
+DEAL::0.621474 0.721859
+DEAL::0.503905 0.796598
+DEAL::0.536869 0.858844
+DEAL::0.526482 0.795790
+DEAL::0.547001 0.784040
+DEAL::0.559416 0.764401
+DEAL::0.589307 0.829611
+DEAL::0.622637 0.778386
-- 
2.39.5