From: Rene Gassmoeller
Date: Fri, 22 May 2020 17:11:17 +0000 (-0700)
Subject: Add load balancing.
X-Git-Tag: v9.3.0-rc1~1101^2~33
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=977e0e01eaac63e62b7cd7f83ba952e260c8228b;p=dealii.git

Add load balancing.
---

diff --git a/examples/step-x/step-x.cc b/examples/step-x/step-x.cc
index e4bac1f9f4..ff5651b50e 100644
--- a/examples/step-x/step-x.cc
+++ b/examples/step-x/step-x.cc
@@ -103,6 +103,7 @@ namespace Stepx
   double       time_step        = 0.002;
   double       final_time       = 4.0;
   unsigned int output_frequency = 10;
+  unsigned int repartition_frequency = 5;
 
   // We allow every grid to be refined independently. In this tutorial, no
   // physics is resolved on the fluid grid, and its velocity is calculated
@@ -125,6 +126,10 @@ namespace Stepx
                   output_frequency,
                   "Iteration frequency at which output results are written");
 
+    add_parameter("Repartition frequency",
+                  repartition_frequency,
+                  "Iteration frequency at which the mesh is load balanced");
+
     add_parameter("Output directory", output_directory);
 
     add_parameter("Time step", time_step);
@@ -184,11 +189,13 @@ namespace Stepx
     void
     run_analytical_velocity();
 
+    unsigned int
+    cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+                const typename parallel::distributed::Triangulation<dim>::CellStatus status);
+
   private:
     void
     particles_generation();
 
-    // void
-    // parallel_weight();
     void
     setup_background_dofs();
 
     void
@@ -239,6 +246,39 @@ namespace Stepx
   {}
 
+  template <int dim>
+  unsigned int
+  ParticleTracking<dim>::cell_weight(const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+                                     const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+  {
+    // Assign no weight to cells that this process does not own.
+    if (cell->is_active() && !cell->is_locally_owned())
+      return 0;
+
+    // This determines how important the distribution of particles is compared
+    // to the distribution of cells (every cell carries a base weight of 1000).
+    // We set this number much higher to indicate that the particle load is the
+    // only one that is important to distribute in this example.
+    const unsigned int particle_weight = 10000;
+
+    if (status == parallel::distributed::Triangulation<dim>::CELL_PERSIST
+        || status == parallel::distributed::Triangulation<dim>::CELL_REFINE)
+      {
+        const unsigned int n_particles_in_cell = particle_handler.n_particles_in_cell(cell);
+        return n_particles_in_cell * particle_weight;
+      }
+    else if (status == parallel::distributed::Triangulation<dim>::CELL_COARSEN)
+      {
+        unsigned int n_particles_in_cell = 0;
+
+        for (unsigned int child_index = 0; child_index < GeometryInfo<dim>::max_children_per_cell; ++child_index)
+          n_particles_in_cell += particle_handler.n_particles_in_cell(cell->child(child_index));
+
+        return n_particles_in_cell * particle_weight;
+      }
+
+    Assert(false, ExcInternalError());
+    return 0;
+  }
+
   // Generation of particles using the grid where particles are generated at the
   // locations of the degrees of freedom.
   template <int dim>
   void
@@ -249,6 +289,30 @@ namespace Stepx
     GridGenerator::hyper_cube(background_triangulation, 0, 1);
     background_triangulation.refine_global(par.fluid_refinement);
 
+    // In order to consider the particles when repartitioning the triangulation,
+    // the algorithm needs to know three things:
+    // 1. How much weight to assign to each cell (i.e., how many particles are in it)
+    // 2. How to pack the particles before shipping data around
+    // 3. How to unpack the particles after repartitioning
+    // We attach the corresponding functions to the signals inside
+    // parallel::distributed::Triangulation; they will be called every time the
+    // repartition() function is called.
+    background_triangulation.signals.cell_weight.connect(
+      [&](const typename parallel::distributed::Triangulation<dim>::cell_iterator &cell,
+          const typename parallel::distributed::Triangulation<dim>::CellStatus status)
+        -> unsigned int
+      {
+        return this->cell_weight(cell, status);
+      });
+
+    background_triangulation.signals.pre_distributed_repartition.connect(std::bind(
+      &Particles::ParticleHandler<dim>::register_store_callback_function,
+      &particle_handler));
+
+    background_triangulation.signals.post_distributed_repartition.connect(std::bind(
+      &Particles::ParticleHandler<dim>::register_load_callback_function,
+      &particle_handler,
+      false));
+
     // Establish where the particles are living
     particle_handler.initialize(background_triangulation, mapping);
@@ -415,6 +479,8 @@ namespace Stepx
     std::string file_name(interpolated_velocity ? "interpolated-particles" :
                                                   "analytical-particles");
 
+    pcout << "Writing particle output file: " << file_name << "-" << it << std::endl;
+
     particle_output.write_vtu_with_pvtu_record(
       output_folder, file_name, it, mpi_communicator, 6);
   }
@@ -448,6 +514,8 @@ namespace Stepx
     std::string output_folder(par.output_directory);
     std::string file_name("background");
 
+    pcout << "Writing background field file: " << file_name << "-" << it << std::endl;
+
     data_out.write_vtu_with_pvtu_record(
       output_folder, file_name, it, mpi_communicator, 6);
   }
@@ -459,10 +527,13 @@ namespace Stepx
     DiscreteTime discrete_time(0, par.final_time, par.time_step);
 
     particles_generation();
+
+    pcout << "Repartitioning triangulation after particle generation" << std::endl;
+    background_triangulation.repartition();
+
     setup_background_dofs();
     interpolate_function_to_field();
-
     output_particles(discrete_time.get_step_number());
     output_background(discrete_time.get_step_number());
@@ -471,6 +542,14 @@ namespace Stepx
       {
         discrete_time.advance_time();
         velocity.set_time(discrete_time.get_previous_time());
+
+        if ((discrete_time.get_step_number() % par.repartition_frequency) == 0)
+          {
+            pcout << "Repartitioning triangulation after particle advection" << std::endl;
+            background_triangulation.repartition();
+            setup_background_dofs();
+          }
+
         interpolate_function_to_field();
 
         if (interpolated_velocity)
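
Note (not part of the patch): the two std::bind connections above could equivalently be written with lambdas, matching the style already used for the cell_weight signal. A minimal sketch of that alternative, assuming the same background_triangulation and particle_handler objects as in the patch:

    // Stylistic alternative to std::bind: connect the same ParticleHandler
    // callbacks through lambdas. Behavior is identical to the patch above.
    background_triangulation.signals.pre_distributed_repartition.connect(
      [&]() { particle_handler.register_store_callback_function(); });

    background_triangulation.signals.post_distributed_repartition.connect(
      [&]() { particle_handler.register_load_callback_function(false); });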