void
exchange_ghost_particles();
+ /**
+ * Update all particles that live in cells that are ghost cells to
+ * other processes. Here, updating means re-sending the current
+ * location and properties of the cached ghost particles, assuming
+ * that they have not changed the cell in which they live since the
+ * last call to exchange_ghost_particles().
+ */
+ void
+ update_ghost_particles();
+
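A minimal usage sketch (illustration only, not part of this patch; the
time loop and the particle motion are placeholders for user code):

    // build the ghost layer once, after particles were sorted into cells
    particle_handler.exchange_ghost_particles();
    for (unsigned int step = 0; step < n_time_steps; ++step)
      {
        // ... move or modify particles without changing their cells ...
        // cheaply refresh the location/properties of the ghost copies
        particle_handler.update_ghost_particles();
      }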
/**
* Callback function that should be called before every refinement
* and when writing checkpoints. This function is used to
* register the store callback with the triangulation.
*/
std::multimap<internal::LevelInd, Particle<dim, spacedim>> ghost_particles;
+ /**
+ * Set of particles that currently live in the ghost cells of the local
+ * domain, organized by the subdomain_id of the neighboring process.
+ * These particles are equivalent to the ghost entries in distributed
+ * vectors.
+ */
+ std::map<types::subdomain_id, std::vector<particle_iterator>>
+ ghost_particles_by_domain;
+
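Illustration (not part of this patch; assumes &lt;iostream&gt;): each map entry
associates a neighboring subdomain with the cached iterators to the
particles whose data will be (re-)sent to that neighbor:

    for (const auto &entry : ghost_particles_by_domain)
      std::cout << "cached " << entry.second.size()
                << " particles for subdomain " << entry.first << std::endl;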
/**
* This variable stores the global number of particles. It is
* calculated by update_cached_numbers().
, property_pool(std::make_unique<PropertyPool>(0))
, particles()
, ghost_particles()
+ , ghost_particles_by_domain()
, global_number_of_particles(0)
, global_max_particles_per_cell(0)
, next_free_particle_index(0)
, property_pool(std::make_unique<PropertyPool>(n_properties))
, particles()
, ghost_particles()
+ , ghost_particles_by_domain()
, global_number_of_particles(0)
, global_max_particles_per_cell(0)
, next_free_particle_index(0)
global_number_of_particles = particle_handler.global_number_of_particles;
global_max_particles_per_cell =
particle_handler.global_max_particles_per_cell;
- next_free_particle_index = particle_handler.next_free_particle_index;
- particles = particle_handler.particles;
- ghost_particles = particle_handler.ghost_particles;
- handle = particle_handler.handle;
+ next_free_particle_index = particle_handler.next_free_particle_index;
+ particles = particle_handler.particles;
+ ghost_particles = particle_handler.ghost_particles;
+ ghost_particles_by_domain = particle_handler.ghost_particles_by_domain;
+ handle = particle_handler.handle;
// copy dynamic properties
auto from_particle = particle_handler.begin();
// First clear the current ghost_particle information
ghost_particles.clear();
- std::map<types::subdomain_id, std::vector<particle_iterator>>
- ghost_particles_by_domain;
+ ghost_particles_by_domain.clear();
+
const std::set<types::subdomain_id> ghost_owners =
parallel_triangulation->ghost_owners();
#endif
}
+ template <int dim, int spacedim>
+ void
+ ParticleHandler<dim, spacedim>::update_ghost_particles()
+ {
+ // Nothing to do in serial computations
+ const auto parallel_triangulation =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &*triangulation);
+ if (parallel_triangulation != nullptr)
+ {
+ if (dealii::Utilities::MPI::n_mpi_processes(
+ parallel_triangulation->get_communicator()) == 1)
+ return;
+ }
+ else
+ return;
+
+#ifdef DEAL_II_WITH_MPI
+ // First clear the current ghost_particle information
+ ghost_particles.clear();
+
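+ // Re-send the cached set of ghost particles that was built by the
+ // last call to exchange_ghost_particles(); this avoids recomputing
+ // which particles have to be sent to which neighbor.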
+ send_recv_particles(ghost_particles_by_domain, ghost_particles);
+#endif
+ }
+
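Note (illustration, not part of this patch): update_ghost_particles()
reuses the ghost_particles_by_domain cache as-is, so it assumes that no
particle has changed cells since the last exchange. Once particles move
between cells, rebuild the cache before updating again:

    particle_handler.sort_particles_into_subdomains_and_cells();
    particle_handler.exchange_ghost_particles(); // rebuilds the cache
    particle_handler.update_ghost_particles();   // cheap updates valid again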
#ifdef DEAL_II_WITH_MPI
switch (status)
{
- case parallel::distributed::Triangulation<dim, spacedim>::CELL_PERSIST:
- {
+ case parallel::distributed::Triangulation<dim,
+ spacedim>::CELL_PERSIST: {
auto position_hint = particles.end();
for (const auto &particle : loaded_particles_on_cell)
{
}
break;
- case parallel::distributed::Triangulation<dim, spacedim>::CELL_COARSEN:
- {
+ case parallel::distributed::Triangulation<dim,
+ spacedim>::CELL_COARSEN: {
typename std::multimap<internal::LevelInd,
Particle<dim, spacedim>>::iterator
position_hint = particles.end();
}
break;
- case parallel::distributed::Triangulation<dim, spacedim>::CELL_REFINE:
- {
+ case parallel::distributed::Triangulation<dim,
+ spacedim>::CELL_REFINE: {
std::vector<
typename std::multimap<internal::LevelInd,
Particle<dim, spacedim>>::iterator>