From: Rene Gassmoeller
Date: Fri, 13 Oct 2017 20:03:17 +0000 (-0600)
Subject: Add ParticleHandler class
X-Git-Tag: v9.0.0-rc1~763^2~12
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=de3819151ab22ba948e941a51adba2fac50ca54a;p=dealii.git

Add ParticleHandler class
---

diff --git a/include/deal.II/particles/particle.h b/include/deal.II/particles/particle.h
index dcf5480de8..b492ecc5a5 100644
--- a/include/deal.II/particles/particle.h
+++ b/include/deal.II/particles/particle.h
@@ -25,65 +25,65 @@ DEAL_II_NAMESPACE_OPEN

 /**
- * A namespace that contains all classes that are related to the particle
- * implementation, in particular the fundamental Particle class.
+ * A namespace for all type definitions related to particles.
  */
-namespace Particles
+namespace types
 {
   /**
-   * A namespace for all type definitions related to particles.
+   * Typedef of cell level/index pair. TODO: replace this by the
+   * active_cell_index.
    */
-  namespace types
-  {
-    /**
-     * Typedef of cell level/index pair. TODO: replace this by the
-     * active_cell_index.
-     */
-    typedef std::pair<int, int> LevelInd;
+  typedef std::pair<int, int> LevelInd;

-    /* Type definitions */
+  /* Type definitions */

 #ifdef DEAL_II_WITH_64BIT_INDICES
-    /**
-     * The type used for indices of particles. While in
-     * sequential computations the 4 billion indices of 32-bit unsigned integers
-     * is plenty, parallel computations using hundreds of processes can overflow
-     * this number and we need a bigger index space. We here utilize the same
-     * build variable that controls the dof indices because the number
-     * of degrees of freedom and the number of particles are typically on the same
-     * order of magnitude.
-     *
-     * The data type always indicates an unsigned integer type.
-     */
-    typedef unsigned long long int particle_index;
+  /**
+   * The type used for indices of particles. While in
+   * sequential computations the 4 billion indices of 32-bit unsigned integers
+   * are plenty, parallel computations using hundreds of processes can overflow
+   * this number and we need a bigger index space. We here utilize the same
+   * build variable that controls the dof indices because the number
+   * of degrees of freedom and the number of particles are typically on the same
+   * order of magnitude.
+   *
+   * The data type always indicates an unsigned integer type.
+   */
+  typedef unsigned long long int particle_index;

-    /**
-     * An identifier that denotes the MPI type associated with
-     * types::global_dof_index.
-     */
+  /**
+   * An identifier that denotes the MPI type associated with
+   * types::particle_index.
+   */
 #  define PARTICLE_INDEX_MPI_TYPE MPI_UNSIGNED_LONG_LONG
 #else
-    /**
-     * The type used for indices of particles. While in
-     * sequential computations the 4 billion indices of 32-bit unsigned integers
-     * is plenty, parallel computations using hundreds of processes can overflow
-     * this number and we need a bigger index space. We here utilize the same
-     * build variable that controls the dof indices because the number
-     * of degrees of freedom and the number of particles are typically on the same
-     * order of magnitude.
-     *
-     * The data type always indicates an unsigned integer type.
-     */
-    typedef unsigned int particle_index;
+  /**
+   * The type used for indices of particles. While in
+   * sequential computations the 4 billion indices of 32-bit unsigned integers
+   * are plenty, parallel computations using hundreds of processes can overflow
+   * this number and we need a bigger index space. We here utilize the same
+   * build variable that controls the dof indices because the number
+   * of degrees of freedom and the number of particles are typically on the same
+   * order of magnitude.
+   *
+   * The data type always indicates an unsigned integer type.
+   */
+  typedef unsigned int particle_index;

-    /**
-     * An identifier that denotes the MPI type associated with
-     * types::global_dof_index.
-     */
+  /**
+   * An identifier that denotes the MPI type associated with
+   * types::particle_index.
+   */
 #  define PARTICLE_INDEX_MPI_TYPE MPI_UNSIGNED
 #endif
-  }
+}
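The macro above exists so that code communicating particle indices over MPI can always pass a datatype that matches types::particle_index, whatever the build configuration. A minimal sketch of such a use (the function name and the communicator argument are illustrative, not part of the patch):

    #include <mpi.h>

    // Compute the globally highest particle index. PARTICLE_INDEX_MPI_TYPE
    // matches types::particle_index under both branches of the #ifdef above.
    types::particle_index
    global_max_index (const types::particle_index local_max,
                      const MPI_Comm communicator)
    {
      types::particle_index global_max = 0;
      MPI_Allreduce(&local_max, &global_max, 1,
                    PARTICLE_INDEX_MPI_TYPE, MPI_MAX, communicator);
      return global_max;
    }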

+/**
+ * A namespace that contains all classes that are related to the particle
+ * implementation, in particular the fundamental Particle class.
+ */
+namespace Particles
+{
   /**
    * Base class of particles - represents a particle with position,
    * an ID number and a variable number of properties. This class
diff --git a/include/deal.II/particles/particle_handler.h b/include/deal.II/particles/particle_handler.h
new file mode 100644
index 0000000000..e00d9cec1b
--- /dev/null
+++ b/include/deal.II/particles/particle_handler.h
@@ -0,0 +1,497 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_particles_particle_handler_h
+#define dealii_particles_particle_handler_h
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Particles
+{
+  /**
+   * This class manages the storage and handling of particles. It provides
+   * the data structures necessary to store particles efficiently, accessor
+   * functions to iterate over particles and find particles, and algorithms
+   * to distribute particles in parallel domains.
+   *
+   * @ingroup Particle
+   */
+  template <int dim, int spacedim=dim>
+  class ParticleHandler : public Subscriptor
+  {
+  public:
+    /**
+     * A type that can be used to iterate over all particles in the domain.
+     */
+    typedef ParticleIterator<dim,spacedim> particle_iterator;
+
+    /**
+     * A type that represents a range of particles.
+     */
+    typedef boost::iterator_range<particle_iterator> particle_iterator_range;
+
+    /**
+     * Default constructor.
+     */
+    ParticleHandler();
+
+    /**
+     * Constructor that initializes the particle handler with respect to
+     * a given triangulation and MPI communicator. Pointers to the
+     * triangulation and the communicator are stored inside of the particle
+     * handler.
+     */
+    ParticleHandler(const parallel::distributed::Triangulation<dim,spacedim> &tria,
+                    const Mapping<dim,spacedim> &mapping,
+                    const MPI_Comm mpi_communicator,
+                    const unsigned int n_properties = 0);
+
+    /**
+     * Destructor.
+     */
+    ~ParticleHandler();
+
+    /**
+     * Initialize the particle handler. This function does not clear the
+     * internal data structures, it just sets the connections to the
+     * MPI communicator and the triangulation.
+     */
+    void initialize(const parallel::distributed::Triangulation<dim,spacedim> &tria,
+                    const Mapping<dim,spacedim> &mapping,
+                    const MPI_Comm mpi_communicator,
+                    const unsigned int n_properties = 0);
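A minimal sketch of how user code might construct a handler against the interface above (grid setup and header names follow standard deal.II usage; the particular mesh and property count are made up for illustration):

    #include <deal.II/distributed/tria.h>
    #include <deal.II/fe/mapping_q1.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/particles/particle_handler.h>

    using namespace dealii;

    // Build a distributed mesh, a mapping, and a handler that reserves
    // two property values (e.g. a weight and an age) per particle.
    parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
    GridGenerator::hyper_cube(tria);
    tria.refine_global(3);

    MappingQ1<2> mapping;

    Particles::ParticleHandler<2> particle_handler(tria, mapping,
                                                   MPI_COMM_WORLD,
                                                   /*n_properties=*/2);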
+
+    /**
+     * Clear all particle related data.
+     */
+    void clear();
+
+    /**
+     * Only clear particle data, but keep cache information about number
+     * of particles. This is useful during reorganization of particle data
+     * between processes.
+     */
+    void clear_particles();
+
+    /**
+     * Return an iterator to the first particle.
+     */
+    particle_iterator begin() const;
+
+    /**
+     * Return an iterator to the first particle.
+     */
+    particle_iterator begin();
+
+    /**
+     * Return an iterator past the end of the particles.
+     */
+    particle_iterator end() const;
+
+    /**
+     * Return an iterator past the end of the particles.
+     */
+    particle_iterator end();
+
+    /**
+     * Return a pair of particle iterators that mark the beginning and end
+     * of the particles in a particular cell. The last iterator is the first
+     * particle that is no longer in the cell.
+     */
+    particle_iterator_range
+    particles_in_cell(const typename parallel::distributed::Triangulation<dim,spacedim>::active_cell_iterator &cell);
+
+    /**
+     * Return a pair of particle iterators that mark the beginning and end
+     * of the particles in a particular cell. The last iterator is the first
+     * particle that is no longer in the cell.
+     */
+    particle_iterator_range
+    particles_in_cell(const typename parallel::distributed::Triangulation<dim,spacedim>::active_cell_iterator &cell) const;
+
+    /**
+     * Remove a particle pointed to by the iterator.
+     */
+    void
+    remove_particle(const particle_iterator &particle);
+
+    /**
+     * Insert a particle into the collection of particles. Return an iterator
+     * to the new position of the particle. This function involves a copy of
+     * the particle and its properties. Note that this function is of $O(N \log N)$
+     * complexity for $N$ particles.
+     */
+    particle_iterator
+    insert_particle(const Particle<dim,spacedim> &particle,
+                    const typename parallel::distributed::Triangulation<dim,spacedim>::active_cell_iterator &cell);
+
+    /**
+     * Insert a number of particles into the collection of particles.
+     * This function involves a copy of the particles and their properties.
+     * Note that this function is of O(n_existing_particles + n_particles) complexity.
+     */
+    void
+    insert_particles(const std::multimap<types::LevelInd, Particle<dim,spacedim> > &particles);
+
+    /**
+     * This function allows registering three additional functions that are
+     * called every time a particle is transferred to another process
+     * (i.e. during sorting into cells, during ghost particle transfer, or
+     * during serialization of all particles).
+     *
+     * @param size_callback A function that is called when serializing
+     * particle data. The function gets no arguments and is expected to
+     * return the size of the additional data that is serialized per
+     * particle. Note that this currently implies the data size has to be
+     * the same for every particle.
+     * @param store_callback A function that is called once per particle
+     * when serializing particle data. Arguments to the function are a
+     * particle iterator that identifies the current particle and a void
+     * pointer that points to a data block of size size_callback() in which
+     * the function can store additional data. The function is expected to
+     * return a void pointer pointing to a position right after its data
+     * block.
+     * @param load_callback A function that is called once per particle
+     * when deserializing particle data. Arguments to the function are a
+     * particle iterator that identifies the current particle and a void
+     * pointer that points to a data block of size size_callback() in which
+     * additional data was stored by the store_callback function. The
+     * function is expected to return a void pointer pointing to a position
+     * right after its data block.
+     */
+    void
+    register_additional_store_load_functions(const std::function<std::size_t ()> &size_callback,
+                                             const std::function<void *(const particle_iterator &, void *)> &store_callback,
+                                             const std::function<const void *(const particle_iterator &, const void *)> &load_callback);
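As a hedged illustration of the three callbacks just declared, the following sketch ships one extra double per particle during every transfer. The lookup table auxiliary_data and the choice dim = 2 are assumptions of the example, not part of the class:

    #include <cstring>
    #include <map>

    std::map<types::particle_index, double> auxiliary_data;

    particle_handler.register_additional_store_load_functions(
      // size_callback: the same size for every particle, as required.
      [] () -> std::size_t
    {
      return sizeof(double);
    },
    // store_callback: copy the value into the buffer and return the
    // position right behind the written block.
    [&auxiliary_data] (const Particles::ParticleHandler<2>::particle_iterator &particle,
                       void *data) -> void *
    {
      const double value = auxiliary_data[particle->get_id()];
      std::memcpy(data, &value, sizeof(double));
      return static_cast<char *>(data) + sizeof(double);
    },
    // load_callback: read the value back on the receiving side.
    [&auxiliary_data] (const Particles::ParticleHandler<2>::particle_iterator &particle,
                       const void *data) -> const void *
    {
      double value;
      std::memcpy(&value, data, sizeof(double));
      auxiliary_data[particle->get_id()] = value;
      return static_cast<const char *>(data) + sizeof(double);
    });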
+
+    /**
+     * Return the total number of particles that were managed by this class
+     * the last time the update_n_global_particles() function was called.
+     * The actual number of particles may have changed since then if
+     * particles have been added or removed.
+     *
+     * @return Total number of particles in the simulation.
+     */
+    types::particle_index n_global_particles() const;
+
+    /**
+     * Return the number of particles in the local part of the
+     * triangulation.
+     */
+    types::particle_index n_locally_owned_particles() const;
+
+    /**
+     * Return the number of properties each particle has.
+     */
+    unsigned int n_properties_per_particle() const;
+
+    /**
+     * Return a reference to the property pool that owns all particle
+     * properties and organizes them physically.
+     */
+    PropertyPool &
+    get_property_pool() const;
+
+    /**
+     * Return the number of particles in the given cell.
+     */
+    unsigned int
+    n_particles_in_cell(const typename Triangulation<dim,spacedim>::active_cell_iterator &cell) const;
+
+    /**
+     * Return a vector that contains a tensor for every vertex-cell
+     * combination of the output of dealii::GridTools::vertex_to_cell_map()
+     * (which is expected as input parameter for this function).
+     * Each tensor represents a geometric vector from the vertex to the
+     * respective cell center.
+     */
+    std::vector<std::vector<Tensor<1,spacedim> > >
+    vertex_to_cell_centers_directions(const std::vector<std::set<typename Triangulation<dim,spacedim>::active_cell_iterator> > &vertex_to_cells) const;
+
+    /**
+     * Find the cells containing each particle for all locally owned
+     * particles. If particles moved out of the local subdomain
+     * they will be sent to their new process and inserted there.
+     * After this function call every particle is either on its current
+     * process and in its current cell, or deleted (if it could not find
+     * its new process or cell).
+     *
+     * TODO: Extend this to allow keeping particles on other processes
+     * around (with an invalid cell).
+     */
+    void
+    sort_particles_into_subdomains_and_cells();
+
+    /**
+     * Exchange all particles that live in cells that are ghost cells of
+     * other processes. Clears and re-populates the ghost_particles
+     * member variable.
+     */
+    void
+    exchange_ghost_particles();
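The two functions above are designed to be called once per time step, after user code has moved the particles. A sketch of the intended calling sequence (move_particle, dt and n_time_steps are placeholders for a user-supplied advection rule):

    for (unsigned int step = 0; step < n_time_steps; ++step)
      {
        // Advect each particle with a user-defined rule.
        for (auto particle = particle_handler.begin();
             particle != particle_handler.end(); ++particle)
          particle->set_location(move_particle(particle->get_location(), dt));

        // Re-attach particles to cells; particles that crossed subdomain
        // boundaries are shipped to their new owner.
        particle_handler.sort_particles_into_subdomains_and_cells();

        // Make read-only copies of particles in ghost cells available.
        particle_handler.exchange_ghost_particles();
      }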
+
+    /**
+     * Serialize the contents of this class.
+     */
+    template <class Archive>
+    void serialize (Archive &ar, const unsigned int version);
+
+  private:
+    /**
+     * A private typedef for cell iterators that makes the code of this
+     * class easier to read.
+     */
+    typedef typename parallel::distributed::Triangulation<dim,spacedim>::active_cell_iterator active_cell_it;
+
+    /**
+     * Address of the triangulation to work on.
+     */
+    SmartPointer<const parallel::distributed::Triangulation<dim,spacedim>,ParticleHandler<dim,spacedim> > triangulation;
+
+    /**
+     * Address of the mapping to work on.
+     */
+    SmartPointer<const Mapping<dim,spacedim>,ParticleHandler<dim,spacedim> > mapping;
+
+    /**
+     * MPI communicator.
+     */
+    MPI_Comm mpi_communicator;
+
+    /**
+     * Set of particles currently in the local domain, organized by
+     * the level/index of the cell they are in.
+     */
+    std::multimap<types::LevelInd, Particle<dim,spacedim> > particles;
+
+    /**
+     * Set of particles currently in the ghost cells of the local domain,
+     * organized by the level/index of the cell they are in. These
+     * particles are marked read-only.
+     */
+    std::multimap<types::LevelInd, Particle<dim,spacedim> > ghost_particles;
+
+    /**
+     * This variable stores how many particles are stored globally. It is
+     * calculated by update_n_global_particles().
+     */
+    types::particle_index global_number_of_particles;
+
+    /**
+     * The maximum number of particles per cell in the global domain. This
+     * variable is important to store and load particle data during
+     * repartitioning and serialization of the solution. Note that the
+     * variable is only updated when it is needed, e.g. after particle
+     * movement, before/after mesh refinement, before creating a
+     * checkpoint and after resuming from a checkpoint.
+     */
+    unsigned int global_max_particles_per_cell;
+
+    /**
+     * This variable stores the next free particle index that is available
+     * globally in case new particles need to be generated.
+     */
+    types::particle_index next_free_particle_index;
+
+    /**
+     * This object owns and organizes the memory for all particle
+     * properties.
+     */
+    std::unique_ptr<PropertyPool> property_pool;
+
+    /**
+     * A function that can be registered by calling
+     * register_additional_store_load_functions. It is called when serializing
+     * particle data. The function gets no arguments and is expected to
+     * return the size of the additional data that is serialized per
+     * particle. Note that this currently implies the data size has to be
+     * the same for every particle, but it does not have to be the same for
+     * every serialization process (e.g. a serialization during particle
+     * movement might include temporary data, while a serialization after
+     * movement was finished does not need to transfer this data).
+     */
+    std::function<std::size_t ()> size_callback;
+
+    /**
+     * A function that can be registered by calling
+     * register_additional_store_load_functions. It is called once per
+     * particle when serializing particle data. Arguments to the function
+     * are a particle iterator that identifies the current particle and a void
+     * pointer that points to a data block of size size_callback() in which
+     * the function can store additional data. The function is expected to
+     * return a void pointer pointing to a position right after its data
+     * block.
+     */
+    std::function<void *(const particle_iterator &, void *)> store_callback;
+
+    /**
+     * A function that is called once per particle
+     * when deserializing particle data. Arguments to the function are a
+     * particle iterator that identifies the current particle and a void
+     * pointer that points to a data block of size size_callback() from
+     * which the function can load additional data. This block was filled
+     * by the store_callback function during serialization. This function
+     * is expected to return a void pointer pointing to a position right
+     * after its data block.
+     */
+    std::function<const void *(const particle_iterator &, const void *)> load_callback;
+
+    /**
+     * This variable is set by the register_store_callback_function()
+     * function and used by the register_load_callback_function() function
+     * to check where the particle data was stored.
+     */
+    unsigned int data_offset;
+
+    /**
+     * Calculates the number of particles in the global model domain.
+     */
+    void
+    update_n_global_particles();
+
+    /**
+     * Calculates and stores the number of particles in the cell that
+     * contains the most particles in the global model (stored in the
+     * member variable global_max_particles_per_cell).
+     * This variable is a
+     * state variable, because it is needed to serialize and deserialize
+     * the particle data correctly in parallel (it determines the size of
+     * the data chunks per cell that are stored and read). Before accessing
+     * the variable this function has to be called, unless the state was
+     * read from another source (e.g. after resuming from a checkpoint).
+     */
+    void
+    update_global_max_particles_per_cell();
+
+    /**
+     * Calculates the next free particle index in the global model domain.
+     * This equals one plus the highest particle index currently active.
+     */
+    void
+    update_next_free_particle_index();
+
+    /**
+     * Transfer particles that have crossed subdomain boundaries to other
+     * processors.
+     * All received particles and their new cells will be appended to the
+     * @p received_particles vector.
+     *
+     * @param [in] particles_to_send All particles that should be sent,
+     * sorted into one vector of particles per neighbor process.
+     *
+     * @param [in,out] received_particles Vector that stores all received
+     * particles. Note that it is not required nor checked that the list
+     * is empty, received particles are simply attached to the end of
+     * the vector.
+     *
+     * @param [in] new_cells_for_particles Optional vector of cell
+     * iterators with the same structure as @p particles_to_send. If this
+     * parameter is given it should contain, for every particle to be sent,
+     * the cell in which the particle belongs. This parameter
+     * is necessary if the cell information of the particle iterator is
+     * outdated (e.g. after particle movement).
+     */
+    void
+    send_recv_particles(const std::vector<std::vector<particle_iterator> > &particles_to_send,
+                        std::multimap<types::LevelInd, Particle<dim,spacedim> > &received_particles,
+                        const std::vector<std::vector<active_cell_it> > &new_cells_for_particles = std::vector<std::vector<active_cell_it> > ());
+
+    /**
+     * Callback function that should be called before every
+     * refinement and when writing checkpoints.
+     * Allows registering store_particles() in the triangulation.
+     */
+    void
+    register_store_callback_function(const bool serialization);
+
+    /**
+     * Callback function that should be called after every
+     * refinement and after resuming from a checkpoint.
+     * Allows registering load_particles() in the triangulation.
+     */
+    void
+    register_load_callback_function(const bool serialization);
+
+    /**
+     * Called by listener functions from Triangulation for every cell
+     * before a refinement step. All particles have to be attached to their
+     * cell to be sent around to the new processes.
+     */
+    void
+    store_particles(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &cell,
+                    const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus status,
+                    void *data) const;
+
+    /**
+     * Called by listener functions after a refinement step. The local map
+     * of particles has to be read from the triangulation user_pointer.
+     */
+    void
+    load_particles(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &cell,
+                   const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus status,
+                   const void *data);
+
+    /**
+     * Get a map between subdomain id and a contiguous
+     * number from 0 to n_neighbors-1, which is interpreted as the neighbor index.
+     * In other words the returned map answers the question: Given a subdomain id, which
+     * neighbor of the current processor's domain owns this subdomain?
+     */
+    std::map<types::subdomain_id, unsigned int>
+    get_subdomain_id_to_neighbor_map() const;
+  };
+
+  /* -------------------------- inline and template functions ---------------------- */
+
+  template <int dim, int spacedim>
+  template <class Archive>
+  void ParticleHandler<dim,spacedim>::serialize (Archive &ar, const unsigned int)
+  {
+    // Note that we do not serialize the particle data itself. Instead we
+    // use the serialization functionality of the triangulation class, because
+    // this guarantees that data is immediately shipped to new processes if
+    // the domain is distributed differently after resuming from a checkpoint.
+    ar //&particles
+    &global_number_of_particles
+    &global_max_particles_per_cell
+    &next_free_particle_index;
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/source/particles/CMakeLists.txt b/source/particles/CMakeLists.txt
index 9b0dc52ed9..39d3138064 100644
--- a/source/particles/CMakeLists.txt
+++ b/source/particles/CMakeLists.txt
@@ -20,6 +20,7 @@ SET(_src
   particle.cc
   particle_accessor.cc
   particle_iterator.cc
+  particle_handler.cc
   property_pool.cc
   )

@@ -27,6 +28,7 @@ SET(_inst
   particle.inst.in
   particle_accessor.inst.in
   particle_iterator.inst.in
+  particle_handler.inst.in
   )

 FILE(GLOB _header
diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc
new file mode 100644
index 0000000000..600197433b
--- /dev/null
+++ b/source/particles/particle_handler.cc
@@ -0,0 +1,1099 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include
+
+#include
+#include
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Particles
+{
+  template <int dim, int spacedim>
+  ParticleHandler<dim,spacedim>::ParticleHandler()
+    :
+    triangulation(),
+    mpi_communicator(),
+    particles(),
+    ghost_particles(),
+    global_number_of_particles(0),
+    global_max_particles_per_cell(0),
+    next_free_particle_index(0),
+    property_pool(new PropertyPool(0)),
+    size_callback(),
+    store_callback(),
+    load_callback(),
+    data_offset(numbers::invalid_unsigned_int)
+  {}
+
+
+
+  template <int dim, int spacedim>
+  ParticleHandler<dim,spacedim>::ParticleHandler(const parallel::distributed::Triangulation<dim,spacedim> &triangulation,
+                                                 const Mapping<dim,spacedim> &mapping,
+                                                 const MPI_Comm mpi_communicator,
+                                                 const unsigned int n_properties)
+    :
+    triangulation(&triangulation, typeid(*this).name()),
+    mapping(&mapping, typeid(*this).name()),
+    mpi_communicator(mpi_communicator),
+    particles(),
+    ghost_particles(),
+    global_number_of_particles(0),
+    global_max_particles_per_cell(0),
+    next_free_particle_index(0),
+    property_pool(new PropertyPool(n_properties)),
+    size_callback(),
+    store_callback(),
+    load_callback(),
+    data_offset(numbers::invalid_unsigned_int)
+  {}
+
+
+
+  template <int dim, int spacedim>
+  ParticleHandler<dim,spacedim>::~ParticleHandler()
+  {}
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::initialize(const parallel::distributed::Triangulation<dim,spacedim> &tria,
+                                            const Mapping<dim,spacedim> &mapp,
+                                            const MPI_Comm communicator,
+                                            const unsigned int n_properties)
+  {
+    triangulation = &tria;
+    mapping = &mapp;
+    mpi_communicator = communicator;
+
+    // Create the memory pool that will store all particle properties
+    property_pool.reset(new PropertyPool(n_properties));
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::clear()
+  {
+    clear_particles();
+    global_number_of_particles = 0;
+    next_free_particle_index = 0;
+    global_max_particles_per_cell = 0;
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::clear_particles()
+  {
+    particles.clear();
+  }
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator
+  ParticleHandler<dim,spacedim>::begin() const
+  {
+    return particle_iterator(particles,(const_cast<ParticleHandler<dim,spacedim> *> (this))->particles.begin());
+  }
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator
+  ParticleHandler<dim,spacedim>::begin()
+  {
+    return ParticleHandler<dim,spacedim>::particle_iterator(particles,particles.begin());
+  }
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator
+  ParticleHandler<dim,spacedim>::end() const
+  {
+    return (const_cast<ParticleHandler<dim,spacedim> *> (this))->end();
+  }
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator
+  ParticleHandler<dim,spacedim>::end()
+  {
+    return ParticleHandler<dim,spacedim>::particle_iterator(particles,particles.end());
+  }
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator_range
+  ParticleHandler<dim,spacedim>::particles_in_cell(const active_cell_it &cell) const
+  {
+    return (const_cast<ParticleHandler<dim,spacedim> *> (this))->particles_in_cell(cell);
+  }
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator_range
+  ParticleHandler<dim,spacedim>::particles_in_cell(const active_cell_it &cell)
+  {
+    const types::LevelInd level_index = std::make_pair (cell->level(),cell->index());
+
+    std::pair<typename std::multimap<types::LevelInd, Particle<dim,spacedim> >::iterator,
+        typename std::multimap<types::LevelInd, Particle<dim,spacedim> >::iterator> particles_in_cell;
+
+    if (!cell->is_ghost())
+      particles_in_cell = particles.equal_range(level_index);
+    else
+      particles_in_cell = ghost_particles.equal_range(level_index);
+
+    return boost::make_iterator_range(particle_iterator(particles,particles_in_cell.first),
+                                      particle_iterator(particles,particles_in_cell.second));
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::remove_particle(const ParticleHandler<dim,spacedim>::particle_iterator &particle)
+  {
+    particles.erase(particle->particle);
+  }
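Since remove_particle() erases the underlying multimap entry, the iterator handed to it (and only that one) becomes invalid. A sketch of a safe filtering sweep therefore collects candidates first; the removal criterion and dim = 2 are made up for this example:

    std::vector<Particles::ParticleHandler<2>::particle_iterator> to_remove;
    for (auto particle = particle_handler.begin();
         particle != particle_handler.end(); ++particle)
      if (particle->get_location().norm() > 1.0)   // example criterion
        to_remove.push_back(particle);

    // Removing after the sweep leaves the loop iterators untouched.
    for (const auto &particle : to_remove)
      particle_handler.remove_particle(particle);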
+
+
+
+  template <int dim, int spacedim>
+  typename ParticleHandler<dim,spacedim>::particle_iterator
+  ParticleHandler<dim,spacedim>::insert_particle(const Particle<dim,spacedim> &particle,
+                                                 const typename parallel::distributed::Triangulation<dim,spacedim>::active_cell_iterator &cell)
+  {
+    typename std::multimap<types::LevelInd, Particle<dim,spacedim> >::iterator it =
+      particles.insert(std::make_pair(types::LevelInd(cell->level(),cell->index()),particle));
+
+    particle_iterator particle_it (particles,it);
+    particle_it->set_property_pool(*property_pool);
+
+    if (particle.has_properties())
+      for (unsigned int n=0; n<particle.get_properties().size(); ++n)
+        particle_it->get_properties()[n] = particle.get_properties()[n];
+
+    return particle_it;
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::insert_particles(const std::multimap<types::LevelInd, Particle<dim,spacedim> > &new_particles)
+  {
+    particles.insert(new_particles.begin(),new_particles.end());
+  }
+
+
+
+  template <int dim, int spacedim>
+  types::particle_index
+  ParticleHandler<dim,spacedim>::n_global_particles() const
+  {
+    return global_number_of_particles;
+  }
+
+
+
+  template <int dim, int spacedim>
+  types::particle_index
+  ParticleHandler<dim,spacedim>::n_locally_owned_particles() const
+  {
+    return particles.size();
+  }
+
+
+
+  template <int dim, int spacedim>
+  unsigned int
+  ParticleHandler<dim,spacedim>::n_properties_per_particle() const
+  {
+    return property_pool->n_properties_per_slot();
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::update_n_global_particles()
+  {
+    global_number_of_particles = dealii::Utilities::MPI::sum (particles.size(), mpi_communicator);
+  }
+
+
+
+  template <int dim, int spacedim>
+  unsigned int
+  ParticleHandler<dim,spacedim>::n_particles_in_cell(const typename Triangulation<dim,spacedim>::active_cell_iterator &cell) const
+  {
+    const types::LevelInd found_cell = std::make_pair (cell->level(),cell->index());
+
+    if (cell->is_locally_owned())
+      return particles.count(found_cell);
+    else if (cell->is_ghost())
+      return ghost_particles.count(found_cell);
+    else if (cell->is_artificial())
+      AssertThrow(false,ExcInternalError());
+
+    return 0;
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::update_next_free_particle_index()
+  {
+    types::particle_index locally_highest_index = 0;
+    for (particle_iterator particle = begin(); particle != end(); ++particle)
+      locally_highest_index = std::max(locally_highest_index,particle->get_id());
+
+    next_free_particle_index = dealii::Utilities::MPI::max (locally_highest_index, mpi_communicator) + 1;
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::update_global_max_particles_per_cell()
+  {
+    unsigned int local_max_particles_per_cell(0);
+    active_cell_it cell = triangulation->begin_active();
+    for (; cell!=triangulation->end(); ++cell)
+      if (cell->is_locally_owned())
+        {
+          local_max_particles_per_cell = std::max(local_max_particles_per_cell,
+                                                  n_particles_in_cell(cell));
+        }
+
+    global_max_particles_per_cell = dealii::Utilities::MPI::max(local_max_particles_per_cell,mpi_communicator);
+  }
+
+
+
+  template <int dim, int spacedim>
+  PropertyPool &
+  ParticleHandler<dim,spacedim>::get_property_pool () const
+  {
+    return *property_pool;
+  }
+
+
+
+  template <int dim, int spacedim>
+  std::map<types::subdomain_id, unsigned int>
+  ParticleHandler<dim,spacedim>::get_subdomain_id_to_neighbor_map() const
+  {
+    std::map<types::subdomain_id, unsigned int> subdomain_id_to_neighbor_map;
+    const std::set<types::subdomain_id> ghost_owners = triangulation->ghost_owners();
+    std::set<types::subdomain_id>::const_iterator ghost_owner = ghost_owners.begin();
+
+    for (unsigned int neighbor_id=0; neighbor_id<ghost_owners.size(); ++neighbor_id,++ghost_owner)
+      {
+        subdomain_id_to_neighbor_map.insert(std::make_pair(*ghost_owner,neighbor_id));
+      }
+    return subdomain_id_to_neighbor_map;
+  }
+
+
+
+  template <int dim, int spacedim>
+  std::vector<std::vector<Tensor<1,spacedim> > >
+  ParticleHandler<dim,spacedim>::vertex_to_cell_centers_directions(const std::vector<std::set<typename Triangulation<dim,spacedim>::active_cell_iterator> > &vertex_to_cells) const
+  {
+    const std::vector<Point<spacedim> > &vertices = triangulation->get_vertices();
+    const unsigned int n_vertices = vertex_to_cells.size();
+
+    std::vector<std::vector<Tensor<1,spacedim> > > vertex_to_cell_centers(n_vertices);
+    for (unsigned int vertex=0; vertex<n_vertices; ++vertex)
+      if (triangulation->vertex_used(vertex))
+        {
+          const unsigned int n_neighbor_cells = vertex_to_cells[vertex].size();
+          vertex_to_cell_centers[vertex].resize(n_neighbor_cells);
+
+          typename std::set<typename Triangulation<dim,spacedim>::active_cell_iterator>::iterator it = vertex_to_cells[vertex].begin();
+          for (unsigned int cell=0; cell<n_neighbor_cells; ++cell,++it)
+            {
+              vertex_to_cell_centers[vertex][cell] = (*it)->center() - vertices[vertex];
+              vertex_to_cell_centers[vertex][cell] /= vertex_to_cell_centers[vertex][cell].norm();
+            }
+        }
+    return vertex_to_cell_centers;
+  }
+
+
+
+  namespace
+  {
+    /**
+     * This function is used as a comparison argument to std::sort to sort the
+     * vector of tensors @p center_directions by the scalar product of each
+     * tensor with the @p particle_direction tensor. The sorted indices allow
+     * looping over @p center_directions with increasing angle between
+     * @p particle_direction and @p center_directions. This function assumes
+     * that @p particle_direction and @p center_directions are normalized
+     * to length one.
+     */
+    template <int dim>
+    bool
+    compare_particle_association(const unsigned int a,
+                                 const unsigned int b,
+                                 const Tensor<1,dim> &particle_direction,
+                                 const std::vector<Tensor<1,dim> > &center_directions)
+    {
+      const double scalar_product_a = center_directions[a] * particle_direction;
+      const double scalar_product_b = center_directions[b] * particle_direction;
+
+      // The function is supposed to return if a is before b. We are looking
+      // for the alignment of particle direction and center direction,
+      // therefore return if the scalar product of a is larger.
+      return (scalar_product_a > scalar_product_b);
+    }
+
+    /**
+     * Return the local vertex index of cell @p cell that is closest to
+     * the given location @p position.
+     */
+    template <int dim, int spacedim>
+    unsigned int
+    get_closest_vertex_of_cell(const typename Triangulation<dim,spacedim>::active_cell_iterator &cell,
+                               const Point<spacedim> &position)
+    {
+      double minimum_distance = std::numeric_limits<double>::max();
+      unsigned int closest_vertex = numbers::invalid_unsigned_int;
+
+      for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+        {
+          const double vertex_distance = position.distance(cell->vertex(v));
+          if (vertex_distance < minimum_distance)
+            {
+              closest_vertex = v;
+              minimum_distance = vertex_distance;
+            }
+        }
+
+      return closest_vertex;
+    }
+  }
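The comparison helper above is used further down with std::sort to probe the most likely destination cells first. A self-contained illustration of the ordering it produces (all vectors are made-up unit directions in 2d):

    #include <algorithm>
    #include <functional>

    Tensor<1,2> particle_direction;
    particle_direction[0] = 1.0;               // particle moved in +x

    std::vector<Tensor<1,2> > center_directions(3);
    center_directions[0][1] = 1.0;             // +y: scalar product  0
    center_directions[1][0] = -1.0;            // -x: scalar product -1
    center_directions[2][0] = 1.0;             // +x: scalar product  1

    std::vector<unsigned int> permutation = {0, 1, 2};
    std::sort(permutation.begin(), permutation.end(),
              std::bind(compare_particle_association<2>,
                        std::placeholders::_1,
                        std::placeholders::_2,
                        std::cref(particle_direction),
                        std::cref(center_directions)));
    // permutation is now {2, 0, 1}: the best-aligned direction comes first.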
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::sort_particles_into_subdomains_and_cells()
+  {
+    std::vector<particle_iterator> particles_out_of_cell;
+    particles_out_of_cell.reserve(n_locally_owned_particles());
+
+    // Now update the reference locations of the moved particles
+    for (particle_iterator it=begin(); it!=end(); ++it)
+      {
+        const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator cell = it->get_surrounding_cell(*triangulation);
+
+        try
+          {
+            const Point<dim> p_unit = mapping->transform_real_to_unit_cell(cell, it->get_location());
+            if (GeometryInfo<dim>::is_inside_unit_cell(p_unit))
+              {
+                it->set_reference_location(p_unit);
+              }
+            else
+              {
+                // The particle has left the cell
+                particles_out_of_cell.push_back(it);
+              }
+          }
+        catch (typename Mapping<dim>::ExcTransformationFailed &)
+          {
+            // The particle has left the cell
+            particles_out_of_cell.push_back(it);
+          }
+      }
+
+    // TODO: The current algorithm only works for CFL numbers <= 1.0,
+    // because it only knows the subdomain_id of ghost cells, but not
+    // of artificial cells.
+
+    // There are three reasons why a particle is not in its old cell:
+    // It moved to another cell, to another subdomain or it left the mesh.
+    // Particles that moved to another cell are updated and stored inside the
+    // sorted_particles vector, particles that moved to another domain are
+    // collected in the moved_particles vector. Particles that left
+    // the mesh completely are ignored and removed.
+    std::vector<std::pair<types::LevelInd, Particle<dim,spacedim> > > sorted_particles;
+    std::vector<std::vector<particle_iterator> > moved_particles;
+    std::vector<std::vector<active_cell_it> > moved_cells;
+
+    // We do not know exactly how many particles are lost, exchanged between
+    // domains, or remain on this process. Therefore we pre-allocate approximate
+    // sizes for these vectors. If more space is needed an automatic and
+    // relatively fast (compared to other parts of this algorithm)
+    // re-allocation will happen.
+    typedef typename std::vector<particle_iterator>::size_type vector_size;
+    sorted_particles.reserve(static_cast<vector_size> (particles_out_of_cell.size()*1.25));
+    const std::map<types::subdomain_id, unsigned int> subdomain_to_neighbor_map(get_subdomain_id_to_neighbor_map());
+
+    moved_particles.resize(subdomain_to_neighbor_map.size());
+    moved_cells.resize(subdomain_to_neighbor_map.size());
+
+    for (unsigned int i=0; i<subdomain_to_neighbor_map.size(); ++i)
+      {
+        moved_particles[i].reserve(static_cast<vector_size> (particles_out_of_cell.size()*0.25));
+        moved_cells[i].reserve(static_cast<vector_size> (particles_out_of_cell.size()*0.25));
+      }
+
+    {
+      // Create a map from vertices to adjacent cells
+      const std::vector<std::set<typename Triangulation<dim,spacedim>::active_cell_iterator> >
+      vertex_to_cells(GridTools::vertex_to_cell_map(*triangulation));
+
+      // Create a corresponding map of vectors from vertex to cell center
+      const std::vector<std::vector<Tensor<1,spacedim> > > vertex_to_cell_centers(vertex_to_cell_centers_directions(vertex_to_cells));
+
+      std::vector<unsigned int> neighbor_permutation;
+
+      // Find the cells that the particles moved to.
+      typename std::vector<particle_iterator>::iterator it = particles_out_of_cell.begin(),
+                                                        end_particle = particles_out_of_cell.end();
+
+      for (; it!=end_particle; ++it)
+        {
+          // The cell the particle is in
+          Point<dim> current_reference_position;
+          bool found_cell = false;
+
+          // Check if the particle is in one of the old cell's neighbors
+          // that are adjacent to the closest vertex
+          active_cell_it current_cell = (*it)->get_surrounding_cell(*triangulation);
+
+          const unsigned int closest_vertex = get_closest_vertex_of_cell<dim,spacedim>(current_cell,(*it)->get_location());
+          Tensor<1,spacedim> vertex_to_particle = (*it)->get_location() - current_cell->vertex(closest_vertex);
+          vertex_to_particle /= vertex_to_particle.norm();
+
+          const unsigned int closest_vertex_index = current_cell->vertex_index(closest_vertex);
+          const unsigned int n_neighbor_cells = vertex_to_cells[closest_vertex_index].size();
+
+          neighbor_permutation.resize(n_neighbor_cells);
+          for (unsigned int i=0; i<n_neighbor_cells; ++i)
+            neighbor_permutation[i] = i;
+
+          std::sort(neighbor_permutation.begin(),
+                    neighbor_permutation.end(),
+                    std::bind(compare_particle_association<spacedim>,
+                              std::placeholders::_1,
+                              std::placeholders::_2,
+                              std::cref(vertex_to_particle),
+                              std::cref(vertex_to_cell_centers[closest_vertex_index])));
+
+          // Search all of the cells adjacent to the closest vertex of the previous cell
+          // Most likely we will find the particle in them.
+          for (unsigned int i=0; i<n_neighbor_cells; ++i)
+            {
+              try
+                {
+                  typename std::set<typename Triangulation<dim,spacedim>::active_cell_iterator>::const_iterator cell = vertex_to_cells[closest_vertex_index].begin();
+                  std::advance(cell,neighbor_permutation[i]);
+                  const Point<dim> p_unit = mapping->transform_real_to_unit_cell(*cell,
+                                                                                 (*it)->get_location());
+                  if (GeometryInfo<dim>::is_inside_unit_cell(p_unit))
+                    {
+                      current_cell = *cell;
+                      current_reference_position = p_unit;
+                      found_cell = true;
+                      break;
+                    }
+                }
+              catch (typename Mapping<dim>::ExcTransformationFailed &)
+                {}
+            }
+
+          if (!found_cell)
+            {
+              // The particle is not in a neighbor of the old cell.
+              // Look for the new cell in the whole local domain.
+              // This case is rare.
+              try
+                {
+                  const std::pair<typename Triangulation<dim,spacedim>::active_cell_iterator,
+                        Point<dim> > current_cell_and_position =
+                          GridTools::find_active_cell_around_point<> (*mapping,
+                                                                      *triangulation,
+                                                                      (*it)->get_location());
+                  current_cell = current_cell_and_position.first;
+                  current_reference_position = current_cell_and_position.second;
+                }
+              catch (GridTools::ExcPointNotFound<spacedim> &)
+                {
+                  // We can find no cell for this particle. It has left the
+                  // domain due to an integration error or an open boundary.
+                  continue;
+                }
+            }
+
+          // If we are here, we found a cell and reference position for this particle
+          (*it)->set_reference_location(current_reference_position);
+
+          // Reinsert the particle into our domain if we own its cell.
+          // Mark it for MPI transfer otherwise
+          if (current_cell->is_locally_owned())
+            {
+              sorted_particles.push_back(std::make_pair(types::LevelInd(current_cell->level(),current_cell->index()),
+                                                        (*it)->particle->second));
+            }
+          else
+            {
+              const unsigned int neighbor_index = subdomain_to_neighbor_map.find(current_cell->subdomain_id())->second;
+              moved_particles[neighbor_index].push_back(*it);
+              moved_cells[neighbor_index].push_back(current_cell);
+            }
+        }
+    }
+
+    // Sort the updated particles. This pre-sort speeds up inserting
+    // them into particles to O(N) complexity.
+    std::multimap<types::LevelInd,Particle<dim,spacedim> > sorted_particles_map;
+
+    // Exchange particles between processors if we have more than one process
+    if (dealii::Utilities::MPI::n_mpi_processes(mpi_communicator) > 1)
+      send_recv_particles(moved_particles,sorted_particles_map,moved_cells);
+
+    sorted_particles_map.insert(sorted_particles.begin(),sorted_particles.end());
+
+    for (unsigned int i=0; i<particles_out_of_cell.size(); ++i)
+      remove_particle(particles_out_of_cell[i]);
+
+    particles.insert(sorted_particles_map.begin(),sorted_particles_map.end());
+    update_n_global_particles();
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::exchange_ghost_particles()
+  {
+    // Nothing to do in serial computations
+    if (dealii::Utilities::MPI::n_mpi_processes(mpi_communicator) == 1)
+      return;
+
+    // First clear the current ghost_particle information
+    ghost_particles.clear();
+
+    const std::map<types::subdomain_id, unsigned int> subdomain_to_neighbor_map(get_subdomain_id_to_neighbor_map());
+
+    std::vector<std::vector<particle_iterator> > ghost_particles_by_domain(subdomain_to_neighbor_map.size());
+
+    std::vector<std::set<types::subdomain_id> > vertex_to_neighbor_subdomain(triangulation->n_vertices());
+
+    active_cell_it
+    cell = triangulation->begin_active(),
+    endc = triangulation->end();
+    for (; cell != endc; ++cell)
+      {
+        if (cell->is_ghost())
+          for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+            vertex_to_neighbor_subdomain[cell->vertex_index(v)].insert(cell->subdomain_id());
+      }
+
+    cell = triangulation->begin_active();
+    for (; cell != endc; ++cell)
+      {
+        if (!cell->is_ghost())
+          {
+            std::set<types::subdomain_id> cell_to_neighbor_subdomain;
+            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+              {
+                cell_to_neighbor_subdomain.insert(vertex_to_neighbor_subdomain[cell->vertex_index(v)].begin(),
+                                                  vertex_to_neighbor_subdomain[cell->vertex_index(v)].end());
+              }
+
+            if (cell_to_neighbor_subdomain.size() > 0)
+              {
+                const particle_iterator_range particle_range = particles_in_cell(cell);
+
+                for (std::set<types::subdomain_id>::const_iterator domain=cell_to_neighbor_subdomain.begin();
+                     domain != cell_to_neighbor_subdomain.end(); ++domain)
+                  {
+                    const unsigned int neighbor_id = subdomain_to_neighbor_map.find(*domain)->second;
+
+                    for (typename particle_iterator_range::iterator particle = particle_range.begin(); particle != particle_range.end(); ++particle)
+                      ghost_particles_by_domain[neighbor_id].push_back(particle);
+                  }
+              }
+          }
+      }
+
+    send_recv_particles(ghost_particles_by_domain,
+                        ghost_particles);
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::send_recv_particles(const std::vector<std::vector<particle_iterator> > &particles_to_send,
+                                                     std::multimap<types::LevelInd,Particle<dim,spacedim> > &received_particles,
+                                                     const std::vector<std::vector<active_cell_it> > &send_cells)
+  {
+    // Determine the communication pattern
+    const std::set<types::subdomain_id> ghost_owners = triangulation->ghost_owners();
+    const std::vector<types::subdomain_id> neighbors (ghost_owners.begin(),
+                                                      ghost_owners.end());
+    const unsigned int n_neighbors = neighbors.size();
+
+    Assert(n_neighbors == particles_to_send.size(),
+           ExcMessage("The particles to send to other processes should be sorted into a vector "
+                      "containing as many vectors of particles as there are neighbor processes. This "
+                      "is not the case for an unknown reason. Contact the developers if you encounter "
+                      "this error."));
+
+    unsigned int n_send_particles = 0;
+    for (unsigned int i=0; i<n_neighbors; ++i)
+      n_send_particles += particles_to_send[i].size();
+
+    const unsigned int cellid_size = sizeof(CellId::binary_type);
+
+    // Containers for the amount and offsets of data we will send
+    // to other processors and the data itself.
+    std::vector<unsigned int> n_send_data(n_neighbors,0);
+    std::vector<unsigned int> send_offsets(n_neighbors,0);
+    std::vector<char> send_data;
+
+    // Only serialize things if there are particles to be sent.
+    // We cannot return early even if no particles
+    // are sent, because we might receive particles from other processes
+    if (n_send_particles > 0)
+      {
+        // Allocate space for sending particle data
+        const unsigned int particle_size = begin()->serialized_size_in_bytes() + cellid_size + (size_callback ? size_callback() : 0);
+        send_data.resize(n_send_particles * particle_size);
+        void *data = static_cast<void *> (&send_data.front());
+
+        // Serialize the data sorted by receiving process
+        for (types::subdomain_id neighbor_id = 0; neighbor_id < n_neighbors; ++neighbor_id)
+          {
+            send_offsets[neighbor_id] = reinterpret_cast<std::size_t> (data) - reinterpret_cast<std::size_t> (&send_data.front());
+
+            for (unsigned int i=0; i<particles_to_send[neighbor_id].size(); ++i)
+              {
+                // If no target cells are given, use the cells stored in
+                // the particle iterators
+                active_cell_it cell;
+                if (send_cells.size() == 0)
+                  cell = particles_to_send[neighbor_id][i]->get_surrounding_cell(*triangulation);
+                else
+                  cell = send_cells[neighbor_id][i];
+
+                const CellId::binary_type cellid = cell->id().template to_binary<dim>();
+                memcpy(data, &cellid, cellid_size);
+                data = static_cast<char *>(data) + cellid_size;
+
+                particles_to_send[neighbor_id][i]->write_data(data);
+                if (store_callback)
+                  data = store_callback(particles_to_send[neighbor_id][i],data);
+              }
+            n_send_data[neighbor_id] = reinterpret_cast<std::size_t> (data) - send_offsets[neighbor_id] - reinterpret_cast<std::size_t> (&send_data.front());
+          }
+      }
+
+    // Containers for the data we will receive from other processors
+    std::vector<unsigned int> n_recv_data(n_neighbors);
+    std::vector<unsigned int> recv_offsets(n_neighbors);
+
+    // Notify other processors how many particles we will send
+    {
+      std::vector<MPI_Request> n_requests(2*n_neighbors);
+      for (unsigned int i=0; i<n_neighbors; ++i)
+        MPI_Irecv(&(n_recv_data[i]), 1, MPI_INT, neighbors[i], 0, mpi_communicator, &(n_requests[2*i]));
+      for (unsigned int i=0; i<n_neighbors; ++i)
+        MPI_Isend(&(n_send_data[i]), 1, MPI_INT, neighbors[i], 0, mpi_communicator, &(n_requests[2*i+1]));
+      MPI_Waitall(2*n_neighbors,&n_requests[0],MPI_STATUSES_IGNORE);
+    }
+
+    // Determine how many particles and data we will receive
+    unsigned int total_recv_data = 0;
+    for (unsigned int neighbor_id=0; neighbor_id<n_neighbors; ++neighbor_id)
+      {
+        recv_offsets[neighbor_id] = total_recv_data;
+        total_recv_data += n_recv_data[neighbor_id];
+      }
+
+    // Set up the space for the received particle data
+    std::vector<char> recv_data(total_recv_data);
+
+    // Exchange the particle data between domains
+    {
+      std::vector<MPI_Request> requests(2*n_neighbors);
+      unsigned int send_ops = 0;
+      unsigned int recv_ops = 0;
+
+      for (unsigned int i=0; i<n_neighbors; ++i)
+        if (n_recv_data[i] > 0)
+          {
+            MPI_Irecv(&(recv_data[recv_offsets[i]]), n_recv_data[i], MPI_CHAR, neighbors[i], 1, mpi_communicator,&(requests[recv_ops]));
+            recv_ops++;
+          }
+
+      for (unsigned int i=0; i<n_neighbors; ++i)
+        if (n_send_data[i] > 0)
+          {
+            MPI_Isend(&(send_data[send_offsets[i]]), n_send_data[i], MPI_CHAR, neighbors[i], 1, mpi_communicator,&(requests[recv_ops+send_ops]));
+            send_ops++;
+          }
+      MPI_Waitall(send_ops+recv_ops,&requests[0],MPI_STATUSES_IGNORE);
+    }
+
+    // Put the received particles into the domain if they are in the triangulation
+    const void *recv_data_it = static_cast<const void *> (&recv_data.front());
+
+    while (reinterpret_cast<std::size_t> (recv_data_it) - reinterpret_cast<std::size_t> (&recv_data.front()) < total_recv_data)
+      {
+        CellId::binary_type binary_cellid;
+        memcpy(&binary_cellid, recv_data_it, cellid_size);
+        const CellId id(binary_cellid);
+        recv_data_it = static_cast<const char *> (recv_data_it) + cellid_size;
+
+        const active_cell_it cell = id.to_cell(*triangulation);
+
+        typename std::multimap<types::LevelInd,Particle<dim,spacedim> >::iterator recv_particle =
+          received_particles.insert(std::make_pair(types::LevelInd(cell->level(),cell->index()),
+                                                   Particle<dim,spacedim>(recv_data_it,*property_pool)));
+
+        if (load_callback)
+          recv_data_it = load_callback(particle_iterator(received_particles,recv_particle),
+                                       recv_data_it);
+      }
+
+    AssertThrow(recv_data_it == &recv_data.back()+1,
+                ExcMessage("The amount of data that was read into new particles "
+                           "does not match the amount of data sent around."));
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::register_additional_store_load_functions(const std::function<std::size_t ()> &size_callb,
+                                                                          const std::function<void *(const particle_iterator &, void *)> &store_callb,
+                                                                          const std::function<const void *(const particle_iterator &, const void *)> &load_callb)
+  {
+    size_callback = size_callb;
+    store_callback = store_callb;
+    load_callback = load_callb;
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::register_store_callback_function(const bool serialization)
+  {
+    parallel::distributed::Triangulation<dim,spacedim> *non_const_triangulation =
+      const_cast<parallel::distributed::Triangulation<dim,spacedim> *> (&(*triangulation));
+
+    // Only save and load particles if there are any, we might get here for
+    // example if somebody created a ParticleHandler but generated 0 particles.
+    update_global_max_particles_per_cell();
+
+    if (global_max_particles_per_cell > 0)
+      {
+        const std::function<void(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &,
+                                 const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus, void *) > callback_function
+          = std::bind(&ParticleHandler<dim,spacedim>::store_particles,
+                      std::cref(*this),
+                      std::placeholders::_1,
+                      std::placeholders::_2,
+                      std::placeholders::_3);
+
+        // Compute the size per serialized particle. This is simple if we own
+        // particles, simply ask one of them. Otherwise create a temporary particle,
+        // ask it for its size and add the size of its properties.
+        const std::size_t size_per_particle = (particles.size() > 0)
+                                              ?
+                                              begin()->serialized_size_in_bytes()
+                                              :
+                                              Particle<dim,spacedim>().serialized_size_in_bytes() +
+                                              property_pool->n_properties_per_slot() * sizeof(double);
+
+        // We need to transfer the number of particles for this cell and
+        // the particle data itself. If we are in the process of refinement
+        // (i.e. not in serialization) we need to provide 2^dim times the
+        // space for the data in case a cell is coarsened and all particles
+        // of the children have to be stored in the parent cell.
+        const std::size_t transfer_size_per_cell = sizeof (unsigned int) +
+                                                   (size_per_particle * global_max_particles_per_cell) *
+                                                   (serialization ?
+                                                    1
+                                                    :
+                                                    std::pow(2,dim));
+
+        data_offset = non_const_triangulation->register_data_attach(transfer_size_per_cell,callback_function);
+      }
+  }
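To make the size formula above concrete, a worked example with assumed numbers (dim = 2, 80 bytes per serialized particle, at most 10 particles in any cell):

    // refinement/coarsening transfer (serialization == false):
    //   sizeof(unsigned int) + 80 * 10 * 2^2 = 4 + 3200 = 3204 bytes per cell
    // serialization (serialization == true, no coarsening can occur):
    //   sizeof(unsigned int) + 80 * 10       = 4 +  800 =  804 bytes per cell
    //
    // The 2^dim factor covers the worst case in which every child of a
    // coarsened cell carries the global maximum number of particles.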
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::register_load_callback_function(const bool serialization)
+  {
+    // All particles have been stored, when we reach this point. Empty the
+    // particle data.
+    clear_particles();
+
+    parallel::distributed::Triangulation<dim,spacedim> *non_const_triangulation =
+      const_cast<parallel::distributed::Triangulation<dim,spacedim> *> (&(*triangulation));
+
+    // If we are resuming from a checkpoint, we first have to register the
+    // store function again, to set the triangulation in the same state as
+    // before the serialization. Only then does it know how to deserialize the
+    // data correctly. Only do this if something was actually stored.
+    if (serialization && (global_max_particles_per_cell > 0))
+      {
+        const std::function<void(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &,
+                                 const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus, void *) > callback_function
+          = std::bind(&ParticleHandler<dim,spacedim>::store_particles,
+                      std::cref(*this),
+                      std::placeholders::_1,
+                      std::placeholders::_2,
+                      std::placeholders::_3);
+
+        // Compute the size per serialized particle. This is simple if we own
+        // particles, simply ask one of them. Otherwise create a temporary particle,
+        // ask it for its size and add the size of its properties.
+        const std::size_t size_per_particle = (particles.size() > 0)
+                                              ?
+                                              begin()->serialized_size_in_bytes()
+                                              :
+                                              Particle<dim,spacedim>().serialized_size_in_bytes() +
+                                              property_pool->n_properties_per_slot() * sizeof(double);
+
+        // We need to transfer the number of particles for this cell and
+        // the particle data itself. No factor of 2^dim is needed here,
+        // because no coarsening can happen during serialization.
+        const std::size_t transfer_size_per_cell = sizeof (unsigned int) +
+                                                   (size_per_particle * global_max_particles_per_cell);
+
+        data_offset = non_const_triangulation->register_data_attach(transfer_size_per_cell,callback_function);
+      }
+
+    // Check if something was stored and load it
+    if (data_offset != numbers::invalid_unsigned_int)
+      {
+        const std::function<void(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &,
+                                 const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus,
+                                 const void *) > callback_function
+          = std::bind(&ParticleHandler<dim,spacedim>::load_particles,
+                      std::ref(*this),
+                      std::placeholders::_1,
+                      std::placeholders::_2,
+                      std::placeholders::_3);
+
+        non_const_triangulation->notify_ready_to_unpack(data_offset,callback_function);
+
+        // Reset offset and update global number of particles. The number
+        // can change because of discarded or newly generated particles
+        data_offset = numbers::invalid_unsigned_int;
+        update_n_global_particles();
+      }
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::store_particles(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &cell,
+                                                 const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus status,
+                                                 void *data) const
+  {
+    unsigned int n_particles(0);
+
+    // If the cell persists or is refined store all particles of the current cell.
+    if (status == parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST
+        || status == parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE)
+      {
+        const boost::iterator_range<particle_iterator> particle_range
+          = particles_in_cell(cell);
+        n_particles = std::distance(particle_range.begin(),particle_range.end());
+
+        unsigned int *ndata = static_cast<unsigned int *> (data);
+        *ndata = n_particles;
+        data = static_cast<void *> (ndata + 1);
+
+        for (particle_iterator particle = particle_range.begin();
+             particle != particle_range.end(); ++particle)
+          {
+            particle->write_data(data);
+          }
+      }
+    // If this cell is the parent of children that will be coarsened, collect
+    // the particles of all children.
+    else if (status == parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN)
+      {
+        for (unsigned int child_index = 0; child_index < GeometryInfo<dim>::max_children_per_cell; ++child_index)
+          {
+            const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator child = cell->child(child_index);
+            n_particles += n_particles_in_cell(child);
+          }
+
+        unsigned int *ndata = static_cast<unsigned int *> (data);
+        *ndata = n_particles;
+
+        data = static_cast<void *> (ndata + 1);
+
+        for (unsigned int child_index = 0; child_index < GeometryInfo<dim>::max_children_per_cell; ++child_index)
+          {
+            const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator child = cell->child(child_index);
+            const boost::iterator_range<particle_iterator> particle_range
+              = particles_in_cell(child);
+
+            for (particle_iterator particle = particle_range.begin();
+                 particle != particle_range.end(); ++particle)
+              {
+                particle->write_data(data);
+              }
+          }
+      }
+    else
+      Assert (false, ExcInternalError());
+  }
+
+
+
+  template <int dim, int spacedim>
+  void
+  ParticleHandler<dim,spacedim>::load_particles(const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator &cell,
+                                                const typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus status,
+                                                const void *data)
+  {
+    const unsigned int *n_particles_in_cell_ptr = static_cast<const unsigned int *> (data);
+    const void *pdata = reinterpret_cast<const void *> (n_particles_in_cell_ptr + 1);
+
+    if (*n_particles_in_cell_ptr == 0)
+      return;
+
+    // Load all particles from the data stream and store them in the local
+    // particle map.
+    if (status == parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST)
+      {
+        typename std::multimap<types::LevelInd,Particle<dim,spacedim> >::iterator position_hint = particles.end();
+        for (unsigned int i = 0; i < *n_particles_in_cell_ptr; ++i)
+          {
+            // Use std::multimap::emplace_hint to speed up insertion of
+            // particles. This is a C++11 function, but not all compilers
+            // that report a -std=c++11 (like gcc 4.6) implement it, so
+            // require C++14 instead.
+#ifdef DEAL_II_WITH_CXX14
+            position_hint = particles.emplace_hint(position_hint,
+                                                   std::make_pair(cell->level(),cell->index()),
+                                                   Particle<dim,spacedim>(pdata,*property_pool));
+#else
+            position_hint = particles.insert(position_hint,
+                                             std::make_pair(std::make_pair(cell->level(),cell->index()),
+                                                            Particle<dim,spacedim>(pdata,*property_pool)));
+#endif
+            ++position_hint;
+          }
+      }
+
+    else if (status == parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN)
+      {
+        typename std::multimap<types::LevelInd,Particle<dim,spacedim> >::iterator position_hint = particles.end();
+        for (unsigned int i = 0; i < *n_particles_in_cell_ptr; ++i)
+          {
+            // Use std::multimap::emplace_hint to speed up insertion of
+            // particles. This is a C++11 function, but not all compilers
+            // that report a -std=c++11 (like gcc 4.6) implement it, so
+            // require C++14 instead.
+#ifdef DEAL_II_WITH_CXX14
+            position_hint = particles.emplace_hint(position_hint,
+                                                   std::make_pair(cell->level(),cell->index()),
+                                                   Particle<dim,spacedim>(pdata,*property_pool));
+#else
+            position_hint = particles.insert(position_hint,
+                                             std::make_pair(std::make_pair(cell->level(),cell->index()),
+                                                            Particle<dim,spacedim>(pdata,*property_pool)));
+#endif
+            const Point<dim> p_unit = mapping->transform_real_to_unit_cell(cell, position_hint->second.get_location());
+            position_hint->second.set_reference_location(p_unit);
+            ++position_hint;
+          }
+      }
+    else if (status == parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE)
+      {
+        std::vector<typename std::multimap<types::LevelInd,Particle<dim,spacedim> >::iterator> position_hints(GeometryInfo<dim>::max_children_per_cell);
+        for (unsigned int child_index=0; child_index<GeometryInfo<dim>::max_children_per_cell; ++child_index)
+          {
+            const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator child = cell->child(child_index);
+            position_hints[child_index] = particles.upper_bound(std::make_pair(child->level(),child->index()));
+          }
+
+        for (unsigned int i = 0; i < *n_particles_in_cell_ptr; ++i)
+          {
+            Particle<dim,spacedim> p (pdata,*property_pool);
+
+            for (unsigned int child_index = 0; child_index < GeometryInfo<dim>::max_children_per_cell; ++child_index)
+              {
+                const typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator child = cell->child(child_index);
+
+                try
+                  {
+                    const Point<dim> p_unit = mapping->transform_real_to_unit_cell(child,
+                                                                                   p.get_location());
+                    if (GeometryInfo<dim>::is_inside_unit_cell(p_unit))
+                      {
+                        p.set_reference_location(p_unit);
+                        // Use std::multimap::emplace_hint to speed up insertion of
+                        // particles. This is a C++11 function, but not all compilers
+                        // that report a -std=c++11 (like gcc 4.6) implement it, so
+                        // require C++14 instead.
+#ifdef DEAL_II_WITH_CXX14
+                        position_hints[child_index] = particles.emplace_hint(position_hints[child_index],
+                                                                             std::make_pair(child->level(),child->index()),
+                                                                             std::move(p));
+#else
+                        position_hints[child_index] = particles.insert(position_hints[child_index],
+                                                                       std::make_pair(std::make_pair(child->level(),child->index()),
+                                                                                      p));
+#endif
+                        ++position_hints[child_index];
+                        break;
+                      }
+                  }
+                catch (typename Mapping<dim>::ExcTransformationFailed &)
+                  {}
+              }
+          }
+      }
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+DEAL_II_NAMESPACE_OPEN
+
+#include "particle_handler.inst"
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/particles/particle_handler.inst.in b/source/particles/particle_handler.inst.in
new file mode 100644
index 0000000000..536a0a2a5d
--- /dev/null
+++ b/source/particles/particle_handler.inst.in
@@ -0,0 +1,26 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS)
+{
+#if deal_II_dimension <= deal_II_space_dimension
+    namespace Particles
+    \{
+    template
+    class ParticleHandler<deal_II_dimension, deal_II_space_dimension>;
+    \}
+#endif
+}
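For orientation: deal.II's instantiation machinery expands the loop above at build time, so with the usual DIMENSIONS and SPACE_DIMENSIONS lists of 1, 2, 3 the guard deal_II_dimension <= deal_II_space_dimension yields explicit instantiations equivalent to the following (illustrative, the actual file is generated):

    namespace Particles
    {
      template class ParticleHandler<1,1>;
      template class ParticleHandler<1,2>;
      template class ParticleHandler<1,3>;
      template class ParticleHandler<2,2>;
      template class ParticleHandler<2,3>;
      template class ParticleHandler<3,3>;
    }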