* the triangulation from whoever owns them.
*
 * In order to keep track of which mpi process received which points, a map
- * from mpi process to IndexSet is returned by the function. This IndexSet contains
- * the local indices of the points that were passed to this function on the
- * calling mpi process, and that falls within the part of triangulation
- * owned by this mpi process.
+ * from mpi process to IndexSet is returned by the function. This IndexSet
+ * contains the local indices of the points that were passed to this
+ * function on the calling mpi process, and that fall within the part of
+ * the triangulation owned by this mpi process.
*
- * @param[in] A vector of points that do not need to be on the local
- * processor
+ * @param[in] positions A vector of points that do not need to be on the
+ * local processor, but have to be in the triangulation that is associated
+ * with this ParticleHandler object.
*
- * @param[in] A vector of vectors of bounding boxes. The bounding boxes
- * global_bboxes[rk] describe which part of the mesh is locally owned by
+ * @param[in] global_bboxes A vector of vectors of bounding boxes. The
+ * bounding boxes global_bboxes[rk] describe which part of the mesh is
+ * locally owned by the mpi process with rank rk.
- * or it should be `positions.size()*this->n_properties_per_particle()`.
+ * The size of the properties vector can be zero, or it should be
+ * `positions.size()*this->n_properties_per_particle()`.
* Notice that this function call will transfer the properties from the
* local mpi process to the final mpi process that will own each of the
- * particle, and it may therefore be communication intensive.
+ * particles, and it may therefore be communication intensive. Properties
+ * should be ordered particle-wise, i.e., for N particles with n properties
+ * each:
+ * @code
+ * particle_1_property_1, particle_1_property_2, ..., particle_1_property_n,
+ * particle_2_property_1, particle_2_property_2, ..., particle_2_property_n,
+ * ...
+ * particle_N_property_1, particle_N_property_2, ..., particle_N_property_n.
+ * @endcode
*
- * @return A pair of maps from owner to IndexSet, that contains the local
- * indices of the points that other mpi processes have sent to the current
- * processor, and a map that identifies the new owner of the points that
- * were originally located on this processor.
+ * @return A map from owner to IndexSet that contains the local indices
+ * of the points that were passed to this function on the calling mpi
+ * process, and that fall within the part of the triangulation owned by
+ * this mpi process.
*
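+ * For example, assuming `positions` holds the points generated on the
+ * calling process, and `global_bboxes` has been gathered from all
+ * processes (e.g., with GridTools::compute_mesh_predicate_bounding_box()
+ * followed by Utilities::MPI::all_gather()), a minimal sketch of a call
+ * could look as follows. `particle_handler` and the member function name
+ * are illustrative:
+ * @code
+ * std::vector<double> properties(
+ *   positions.size() * particle_handler.n_properties_per_particle());
+ * // ... fill properties, one particle after the other ...
+ *
+ * const std::map<unsigned int, IndexSet> origin_to_indices =
+ *   particle_handler.insert_global_particles(positions,
+ *                                            global_bboxes,
+ *                                            properties);
+ *
+ * // origin_to_indices[rank] contains the indices, within the positions
+ * // vector passed in by process rank, of the points that are now owned
+ * // by the calling process.
+ * @endcode
+ *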
 * @author Bruno Blais, Luca Heltai, 2019
*/
AssertDimension(properties.size(),
positions.size() * n_properties_per_particle());
- const auto my_cpu =
+ const auto mpi_process =
Utilities::MPI::this_mpi_process(triangulation->get_communicator());
- const auto n_cpus =
+ const auto n_mpi_processes =
Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
positions.size());
// Calculate all starting points locally
- std::vector<unsigned int> starting_points(n_cpus);
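+ // n_particles_per_proc[p] holds the number of particles passed to this
+ // function on process p; the exclusive prefix sum over these counts
+ // gives the first globally unique particle id available to each process.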
+ std::vector<unsigned int> particle_start_indices(n_mpi_processes);
- for (unsigned int i = 0; i < starting_points.size(); ++i)
+ unsigned int particle_start_index = 0;
+ for (unsigned int process = 0; process < particle_start_indices.size();
+ ++process)
{
- starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
- n_particles_per_proc.begin() + i,
- 0u);
+ particle_start_indices[process] = particle_start_index;
+ particle_start_index += n_particles_per_proc[process];
}
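+ // Locate, for every point, the cell that contains it and the process
+ // that owns that cell; the resulting tuple is unpacked below (cf.
+ // GridTools::distributed_compute_point_locations()).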
auto distributed_tuple =
// Finally create the particles
std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
- cell_iterators = std::get<0>(distributed_tuple);
- std::vector<std::vector<Point<dim>>> dist_reference_points =
+ local_cells_containing_particles = std::get<0>(distributed_tuple);
+
+ // The reference position of every particle in the local part of the
+ // triangulation.
+ std::vector<std::vector<Point<dim>>> local_reference_positions =
std::get<1>(distributed_tuple);
- std::vector<std::vector<unsigned int>> dist_map =
+ // The original index in the positions vector for each particle in the local
+ // part of the triangulation
+ std::vector<std::vector<unsigned int>> origin_indices_of_local_particles =
std::get<2>(distributed_tuple);
- std::vector<std::vector<Point<spacedim>>> dist_points =
+ // The real spatial position of every particle in the local part of the
+ // triangulation.
+ std::vector<std::vector<Point<spacedim>>> local_positions =
std::get<3>(distributed_tuple);
- std::vector<std::vector<unsigned int>> dist_procs =
+ // The MPI process that inserted each particle
+ std::vector<std::vector<unsigned int>> calling_process_index =
std::get<4>(distributed_tuple);
- // Create the multimap of particles
+ // Create the multimap of local particles
std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
Particle<dim, spacedim>>
particles;
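+ // Each entry associates a locally owned cell with one of the particles
+ // that lie in it; this is the layout expected by the call to
+ // insert_particles() below.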
// Create the map of process to indices, indicating who sent us which
// points
- std::map<unsigned int, IndexSet> cpu_to_indices;
+ std::map<unsigned int, IndexSet> origin_process_to_local_particle_indices;
- for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
+ for (unsigned int i_cell = 0;
+ i_cell < local_cells_containing_particles.size();
+ ++i_cell)
{
for (unsigned int i_particle = 0;
- i_particle < dist_points[i_cell].size();
+ i_particle < local_positions[i_cell].size();
++i_particle)
{
- const auto &local_id = dist_map[i_cell][i_particle];
- const auto &cpu = dist_procs[i_cell][i_particle];
-
- const unsigned int particle_id = local_id + starting_points[cpu];
-
- particles.emplace(
- cell_iterators[i_cell],
- Particle<dim, spacedim>(dist_points[i_cell][i_particle],
- dist_reference_points[i_cell][i_particle],
- particle_id));
-
- if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
- cpu_to_indices.insert({cpu, IndexSet(n_particles_per_proc[cpu])});
-
- cpu_to_indices[cpu].add_index(local_id);
+ const auto &local_id_on_calling_process =
+ origin_indices_of_local_particles[i_cell][i_particle];
+ const auto &calling_process =
+ calling_process_index[i_cell][i_particle];
+
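+ // Compute the global particle id: the index the particle had in the
+ // positions vector on the process that inserted it, offset by that
+ // process's start index computed above.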
+ const unsigned int particle_id =
+ local_id_on_calling_process +
+ particle_start_indices[calling_process];
+
+ particles.emplace(local_cells_containing_particles[i_cell],
+ Particle<dim, spacedim>(
+ local_positions[i_cell][i_particle],
+ local_reference_positions[i_cell][i_particle],
+ particle_id));
+
+ if (origin_process_to_local_particle_indices.find(
+ calling_process) ==
+ origin_process_to_local_particle_indices.end())
+ origin_process_to_local_particle_indices.insert(
+ {calling_process,
+ IndexSet(n_particles_per_proc[calling_process])});
+
+ origin_process_to_local_particle_indices[calling_process].add_index(
+ local_id_on_calling_process);
}
}
this->insert_particles(particles);
- for (auto &c : cpu_to_indices)
- c.second.compress();
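+ // Indices were added one by one above; compress each IndexSet so that
+ // it is sorted and stored compactly.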
+ for (auto &process_and_particle_indices :
+ origin_process_to_local_particle_indices)
+ process_and_particle_indices.second.compress();
// Take care of properties, if the input vector contains them.
const auto global_n_properties =
{
// [TODO]: fix this in some_to_some, to allow a process to
// communicate with itself.
- auto cpu_to_indices_to_send = cpu_to_indices;
- if (cpu_to_indices_to_send.find(my_cpu) != cpu_to_indices_to_send.end())
- cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
+ auto cpu_to_indices_to_send = origin_process_to_local_particle_indices;
+ if (cpu_to_indices_to_send.find(mpi_process) !=
+ cpu_to_indices_to_send.end())
+ cpu_to_indices_to_send.erase(
+ cpu_to_indices_to_send.find(mpi_process));
// Gather whom I sent my own particles to, to decide whom to send
// the particle properties to
// Compute the association between particle id and start of
// property data in the vector containing all local properties
std::map<types::particle_index, unsigned int> property_start;
- for (const auto &it : cpu_to_indices)
- if (it.first != my_cpu)
+ for (const auto &it : origin_process_to_local_particle_indices)
+ if (it.first != mpi_process)
{
unsigned int sequential_index = 0;
// Process all properties coming from other mpi processes
for (const auto &el : it.second)
{
types::particle_index particle_id =
- el + starting_points[it.first];
+ el + particle_start_indices[it.first];
property_start.insert({particle_id, local_properties.size()});
local_properties.insert(
for (const auto &el : it.second)
{
types::particle_index particle_id =
- el + starting_points[my_cpu];
+ el + particle_start_indices[mpi_process];
property_start.insert({particle_id, local_properties.size()});
local_properties.insert(local_properties.end(),
local_properties.begin() + start + n_properties_per_particle()});
}
}
- return cpu_to_indices;
+ return origin_process_to_local_particle_indices;
}