From: Luca Heltai
Date: Thu, 28 Nov 2019 18:38:12 +0000 (+0100)
Subject: Code review.
X-Git-Tag: v9.2.0-rc1~798^2~5
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=354db31cb16e14ab779274d2027c7581f201a6ec;p=dealii.git

Code review.
---

diff --git a/include/deal.II/particles/particle_handler.h b/include/deal.II/particles/particle_handler.h
index 0e57175c64..1ce0eabac3 100644
--- a/include/deal.II/particles/particle_handler.h
+++ b/include/deal.II/particles/particle_handler.h
@@ -249,13 +249,14 @@ namespace Particles
    * the triangulation from whoever owns them.
    *
    * In order to keep track of what mpi process received what points, a map
-   * from mpi process to IndexSet is returned by the function. This IndexSet contains
-   * the local indices of the points that were passed to this function on the
-   * calling mpi process, and that falls within the part of triangulation
-   * owned by this mpi process.
+   * from mpi process to IndexSet is returned by the function. This IndexSet
+   * contains the local indices of the points that were passed to this
+   * function on the calling mpi process, and that fall within the part of
+   * the triangulation owned by this mpi process.
    *
    * @param[in] A vector of points that do not need to be on the local
-   * processor
+   * processor, but have to be in the triangulation that is associated with
+   * this ParticleHandler object.
    *
    * @param[in] A vector of vectors of bounding boxes. The bounding boxes
    * global_bboxes[rk] describe which part of the mesh is locally owned by
@@ -269,12 +270,20 @@
    * or it should be `positions.size()*this->n_properties_per_particle()`.
    * Notice that this function call will transfer the properties from the
    * local mpi process to the final mpi process that will own each of the
-   * particle, and it may therefore be communication intensive.
+   * particles, and it may therefore be communication intensive. Properties
+   * should be ordered particle-wise, i.e., for N particles with n properties
+   * each:
+   * @code
+   * particle_1_property_1, particle_1_property_2, ..., particle_1_property_n,
+   * particle_2_property_1, particle_2_property_2, ..., particle_2_property_n,
+   * ...
+   * particle_N_property_1, particle_N_property_2, ..., particle_N_property_n.
+   * @endcode
    *
-   * @return A pair of maps from owner to IndexSet, that contains the local
-   * indices of the points that other mpi processes have sent to the current
-   * processor, and a map that identifies the new owner of the points that
-   * were originally located on this processor.
+   * @return A map from owner to IndexSet, that contains the local indices
+   * of the points that were passed to this function on the calling mpi
+   * process, and that fall within the part of the triangulation owned by
+   * this mpi process.
    *
    * @author : Bruno Blais, Luca Heltai 2019
    */
diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc
index 2603e395a7..555ecd3bb9 100644
--- a/source/particles/particle_handler.cc
+++ b/source/particles/particle_handler.cc
@@ -454,10 +454,10 @@ namespace Particles
     AssertDimension(properties.size(),
                     positions.size() * n_properties_per_particle());
 
-    const auto my_cpu =
+    const auto mpi_process =
       Utilities::MPI::this_mpi_process(triangulation->get_communicator());
 
-    const auto n_cpus =
+    const auto n_mpi_processes =
       Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
 
     GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
@@ -468,13 +468,14 @@ namespace Particles
                                  positions.size());
 
     // Calculate all starting points locally
-    std::vector<unsigned int> starting_points(n_cpus);
+    std::vector<unsigned int> particle_start_indices(n_mpi_processes);
 
-    for (unsigned int i = 0; i < starting_points.size(); ++i)
+    unsigned int particle_start_index = 0;
+    for (unsigned int process = 0; process < particle_start_indices.size();
+         ++process)
       {
-        starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
-                                             n_particles_per_proc.begin() + i,
-                                             0u);
+        particle_start_indices[process] = particle_start_index;
+        particle_start_index += n_particles_per_proc[process];
       }
 
     auto distributed_tuple =
@@ -484,52 +485,72 @@ namespace Particles
 
     // Finally create the particles
     std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
-      cell_iterators = std::get<0>(distributed_tuple);
-    std::vector<std::vector<Point<dim>>> dist_reference_points =
+      local_cells_containing_particles = std::get<0>(distributed_tuple);
+
+    // The reference position of every particle in the local part of the
+    // triangulation.
+    std::vector<std::vector<Point<dim>>> local_reference_positions =
       std::get<1>(distributed_tuple);
-    std::vector<std::vector<unsigned int>> dist_map =
+    // The original index in the positions vector for each particle in the local
+    // part of the triangulation
+    std::vector<std::vector<unsigned int>> origin_indices_of_local_particles =
      std::get<2>(distributed_tuple);
-    std::vector<std::vector<Point<spacedim>>> dist_points =
+    // The real spatial position of every particle in the local part of the
+    // triangulation.
+    std::vector<std::vector<Point<spacedim>>> local_positions =
       std::get<3>(distributed_tuple);
-    std::vector<std::vector<unsigned int>> dist_procs =
+    // The MPI process that inserted each particle
+    std::vector<std::vector<unsigned int>> calling_process_index =
       std::get<4>(distributed_tuple);
 
-    // Create the multimap of particles
+    // Create the multimap of local particles
     std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
                   Particle<dim, spacedim>>
       particles;
 
     // Create the map of cpu to indices, indicating whom sent us what
     // point
-    std::map<unsigned int, IndexSet> cpu_to_indices;
+    std::map<unsigned int, IndexSet> origin_process_to_local_particle_indices;
 
-    for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
+    for (unsigned int i_cell = 0;
+         i_cell < local_cells_containing_particles.size();
+         ++i_cell)
       {
         for (unsigned int i_particle = 0;
-             i_particle < dist_points[i_cell].size();
+             i_particle < local_positions[i_cell].size();
              ++i_particle)
           {
-            const auto &local_id = dist_map[i_cell][i_particle];
-            const auto &cpu = dist_procs[i_cell][i_particle];
-
-            const unsigned int particle_id = local_id + starting_points[cpu];
-
-            particles.emplace(
-              cell_iterators[i_cell],
-              Particle<dim, spacedim>(dist_points[i_cell][i_particle],
-                                      dist_reference_points[i_cell][i_particle],
-                                      particle_id));
-
-            if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
-              cpu_to_indices.insert({cpu, IndexSet(n_particles_per_proc[cpu])});
-
-            cpu_to_indices[cpu].add_index(local_id);
+            const auto &local_id_on_calling_process =
+              origin_indices_of_local_particles[i_cell][i_particle];
+            const auto &calling_process =
+              calling_process_index[i_cell][i_particle];
+
+            const unsigned int particle_id =
+              local_id_on_calling_process +
+              particle_start_indices[calling_process];
+
+            particles.emplace(local_cells_containing_particles[i_cell],
+                              Particle<dim, spacedim>(
+                                local_positions[i_cell][i_particle],
+                                local_reference_positions[i_cell][i_particle],
+                                particle_id));
+
+            if (origin_process_to_local_particle_indices.find(
+                  calling_process) ==
+                origin_process_to_local_particle_indices.end())
+              origin_process_to_local_particle_indices.insert(
+                {calling_process,
+                 IndexSet(n_particles_per_proc[calling_process])});
+
+            origin_process_to_local_particle_indices[calling_process].add_index(
+              local_id_on_calling_process);
           }
       }
 
     this->insert_particles(particles);
 
-    for (auto &c : cpu_to_indices)
-      c.second.compress();
+    for (auto &process_and_particle_indices :
+         origin_process_to_local_particle_indices)
+      process_and_particle_indices.second.compress();
 
     // Take care of properties, if the input vector contains them.
     const auto global_n_properties =
@@ -538,9 +559,11 @@
       {
         // [TODO]: fix this in some_to_some, to allow communication from
         // my cpu to my cpu.
-        auto cpu_to_indices_to_send = cpu_to_indices;
-        if (cpu_to_indices_to_send.find(my_cpu) != cpu_to_indices_to_send.end())
-          cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
+        auto cpu_to_indices_to_send = origin_process_to_local_particle_indices;
+        if (cpu_to_indices_to_send.find(mpi_process) !=
+            cpu_to_indices_to_send.end())
+          cpu_to_indices_to_send.erase(
+            cpu_to_indices_to_send.find(mpi_process));
 
         // Gather whom I sent my own particles to, to decide whom to send
         // the particle properties
@@ -582,15 +605,15 @@
           // Compute the association between particle id and start of
           // property data in the vector containing all local properties
           std::map<types::particle_index, unsigned int> property_start;
-          for (const auto &it : cpu_to_indices)
-            if (it.first != my_cpu)
+          for (const auto &it : origin_process_to_local_particle_indices)
+            if (it.first != mpi_process)
               {
                 unsigned int sequential_index = 0;
                 // Process all properties coming from other mpi processes
                 for (const auto &el : it.second)
                   {
                     types::particle_index particle_id =
-                      el + starting_points[it.first];
+                      el + particle_start_indices[it.first];
                     property_start.insert({particle_id, local_properties.size()});
 
                     local_properties.insert(
@@ -610,7 +633,7 @@
               for (const auto &el : it.second)
                 {
                   types::particle_index particle_id =
-                    el + starting_points[my_cpu];
+                    el + particle_start_indices[mpi_process];
                   property_start.insert({particle_id, local_properties.size()});
 
                   local_properties.insert(local_properties.end(),
@@ -634,7 +657,7 @@
                  local_properties.begin() + start + n_properties_per_particle()});
              }
          }
 
-    return cpu_to_indices;
+    return origin_process_to_local_particle_indices;
   }
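
For context, the snippet below is a minimal, hypothetical usage sketch of the interface this patch documents and renames; it is not part of the commit. It assumes a deal.II built with MPI and p4est, 9.2-era APIs, an already initialized MPI runtime, and one property per particle; the names (tria, mapping, insert_points_from_every_rank) are illustrative only.

// Minimal usage sketch (not part of this commit): every rank passes points
// that may lie anywhere in the global mesh; insert_global_particles() ships
// them to the rank that owns the enclosing cell and reports, per origin
// rank, which of the locally supplied points ended up on this rank.
#include <deal.II/base/bounding_box.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/point.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/fe/mapping_q1.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/particles/particle_handler.h>

#include <map>
#include <vector>

using namespace dealii;

std::map<unsigned int, IndexSet>
insert_points_from_every_rank(const std::vector<Point<2>> &positions,
                              const std::vector<double> &  properties)
{
  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(4);

  const MappingQ1<2>            mapping;
  Particles::ParticleHandler<2> particle_handler(tria,
                                                 mapping,
                                                 /*n_properties=*/1);

  // Each rank describes its locally owned part of the mesh by bounding
  // boxes; gathering them gives the global_bboxes argument described above.
  const auto local_boxes = GridTools::compute_mesh_predicate_bounding_box(
    tria, IteratorFilters::LocallyOwnedCell());
  const auto global_boxes =
    Utilities::MPI::all_gather(MPI_COMM_WORLD, local_boxes);

  // properties must be ordered particle-wise, i.e. one block of
  // n_properties_per_particle() values per entry of positions.
  return particle_handler.insert_global_particles(positions,
                                                  global_boxes,
                                                  properties);
}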