From: Luca Heltai
Date: Thu, 11 Jun 2020 07:58:15 +0000 (+0200)
Subject: Add ids in add_global_particles
X-Git-Tag: v9.3.0-rc1~1390^2~4
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3e77e0ae8bd3c40a5d9d5fcca69be10d6b590ff1;p=dealii.git

Add ids in add_global_particles
---

diff --git a/include/deal.II/particles/particle_handler.h b/include/deal.II/particles/particle_handler.h
index 136ec3a5d8..82b5b0f4f9 100644
--- a/include/deal.II/particles/particle_handler.h
+++ b/include/deal.II/particles/particle_handler.h
@@ -290,6 +290,10 @@ namespace Particles
      * process that will own each of the particles, and it may therefore be
      * communication intensive.
      *
+     * @param[in] ids (Optional) A vector of ids to associate with each
+     * particle. If empty, the ids are computed automatically by assigning
+     * them as a contiguous range starting from the first available index.
+     *
      * @return A map from owner to IndexSet, that contains the local indices
      * of the points that were passed to this function on the calling mpi
      * process, and that falls within the part of triangulation owned by this
@@ -299,8 +303,9 @@
     insert_global_particles(
       const std::vector<Point<spacedim>> &positions,
       const std::vector<std::vector<BoundingBox<spacedim>>>
-        &                                       global_bounding_boxes,
-      const std::vector<std::vector<double>> &properties = {});
+        &                                       global_bounding_boxes,
+      const std::vector<std::vector<double>> & properties = {},
+      const std::vector<types::particle_index> &ids = {});
 
     /**
      * Set the position of the particles by using the values contained in the

diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc
index bf5e16f8b0..f740e56d03 100644
--- a/source/particles/particle_handler.cc
+++ b/source/particles/particle_handler.cc
@@ -488,8 +488,9 @@ namespace Particles
   ParticleHandler<dim, spacedim>::insert_global_particles(
     const std::vector<Point<spacedim>> &positions,
     const std::vector<std::vector<BoundingBox<spacedim>>>
-      &                                       global_bounding_boxes,
-    const std::vector<std::vector<double>> &properties)
+      &                                       global_bounding_boxes,
+    const std::vector<std::vector<double>> & properties,
+    const std::vector<types::particle_index> &ids)
   {
     if (!properties.empty())
       {
@@ -500,6 +501,9 @@
 #endif
       }
 
+    if (!ids.empty())
+      AssertDimension(ids.size(), positions.size());
+
     const auto tria =
       dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
         &(*triangulation));
@@ -508,8 +512,6 @@
 
     const auto n_mpi_processes = Utilities::MPI::n_mpi_processes(comm);
 
-    GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
-
     // Compute the global number of properties
     const auto n_global_properties =
       Utilities::MPI::sum(properties.size(), comm);
@@ -521,7 +523,7 @@
     // Calculate all starting points locally
     std::vector<unsigned int> particle_start_indices(n_mpi_processes);
 
-    unsigned int particle_start_index = 0;
+    unsigned int particle_start_index = get_next_free_particle_index();
     for (unsigned int process = 0; process < particle_start_indices.size();
          ++process)
       {
@@ -595,35 +597,71 @@
     std::map<unsigned int, std::vector<std::vector<double>>>
       locally_owned_properties_from_other_processes;
 
-    if (n_global_properties > 0)
+    // A map from mpi process to ids, ordered as in the IndexSet.
+    // Notice that this ordering may be different from the ordering in the
+    // vectors above, since no local ordering is guaranteed by the
+    // distribute_compute_point_locations() call.
+    // This is only filled if the ids vector is not empty.
+    std::map<unsigned int, std::vector<types::particle_index>>
+      locally_owned_ids_from_other_processes;
+
+    if (n_global_properties > 0 || !ids.empty())
       {
         // Gather whom I sent my own particles to, to decide whom to send
-        // the particle properties
+        // the particle properties or the ids
         auto send_to_cpu = Utilities::MPI::some_to_some(
           comm, original_process_to_local_particle_indices);
-        std::map<unsigned int, std::vector<std::vector<double>>>
-          non_locally_owned_properties;
 
-        // Prepare the vector of properties to send
-        for (const auto &it : send_to_cpu)
+        if (n_global_properties > 0)
           {
-            std::vector<std::vector<double>> properties_to_send(
-              it.second.n_elements(),
-              std::vector<double>(n_properties_per_particle()));
-            unsigned int index = 0;
-            for (const auto &el : it.second)
-              properties_to_send[index++] = properties[el];
-            non_locally_owned_properties.insert({it.first, properties_to_send});
+            std::map<unsigned int, std::vector<std::vector<double>>>
+              non_locally_owned_properties;
+
+            for (const auto &it : send_to_cpu)
+              {
+                std::vector<std::vector<double>> properties_to_send(
+                  it.second.n_elements(),
+                  std::vector<double>(n_properties_per_particle()));
+                unsigned int index = 0;
+                for (const auto &el : it.second)
+                  properties_to_send[index++] = properties[el];
+                non_locally_owned_properties.insert(
+                  {it.first, properties_to_send});
+              }
+
+            // Send the non locally owned properties to each mpi process
+            // that needs them
+            locally_owned_properties_from_other_processes =
+              Utilities::MPI::some_to_some(comm, non_locally_owned_properties);
+
+            AssertDimension(
+              locally_owned_properties_from_other_processes.size(),
+              original_process_to_local_particle_indices.size());
+          }
 
-        // Send the non locally owned properties to each mpi process
-        // that needs them
-        locally_owned_properties_from_other_processes =
-          Utilities::MPI::some_to_some(comm, non_locally_owned_properties);
+        if (!ids.empty())
+          {
+            std::map<unsigned int, std::vector<types::particle_index>>
+              non_locally_owned_ids;
+            for (const auto &it : send_to_cpu)
+              {
+                std::vector<types::particle_index> ids_to_send(
+                  it.second.n_elements());
+                unsigned int index = 0;
+                for (const auto &el : it.second)
+                  ids_to_send[index++] = ids[el];
+                non_locally_owned_ids.insert({it.first, ids_to_send});
+              }
+
+            // Send the non locally owned ids to each mpi process
+            // that needs them
+            locally_owned_ids_from_other_processes =
+              Utilities::MPI::some_to_some(comm, non_locally_owned_ids);
 
-        AssertDimension(locally_owned_properties_from_other_processes.size(),
-                        original_process_to_local_particle_indices.size());
+            AssertDimension(locally_owned_ids_from_other_processes.size(),
+                            original_process_to_local_particle_indices.size());
+          }
       }
 
 
@@ -643,29 +681,34 @@
             {
               const unsigned int local_id_on_calling_process =
                 original_indices_of_local_particles[i_cell][i_particle];
+
               const unsigned int calling_process =
                 calling_process_indices[i_cell][i_particle];
 
+              const unsigned int index_within_set =
+                original_process_to_local_particle_indices[calling_process]
+                  .index_within_set(local_id_on_calling_process);
+
               const unsigned int particle_id =
-                local_id_on_calling_process +
-                particle_start_indices[calling_process];
+                ids.empty() ?
+                  local_id_on_calling_process +
+                    particle_start_indices[calling_process] :
+                  locally_owned_ids_from_other_processes[calling_process]
+                                                        [index_within_set];
 
               Particle<dim, spacedim> particle(
                 local_positions[i_cell][i_particle],
                 local_reference_positions[i_cell][i_particle],
                 particle_id);
 
+              particle.set_property_pool(get_property_pool());
+
               if (n_global_properties > 0)
                 {
-                  const unsigned int index_within_set =
-                    original_process_to_local_particle_indices[calling_process]
-                      .index_within_set(local_id_on_calling_process);
-
                   const auto &this_particle_properties =
                     locally_owned_properties_from_other_processes
                       [calling_process][index_within_set];
 
-                  particle.set_property_pool(get_property_pool());
                   particle.set_properties(this_particle_properties);
                 }
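
For reference, the extended interface can be exercised as in the following
minimal sketch. It is not part of the commit: the helper name
insert_two_particles_with_ids, the positions, and the ids 42 and 43 are made
up for illustration; the bounding-box exchange follows the usual
GridTools::compute_mesh_predicate_bounding_box() / Utilities::MPI::all_gather()
pattern used elsewhere in the library, and the ParticleHandler is assumed to
have been constructed with the triangulation and a mapping.

    #include <deal.II/base/mpi.h>
    #include <deal.II/base/point.h>
    #include <deal.II/distributed/tria.h>
    #include <deal.II/grid/filtered_iterator.h>
    #include <deal.II/grid/grid_tools.h>
    #include <deal.II/particles/particle_handler.h>

    using namespace dealii;

    // Hypothetical helper: insert two particles and assign them the
    // caller-chosen ids 42 and 43 through the new optional argument.
    void insert_two_particles_with_ids(
      const parallel::distributed::Triangulation<2> &tria,
      Particles::ParticleHandler<2> &                particle_handler)
    {
      // Positions known on the calling process.
      const std::vector<Point<2>> positions = {Point<2>(0.1, 0.1),
                                               Point<2>(0.9, 0.9)};

      // One set of bounding boxes per process, describing the locally
      // owned part of the mesh, gathered on all processes.
      const auto local_boxes = GridTools::compute_mesh_predicate_bounding_box(
        tria, IteratorFilters::LocallyOwnedCell());
      const auto global_bounding_boxes =
        Utilities::MPI::all_gather(tria.get_communicator(), local_boxes);

      // No per-particle properties (empty vector), but explicit ids:
      // this exercises the fourth argument introduced by this patch.
      const std::vector<types::particle_index> ids = {42, 43};

      particle_handler.insert_global_particles(positions,
                                               global_bounding_boxes,
                                               {},
                                               ids);
    }

When ids is left empty the interface behaves as before, except that automatic
numbering now starts at get_next_free_particle_index() rather than at 0, so
repeated calls no longer hand out duplicate ids.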