From b700a73cb6d668128f4b2cdf1b861fd0ffa0a51c Mon Sep 17 00:00:00 2001
From: Luca Heltai
Date: Mon, 25 Nov 2019 10:41:07 +0100
Subject: [PATCH] Moved implementation to .cc file.

---
 include/deal.II/particles/particle_handler.h | 195 +-----------------
 source/particles/particle_handler.cc         | 200 +++++++++++++++++++
 2 files changed, 201 insertions(+), 194 deletions(-)

diff --git a/include/deal.II/particles/particle_handler.h b/include/deal.II/particles/particle_handler.h
index d12ae6abd6..fb652162a3 100644
--- a/include/deal.II/particles/particle_handler.h
+++ b/include/deal.II/particles/particle_handler.h
@@ -283,200 +283,7 @@ namespace Particles
       const std::vector<Point<spacedim>> &positions,
       const std::vector<std::vector<BoundingBox<spacedim>>>
         &global_bounding_boxes,
-      const std::vector<double> &properties = std::vector<double>())
-    {
-      if (!properties.empty())
-        AssertDimension(properties.size(),
-                        positions.size() * n_properties_per_particle());
-
-      const auto my_cpu =
-        Utilities::MPI::this_mpi_process(triangulation->get_communicator());
-
-      const auto n_cpus =
-        Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
-
-      GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
-
-      // Gather the number of points per processor
-      auto n_particles_per_proc =
-        Utilities::MPI::all_gather(triangulation->get_communicator(),
-                                   positions.size());
-
-      // Calculate all starting points locally
-      std::vector<unsigned int> starting_points(n_cpus);
-
-      for (unsigned int i = 0; i < starting_points.size(); ++i)
-        {
-          starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
-                                               n_particles_per_proc.begin() + i,
-                                               0u);
-        }
-
-      auto distributed_tuple =
-        GridTools::distributed_compute_point_locations(cache,
-                                                       positions,
-                                                       global_bounding_boxes);
-
-      // Finally create the particles
-      std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
-        cell_iterators = std::get<0>(distributed_tuple);
-      std::vector<std::vector<Point<dim>>> dist_reference_points =
-        std::get<1>(distributed_tuple);
-      std::vector<std::vector<unsigned int>> dist_map =
-        std::get<2>(distributed_tuple);
-      std::vector<std::vector<Point<spacedim>>> dist_points =
-        std::get<3>(distributed_tuple);
-      std::vector<std::vector<unsigned int>> dist_procs =
-        std::get<4>(distributed_tuple);
-
-      // Create the multimap of particles
-      std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
-                    Particle<dim, spacedim>>
-        particles;
-
-      // Create the map of cpu to indices, indicating whom sent us what
-      // point
-      std::map<unsigned int, IndexSet> cpu_to_indices;
-
-      for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
-        {
-          for (unsigned int i_particle = 0;
-               i_particle < dist_points[i_cell].size();
-               ++i_particle)
-            {
-              const auto &local_id = dist_map[i_cell][i_particle];
-              const auto &cpu      = dist_procs[i_cell][i_particle];
-
-              const unsigned int particle_id = local_id + starting_points[cpu];
-
-              particles.emplace(cell_iterators[i_cell],
-                                Particle<dim, spacedim>(
-                                  dist_points[i_cell][i_particle],
-                                  dist_reference_points[i_cell][i_particle],
-                                  particle_id));
-
-              if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
-                cpu_to_indices.insert(
-                  {cpu, IndexSet(n_particles_per_proc[cpu])});
-
-              cpu_to_indices[cpu].add_index(local_id);
-            }
-        }
-
-      this->insert_particles(particles);
-      for (auto &c : cpu_to_indices)
-        c.second.compress();
-
-      // Take care of properties, if the input vector contains them.
-      const auto sum_pro =
-        Utilities::MPI::sum(properties.size(),
-                            triangulation->get_communicator());
-      if (sum_pro)
-        {
-          // [TODO]: fix this in some_to_some, to allow communication from
-          // my cpu to my cpu.
-          auto cpu_to_indices_to_send = cpu_to_indices;
-          if (cpu_to_indices_to_send.find(my_cpu) !=
-              cpu_to_indices_to_send.end())
-            cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
-
-          // Gather whom I sent my own particles to, to decide whom to send
-          // the particle properties
-          auto send_to_cpu =
-            Utilities::MPI::some_to_some(triangulation->get_communicator(),
-                                         cpu_to_indices_to_send);
-          std::map<unsigned int, std::vector<double>>
-            non_locally_owned_properties;
-
-          // Prepare the vector of non_locally_owned properties,
-          for (const auto &it : send_to_cpu)
-            {
-              std::vector<double> properties_to_send;
-              properties_to_send.reserve(it.second.n_elements() *
-                                         n_properties_per_particle());
-
-              for (const auto &el : it.second)
-                properties_to_send.insert(
-                  properties_to_send.end(),
-                  properties.begin() + el * n_properties_per_particle(),
-                  properties.begin() + (el + 1) * n_properties_per_particle());
-
-              non_locally_owned_properties.insert(
-                {it.first, properties_to_send});
-            }
-
-          // Send the non locally owned properties to each mpi process
-          // that needs them
-          auto locally_owned_properties_from_other_cpus =
-            Utilities::MPI::some_to_some(triangulation->get_communicator(),
-                                         non_locally_owned_properties);
-
-          // Store all local properties in a single vector. This includes
-          // properties coming from my own mpi process, and properties that
-          // were sent to me in the call above.
-          std::vector<double> local_properties;
-          local_properties.reserve(n_locally_owned_particles() *
-                                   n_properties_per_particle());
-
-          // Compute the association between particle id and start of
-          // property data in the vector containing all local properties
-          std::map<types::particle_index, unsigned int> property_start;
-          for (const auto &it : cpu_to_indices)
-            if (it.first != my_cpu)
-              {
-                unsigned int sequential_index = 0;
-                // Process all properties coming from other mpi processes
-                for (const auto &el : it.second)
-                  {
-                    types::particle_index particle_id =
-                      el + starting_points[it.first];
-                    property_start.insert(
-                      {particle_id, local_properties.size()});
-
-                    local_properties.insert(
-                      local_properties.end(),
-                      locally_owned_properties_from_other_cpus.at(it.first)
-                          .begin() +
-                        sequential_index * n_properties_per_particle(),
-                      locally_owned_properties_from_other_cpus.at(it.first)
-                          .begin() +
-                        (sequential_index + 1) * n_properties_per_particle());
-                    sequential_index++;
-                  }
-              }
-            else
-              {
-                // Process all properties that we already own
-                for (const auto &el : it.second)
-                  {
-                    types::particle_index particle_id =
-                      el + starting_points[my_cpu];
-                    property_start.insert(
-                      {particle_id, local_properties.size()});
-
-                    local_properties.insert(local_properties.end(),
-                                            properties.begin() +
-                                              el * n_properties_per_particle(),
-                                            properties.begin() +
-                                              (el + 1) *
-                                                n_properties_per_particle());
-                  }
-              }
-          // Actually fill the property pool of each particle.
-          for (auto particle : *this)
-            {
-              particle.set_property_pool(get_property_pool());
-              const auto id = particle.get_id();
-              Assert(property_start.find(id) != property_start.end(),
-                     ExcInternalError());
-              const auto start = property_start[id];
-              particle.set_properties({local_properties.begin() + start,
-                                       local_properties.begin() + start +
-                                         n_properties_per_particle()});
-            }
-        }
-      return cpu_to_indices;
-    }
+      const std::vector<double> &properties = std::vector<double>());
 
     /**
      * This function allows to register three additional functions that are
diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc
index 4709d909f0..da549ee6de 100644
--- a/source/particles/particle_handler.cc
+++ b/source/particles/particle_handler.cc
@@ -442,6 +442,206 @@ namespace Particles
 
 
 
+  template <int dim, int spacedim>
+  std::map<unsigned int, IndexSet>
+  ParticleHandler<dim, spacedim>::insert_global_particles(
+    const std::vector<Point<spacedim>> &positions,
+    const std::vector<std::vector<BoundingBox<spacedim>>>
+      &global_bounding_boxes,
+    const std::vector<double> &properties)
+  {
+    if (!properties.empty())
+      AssertDimension(properties.size(),
+                      positions.size() * n_properties_per_particle());
+
+    const auto my_cpu =
+      Utilities::MPI::this_mpi_process(triangulation->get_communicator());
+
+    const auto n_cpus =
+      Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
+
+    GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
+
+    // Gather the number of points per processor
+    auto n_particles_per_proc =
+      Utilities::MPI::all_gather(triangulation->get_communicator(),
+                                 positions.size());
+
+    // Calculate all starting points locally
+    std::vector<unsigned int> starting_points(n_cpus);
+
+    for (unsigned int i = 0; i < starting_points.size(); ++i)
+      {
+        starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
+                                             n_particles_per_proc.begin() + i,
+                                             0u);
+      }
+
+    const auto n_global_particles =
+      std::accumulate(n_particles_per_proc.begin(),
+                      n_particles_per_proc.end(),
+                      0u);
+
+    auto distributed_tuple =
+      GridTools::distributed_compute_point_locations(cache,
+                                                     positions,
+                                                     global_bounding_boxes);
+
+    // Finally create the particles
+    std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
+      cell_iterators = std::get<0>(distributed_tuple);
+    std::vector<std::vector<Point<dim>>> dist_reference_points =
+      std::get<1>(distributed_tuple);
+    std::vector<std::vector<unsigned int>> dist_map =
+      std::get<2>(distributed_tuple);
+    std::vector<std::vector<Point<spacedim>>> dist_points =
+      std::get<3>(distributed_tuple);
+    std::vector<std::vector<unsigned int>> dist_procs =
+      std::get<4>(distributed_tuple);
+
+    // Create the multimap of particles
+    std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
+                  Particle<dim, spacedim>>
+      particles;
+
+    // Create the map of cpu to indices, indicating whom sent us what
+    // point
+    std::map<unsigned int, IndexSet> cpu_to_indices;
+
+    for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
+      {
+        for (unsigned int i_particle = 0;
+             i_particle < dist_points[i_cell].size();
+             ++i_particle)
+          {
+            const auto &local_id = dist_map[i_cell][i_particle];
+            const auto &cpu      = dist_procs[i_cell][i_particle];
+
+            const unsigned int particle_id = local_id + starting_points[cpu];
+
+            particles.emplace(
+              cell_iterators[i_cell],
+              Particle<dim, spacedim>(dist_points[i_cell][i_particle],
+                                      dist_reference_points[i_cell][i_particle],
+                                      particle_id));
+
+            if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
+              cpu_to_indices.insert({cpu, IndexSet(n_particles_per_proc[cpu])});
+
+            cpu_to_indices[cpu].add_index(local_id);
+          }
+      }
+
+    this->insert_particles(particles);
+    for (auto &c : cpu_to_indices)
+      c.second.compress();
+
+    // Take care of properties, if the input vector contains them.
+    const auto global_n_properties =
+      Utilities::MPI::sum(properties.size(), triangulation->get_communicator());
+    if (global_n_properties > 0)
+      {
+        // [TODO]: fix this in some_to_some, to allow communication from
+        // my cpu to my cpu.
+        auto cpu_to_indices_to_send = cpu_to_indices;
+        if (cpu_to_indices_to_send.find(my_cpu) != cpu_to_indices_to_send.end())
+          cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
+
+        // Gather whom I sent my own particles to, to decide whom to send
+        // the particle properties
+        auto send_to_cpu =
+          Utilities::MPI::some_to_some(triangulation->get_communicator(),
+                                       cpu_to_indices_to_send);
+        std::map<unsigned int, std::vector<double>>
+          non_locally_owned_properties;
+
+        // Prepare the vector of non_locally_owned properties,
+        for (const auto &it : send_to_cpu)
+          {
+            std::vector<double> properties_to_send;
+            properties_to_send.reserve(it.second.n_elements() *
+                                       n_properties_per_particle());
+
+            for (const auto &el : it.second)
+              properties_to_send.insert(
+                properties_to_send.end(),
+                properties.begin() + el * n_properties_per_particle(),
+                properties.begin() + (el + 1) * n_properties_per_particle());
+
+            non_locally_owned_properties.insert({it.first, properties_to_send});
+          }
+
+        // Send the non locally owned properties to each mpi process
+        // that needs them
+        auto locally_owned_properties_from_other_cpus =
+          Utilities::MPI::some_to_some(triangulation->get_communicator(),
+                                       non_locally_owned_properties);
+
+        // Store all local properties in a single vector. This includes
+        // properties coming from my own mpi process, and properties that
+        // were sent to me in the call above.
+        std::vector<double> local_properties;
+        local_properties.reserve(n_locally_owned_particles() *
+                                 n_properties_per_particle());
+
+        // Compute the association between particle id and start of
+        // property data in the vector containing all local properties
+        std::map<types::particle_index, unsigned int> property_start;
+        for (const auto &it : cpu_to_indices)
+          if (it.first != my_cpu)
+            {
+              unsigned int sequential_index = 0;
+              // Process all properties coming from other mpi processes
+              for (const auto &el : it.second)
+                {
+                  types::particle_index particle_id =
+                    el + starting_points[it.first];
+                  property_start.insert({particle_id, local_properties.size()});
+
+                  local_properties.insert(
+                    local_properties.end(),
+                    locally_owned_properties_from_other_cpus.at(it.first)
+                        .begin() +
+                      sequential_index * n_properties_per_particle(),
+                    locally_owned_properties_from_other_cpus.at(it.first)
+                        .begin() +
+                      (sequential_index + 1) * n_properties_per_particle());
+                  sequential_index++;
+                }
+            }
+          else
+            {
+              // Process all properties that we already own
+              for (const auto &el : it.second)
+                {
+                  types::particle_index particle_id =
+                    el + starting_points[my_cpu];
+                  property_start.insert({particle_id, local_properties.size()});
+
+                  local_properties.insert(local_properties.end(),
+                                          properties.begin() +
+                                            el * n_properties_per_particle(),
+                                          properties.begin() +
+                                            (el + 1) *
+                                              n_properties_per_particle());
+                }
+            }
+        // Actually fill the property pool of each particle.
+        for (auto particle : *this)
+          {
+            particle.set_property_pool(get_property_pool());
+            const auto id = particle.get_id();
+            Assert(property_start.find(id) != property_start.end(),
+                   ExcInternalError());
+            const auto start = property_start[id];
+            particle.set_properties(
+              {local_properties.begin() + start,
+               local_properties.begin() + start + n_properties_per_particle()});
+          }
+      }
+    return cpu_to_indices;
+  }
+
+
+
   template <int dim, int spacedim>
   types::particle_index
   ParticleHandler<dim, spacedim>::n_global_particles() const
-- 
2.39.5
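
Usage sketch (illustrative, not part of the patch): the commit only moves the body of insert_global_particles() from the header into particle_handler.cc, so call sites are unaffected. The program below is a minimal sketch of how the function is typically driven; the mesh setup, the sample points, and the names tria, mapping, particle_handler and positions are assumptions made for this example only. The bounding-box construction follows the usual deal.II pattern of GridTools::compute_mesh_predicate_bounding_box() followed by Utilities::MPI::all_gather().

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/point.h>

#include <deal.II/distributed/tria.h>

#include <deal.II/fe/mapping_q1.h>

#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>

#include <deal.II/particles/particle_handler.h>

#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // Hypothetical setup: a distributed mesh of the unit square, a Q1 mapping,
  // and a particle handler that stores no per-particle properties.
  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(4);

  MappingQ1<2>                  mapping;
  Particles::ParticleHandler<2> particle_handler(tria, mapping);

  // Each process proposes some points; they need not lie inside the
  // locally owned part of the mesh.
  std::vector<Point<2>> positions;
  for (unsigned int i = 0; i < 10; ++i)
    positions.emplace_back(0.05 + 0.09 * i, 0.5);

  // Describe, on every process, which part of the domain each rank owns;
  // insert_global_particles() uses this to route every point to its owner.
  const auto local_boxes = GridTools::compute_mesh_predicate_bounding_box(
    tria, IteratorFilters::LocallyOwnedCell());
  const auto global_bounding_boxes =
    Utilities::MPI::all_gather(MPI_COMM_WORLD, local_boxes);

  // Insert the points as particles (properties default to an empty vector).
  const auto cpu_to_indices =
    particle_handler.insert_global_particles(positions, global_bounding_boxes);

  return 0;
}

The returned map is the same cpu_to_indices object that the moved implementation builds and compresses: for every other MPI rank it lists the indices of the points that rank proposed and that now live as particles on the current process.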