https://gitweb.dealii.org/ - dealii.git/commitdiff
Moved implementation to .cc file.
author    Luca Heltai <luca.heltai@sissa.it>
          Mon, 25 Nov 2019 09:41:07 +0000 (10:41 +0100)
committer Luca Heltai <luca.heltai@sissa.it>
          Tue, 3 Dec 2019 07:19:55 +0000 (08:19 +0100)
include/deal.II/particles/particle_handler.h
source/particles/particle_handler.cc

index d12ae6abd67ac8bfb699bde5d3dd7f90fd694fa8..fb652162a344e3d1a2b29da850423c6468ab134a 100644 (file)
@@ -283,200 +283,7 @@ namespace Particles
       const std::vector<Point<spacedim>> &positions,
       const std::vector<std::vector<BoundingBox<spacedim>>>
         &                        global_bounding_boxes,
-      const std::vector<double> &properties = std::vector<double>())
-    {
-      if (!properties.empty())
-        AssertDimension(properties.size(),
-                        positions.size() * n_properties_per_particle());
-
-      const auto my_cpu =
-        Utilities::MPI::this_mpi_process(triangulation->get_communicator());
-
-      const auto n_cpus =
-        Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
-
-      GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
-
-      // Gather the number of points per processor
-      auto n_particles_per_proc =
-        Utilities::MPI::all_gather(triangulation->get_communicator(),
-                                   positions.size());
-
-      // Calculate all starting points locally
-      std::vector<unsigned int> starting_points(n_cpus);
-
-      for (unsigned int i = 0; i < starting_points.size(); ++i)
-        {
-          starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
-                                               n_particles_per_proc.begin() + i,
-                                               0u);
-        }
-
-      auto distributed_tuple =
-        GridTools::distributed_compute_point_locations(cache,
-                                                       positions,
-                                                       global_bounding_boxes);
-
-      // Finally create the particles
-      std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
-                                           cell_iterators = std::get<0>(distributed_tuple);
-      std::vector<std::vector<Point<dim>>> dist_reference_points =
-        std::get<1>(distributed_tuple);
-      std::vector<std::vector<unsigned int>> dist_map =
-        std::get<2>(distributed_tuple);
-      std::vector<std::vector<Point<spacedim>>> dist_points =
-        std::get<3>(distributed_tuple);
-      std::vector<std::vector<unsigned int>> dist_procs =
-        std::get<4>(distributed_tuple);
-
-      // Create the multimap of particles
-      std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
-                    Particle<dim, spacedim>>
-        particles;
-
-      // Create the map of cpu to indices, indicating who sent us which
-      // points
-      std::map<unsigned int, IndexSet> cpu_to_indices;
-
-      for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
-        {
-          for (unsigned int i_particle = 0;
-               i_particle < dist_points[i_cell].size();
-               ++i_particle)
-            {
-              const auto &local_id = dist_map[i_cell][i_particle];
-              const auto &cpu      = dist_procs[i_cell][i_particle];
-
-              const unsigned int particle_id = local_id + starting_points[cpu];
-
-              particles.emplace(cell_iterators[i_cell],
-                                Particle<dim, spacedim>(
-                                  dist_points[i_cell][i_particle],
-                                  dist_reference_points[i_cell][i_particle],
-                                  particle_id));
-
-              if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
-                cpu_to_indices.insert(
-                  {cpu, IndexSet(n_particles_per_proc[cpu])});
-
-              cpu_to_indices[cpu].add_index(local_id);
-            }
-        }
-
-      this->insert_particles(particles);
-      for (auto &c : cpu_to_indices)
-        c.second.compress();
-
-      // Take care of properties, if the input vector contains them.
-      const auto sum_pro =
-        Utilities::MPI::sum(properties.size(),
-                            triangulation->get_communicator());
-      if (sum_pro)
-        {
-          // [TODO]: fix this in some_to_some, to allow communication from
-          // my cpu to my cpu.
-          auto cpu_to_indices_to_send = cpu_to_indices;
-          if (cpu_to_indices_to_send.find(my_cpu) !=
-              cpu_to_indices_to_send.end())
-            cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
-
-          // Gather whom I sent my own particles to, to decide whom to send
-          // the particle properties to
-          auto send_to_cpu =
-            Utilities::MPI::some_to_some(triangulation->get_communicator(),
-                                         cpu_to_indices_to_send);
-          std::map<unsigned int, std::vector<double>>
-            non_locally_owned_properties;
-
-          // Prepare the vectors of non-locally-owned properties to send
-          for (const auto &it : send_to_cpu)
-            {
-              std::vector<double> properties_to_send;
-              properties_to_send.reserve(it.second.n_elements() *
-                                         n_properties_per_particle());
-
-              for (const auto &el : it.second)
-                properties_to_send.insert(
-                  properties_to_send.end(),
-                  properties.begin() + el * n_properties_per_particle(),
-                  properties.begin() + (el + 1) * n_properties_per_particle());
-
-              non_locally_owned_properties.insert(
-                {it.first, properties_to_send});
-            }
-
-          // Send the non-locally-owned properties to each MPI process
-          // that needs them
-          auto locally_owned_properties_from_other_cpus =
-            Utilities::MPI::some_to_some(triangulation->get_communicator(),
-                                         non_locally_owned_properties);
-
-          // Store all local properties in a single vector. This includes
-          // properties coming from my own mpi process, and properties that
-          // were sent to me in the call above.
-          std::vector<double> local_properties;
-          local_properties.reserve(n_locally_owned_particles() *
-                                   n_properties_per_particle());
-
-          // Compute the association between particle id and start of
-          // property data in the vector containing all local properties
-          std::map<types::particle_index, unsigned int> property_start;
-          for (const auto &it : cpu_to_indices)
-            if (it.first != my_cpu)
-              {
-                unsigned int sequential_index = 0;
-                // Process all properties coming from other mpi processes
-                for (const auto &el : it.second)
-                  {
-                    types::particle_index particle_id =
-                      el + starting_points[it.first];
-                    property_start.insert(
-                      {particle_id, local_properties.size()});
-
-                    local_properties.insert(
-                      local_properties.end(),
-                      locally_owned_properties_from_other_cpus.at(it.first)
-                          .begin() +
-                        sequential_index * n_properties_per_particle(),
-                      locally_owned_properties_from_other_cpus.at(it.first)
-                          .begin() +
-                        (sequential_index + 1) * n_properties_per_particle());
-                    sequential_index++;
-                  }
-              }
-            else
-              {
-                // Process all properties that we already own
-                for (const auto &el : it.second)
-                  {
-                    types::particle_index particle_id =
-                      el + starting_points[my_cpu];
-                    property_start.insert(
-                      {particle_id, local_properties.size()});
-
-                    local_properties.insert(local_properties.end(),
-                                            properties.begin() +
-                                              el * n_properties_per_particle(),
-                                            properties.begin() +
-                                              (el + 1) *
-                                                n_properties_per_particle());
-                  }
-              }
-          // Actually fill the property pool of each particle.
-          for (auto particle : *this)
-            {
-              particle.set_property_pool(get_property_pool());
-              const auto id = particle.get_id();
-              Assert(property_start.find(id) != property_start.end(),
-                     ExcInternalError());
-              const auto start = property_start[id];
-              particle.set_properties({local_properties.begin() + start,
-                                       local_properties.begin() + start +
-                                         n_properties_per_particle()});
-            }
-        }
-      return cpu_to_indices;
-    }
+      const std::vector<double> &properties = std::vector<double>());
 
     /**
      * This function allows one to register three additional functions that are
index 4709d909f07719b690d9563e38a14cb59e098644..da549ee6deda7df9e1cfb6ffc55d712fc8068409 100644 (file)
@@ -442,6 +442,206 @@ namespace Particles
 
 
 
+  template <int dim, int spacedim>
+  std::map<unsigned int, IndexSet>
+  ParticleHandler<dim, spacedim>::insert_global_particles(
+    const std::vector<Point<spacedim>> &positions,
+    const std::vector<std::vector<BoundingBox<spacedim>>>
+      &                        global_bounding_boxes,
+    const std::vector<double> &properties)
+  {
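+    // If given, "properties" is expected to be a flat vector storing
+    // n_properties_per_particle() consecutive entries for each entry of
+    // "positions", in the same order.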
+    if (!properties.empty())
+      AssertDimension(properties.size(),
+                      positions.size() * n_properties_per_particle());
+
+    const auto my_cpu =
+      Utilities::MPI::this_mpi_process(triangulation->get_communicator());
+
+    const auto n_cpus =
+      Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
+
+    GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
+
+    // Gather the number of points per processor
+    auto n_particles_per_proc =
+      Utilities::MPI::all_gather(triangulation->get_communicator(),
+                                 positions.size());
+
+    // Calculate all starting points locally
+    std::vector<unsigned int> starting_points(n_cpus);
+
+    for (unsigned int i = 0; i < starting_points.size(); ++i)
+      {
+        starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
+                                             n_particles_per_proc.begin() + i,
+                                             0u);
+      }
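+    // starting_points[i] is now the exclusive prefix sum of the particle
+    // counts, i.e., the global id of the first particle contributed by
+    // process i.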
+
+    const auto n_global_particles =
+      std::accumulate(n_particles_per_proc.begin(),
+                      n_particles_per_proc.end(),
+                      0u);
+
+    auto distributed_tuple =
+      GridTools::distributed_compute_point_locations(cache,
+                                                     positions,
+                                                     global_bounding_boxes);
+
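+    // Per locally owned cell that received points, the returned tuple
+    // contains: the cell iterator, the reference coordinates of the points
+    // falling inside it, the indices of those points in the "positions"
+    // array of the process that provided them, their real-space
+    // coordinates, and the rank of that process.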
+    // Finally create the particles
+    std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
+                                         cell_iterators = std::get<0>(distributed_tuple);
+    std::vector<std::vector<Point<dim>>> dist_reference_points =
+      std::get<1>(distributed_tuple);
+    std::vector<std::vector<unsigned int>> dist_map =
+      std::get<2>(distributed_tuple);
+    std::vector<std::vector<Point<spacedim>>> dist_points =
+      std::get<3>(distributed_tuple);
+    std::vector<std::vector<unsigned int>> dist_procs =
+      std::get<4>(distributed_tuple);
+
+    // Create the multimap of particles
+    std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
+                  Particle<dim, spacedim>>
+      particles;
+
+    // Create the map of cpu to indices, indicating who sent us which
+    // points
+    std::map<unsigned int, IndexSet> cpu_to_indices;
+
+    for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
+      {
+        for (unsigned int i_particle = 0;
+             i_particle < dist_points[i_cell].size();
+             ++i_particle)
+          {
+            const auto &local_id = dist_map[i_cell][i_particle];
+            const auto &cpu      = dist_procs[i_cell][i_particle];
+
+            const unsigned int particle_id = local_id + starting_points[cpu];
+
+            particles.emplace(
+              cell_iterators[i_cell],
+              Particle<dim, spacedim>(dist_points[i_cell][i_particle],
+                                      dist_reference_points[i_cell][i_particle],
+                                      particle_id));
+
+            if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
+              cpu_to_indices.insert({cpu, IndexSet(n_particles_per_proc[cpu])});
+
+            cpu_to_indices[cpu].add_index(local_id);
+          }
+      }
+
+    this->insert_particles(particles);
+    for (auto &c : cpu_to_indices)
+      c.second.compress();
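+    // At this point, cpu_to_indices[p] is an IndexSet holding the indices,
+    // within process p's "positions" array, of all points that ended up in
+    // cells owned by the current process.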
+
+    // Take care of properties, if the input vector contains them.
+    const auto global_n_properties =
+      Utilities::MPI::sum(properties.size(), triangulation->get_communicator());
+    if (global_n_properties > 0)
+      {
+        // [TODO]: fix this in some_to_some, to allow communication from
+        // my cpu to my cpu.
+        auto cpu_to_indices_to_send = cpu_to_indices;
+        if (cpu_to_indices_to_send.find(my_cpu) != cpu_to_indices_to_send.end())
+          cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
+
+        // Gather whom I sent my own particles to, to decide whom to send
+        // the particle properties to
+        auto send_to_cpu =
+          Utilities::MPI::some_to_some(triangulation->get_communicator(),
+                                       cpu_to_indices_to_send);
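+        // send_to_cpu[p] now lists, by index into our own "positions"
+        // array, which of our points process p received, i.e., exactly
+        // the particles whose properties we have to ship to p.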
+        std::map<unsigned int, std::vector<double>>
+          non_locally_owned_properties;
+
+        // Prepare the vectors of non-locally-owned properties to send
+        for (const auto &it : send_to_cpu)
+          {
+            std::vector<double> properties_to_send;
+            properties_to_send.reserve(it.second.n_elements() *
+                                       n_properties_per_particle());
+
+            for (const auto &el : it.second)
+              properties_to_send.insert(
+                properties_to_send.end(),
+                properties.begin() + el * n_properties_per_particle(),
+                properties.begin() + (el + 1) * n_properties_per_particle());
+
+            non_locally_owned_properties.insert({it.first, properties_to_send});
+          }
+
+        // Send the non-locally-owned properties to each MPI process
+        // that needs them
+        auto locally_owned_properties_from_other_cpus =
+          Utilities::MPI::some_to_some(triangulation->get_communicator(),
+                                       non_locally_owned_properties);
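+        // locally_owned_properties_from_other_cpus[q] now holds, as a flat
+        // vector with n_properties_per_particle() entries per point, the
+        // property values of the points that process q provided and that
+        // this process now owns.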
+
+        // Store all local properties in a single vector. This includes
+        // properties coming from my own mpi process, and properties that
+        // were sent to me in the call above.
+        std::vector<double> local_properties;
+        local_properties.reserve(n_locally_owned_particles() *
+                                 n_properties_per_particle());
+
+        // Compute the association between particle id and start of
+        // property data in the vector containing all local properties
+        std::map<types::particle_index, unsigned int> property_start;
+        for (const auto &it : cpu_to_indices)
+          if (it.first != my_cpu)
+            {
+              unsigned int sequential_index = 0;
+              // Process all properties coming from other mpi processes
+              for (const auto &el : it.second)
+                {
+                  types::particle_index particle_id =
+                    el + starting_points[it.first];
+                  property_start.insert({particle_id, local_properties.size()});
+
+                  local_properties.insert(
+                    local_properties.end(),
+                    locally_owned_properties_from_other_cpus.at(it.first)
+                        .begin() +
+                      sequential_index * n_properties_per_particle(),
+                    locally_owned_properties_from_other_cpus.at(it.first)
+                        .begin() +
+                      (sequential_index + 1) * n_properties_per_particle());
+                  sequential_index++;
+                }
+            }
+          else
+            {
+              // Process all properties that we already own
+              for (const auto &el : it.second)
+                {
+                  types::particle_index particle_id =
+                    el + starting_points[my_cpu];
+                  property_start.insert({particle_id, local_properties.size()});
+
+                  local_properties.insert(local_properties.end(),
+                                          properties.begin() +
+                                            el * n_properties_per_particle(),
+                                          properties.begin() +
+                                            (el + 1) *
+                                              n_properties_per_particle());
+                }
+            }
+        // Actually fill the property pool of each particle.
+        for (auto particle : *this)
+          {
+            particle.set_property_pool(get_property_pool());
+            const auto id = particle.get_id();
+            Assert(property_start.find(id) != property_start.end(),
+                   ExcInternalError());
+            const auto start = property_start[id];
+            particle.set_properties(
+              {local_properties.begin() + start,
+               local_properties.begin() + start + n_properties_per_particle()});
+          }
+      }
+    return cpu_to_indices;
+  }
+
+
+
   template <int dim, int spacedim>
   types::particle_index
   ParticleHandler<dim, spacedim>::n_global_particles() const
