https://gitweb.dealii.org/ - dealii.git/commitdiff
Insert particles globally.
author Luca Heltai <luca.heltai@sissa.it>
Thu, 21 Nov 2019 18:30:17 +0000 (19:30 +0100)
committer Luca Heltai <luca.heltai@sissa.it>
Tue, 3 Dec 2019 07:19:55 +0000 (08:19 +0100)
Create and insert a number of particles into the collection of
particles. This function takes a list of positions and creates a set
of particles at these positions, which are then distributed and added
to the local particle collection of a processor. Note that this
function uses GridTools::distributed_compute_point_locations.
Consequently, it can require intense communications between the
processors.

Co-authored-by: Bruno Blais <blais.bruno@gmail.com>
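
For reference, a minimal usage sketch of the new interface, condensed from the
tests added in this commit (local names such as tr, mapping and n_points are
placeholders taken from those tests):

  // Sketch of calling the new ParticleHandler::insert_global_particles(),
  // condensed from tests/particles/particle_handler_09.cc below.
  Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping);

  // Describe which part of the mesh each rank owns, and gather the
  // descriptions from all ranks.
  const auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
    tr, IteratorFilters::LocallyOwnedCell());
  const auto global_bounding_boxes =
    Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);

  // The positions need not lie in the locally owned part of the
  // triangulation (random_point() is a helper from the test suite).
  std::vector<Point<spacedim>> positions(n_points);
  for (auto &p : positions)
    p = random_point<spacedim>();

  // Returns, for each rank, the indices of the points that rank passed to
  // this call and that ended up owned by the current process.
  const auto cpu_to_index =
    particle_handler.insert_global_particles(positions, global_bounding_boxes);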
include/deal.II/particles/particle_handler.h
tests/particles/particle_handler_09.cc [new file with mode: 0644]
tests/particles/particle_handler_09.with_p4est=true.mpirun=2.output [new file with mode: 0644]
tests/particles/particle_handler_10.cc [new file with mode: 0644]
tests/particles/particle_handler_10.with_p4est=true.mpirun=2.output [new file with mode: 0644]
tests/particles/particle_handler_11.cc [new file with mode: 0644]
tests/particles/particle_handler_11.with_p4est=true.mpirun=2.output [new file with mode: 0644]
tests/particles/particle_handler_12.cc [new file with mode: 0644]
tests/particles/particle_handler_12.with_p4est=true.mpirun=2.output [new file with mode: 0644]
tests/particles/particle_handler_13.cc [new file with mode: 0644]
tests/particles/particle_handler_13.with_p4est=true.mpirun=2.output [new file with mode: 0644]

index d5a5f10c1a7b91631c2f301f0b56acb9c7ac874c..d12ae6abd67ac8bfb699bde5d3dd7f90fd694fa8 100644 (file)
 #define dealii_particles_particle_handler_h
 
 #include <deal.II/base/array_view.h>
+#include <deal.II/base/data_out_base.h>
 #include <deal.II/base/mpi.h>
 #include <deal.II/base/smartpointer.h>
 #include <deal.II/base/subscriptor.h>
+#include <deal.II/base/utilities.h>
 
 #include <deal.II/distributed/tria.h>
 
 #include <deal.II/fe/mapping.h>
 
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/grid_tools_cache.h>
+
 #include <deal.II/particles/particle.h>
 #include <deal.II/particles/particle_iterator.h>
 #include <deal.II/particles/property_pool.h>
@@ -228,6 +234,250 @@ namespace Particles
     void
     insert_particles(const std::vector<Point<spacedim>> &positions);
 
+    /**
+     * Create and insert a number of particles into the collection of
+     * particles. This function takes a list of positions and creates a set
+     * of particles at these positions, which are then distributed and added
+     * to the local particle collection of a processor. Note that this
+     * function uses GridTools::distributed_compute_point_locations.
+     * Consequently, it can require intense communications between the
+     * processors.
+     *
+     * This function figures out which mpi process owns each of the points
+     * that do not fall within the locally owned part of the triangulation,
+     * sends the points passed to this function on this process to their
+     * owners, and receives the points that fall within the locally owned
+     * cells of the triangulation from whoever owns them.
+     *
+     * In order to keep track of which mpi process received which points, a
+     * map from mpi process to IndexSet is returned by the function. This
+     * IndexSet contains the local indices of the points that were passed to
+     * this function on the calling mpi process, and that fall within the
+     * part of the triangulation owned by this mpi process.
+     *
+     * @param[in] positions A vector of points that do not need to be on
+     * the local processor.
+     *
+     * @param[in] global_bounding_boxes A vector of vectors of bounding
+     * boxes. The bounding boxes global_bounding_boxes[rk] describe which
+     * part of the mesh is locally owned by the mpi process with rank rk.
+     * The local description can be obtained from
+     * GridTools::compute_mesh_predicate_bounding_box, and the global one
+     * can be obtained by passing the local ones to
+     * Utilities::MPI::all_gather.
+     *
+     * @param[in] properties (Optional) A vector of properties associated
+     * with each local point. The size of the vector should be either zero
+     * (no properties will be transferred nor attached to the generated
+     * particles) or it should be
+     * `positions.size()*this->n_properties_per_particle()`. Notice that
+     * this function call will transfer the properties from the local mpi
+     * process to the final mpi process that will own each of the
+     * particles, and it may therefore be communication intensive.
+     *
+     * @return A map from the rank of the mpi process that passed the
+     * points to an IndexSet containing the local indices (on that process)
+     * of the points that now fall within the part of the triangulation
+     * owned by the current process.
+     *
+     * @author Bruno Blais, Luca Heltai, 2019
+     */
+    std::map<unsigned int, IndexSet>
+    insert_global_particles(
+      const std::vector<Point<spacedim>> &positions,
+      const std::vector<std::vector<BoundingBox<spacedim>>>
+        &                        global_bounding_boxes,
+      const std::vector<double> &properties = std::vector<double>())
+    {
+      if (!properties.empty())
+        AssertDimension(properties.size(),
+                        positions.size() * n_properties_per_particle());
+
+      const auto my_cpu =
+        Utilities::MPI::this_mpi_process(triangulation->get_communicator());
+
+      const auto n_cpus =
+        Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
+
+      GridTools::Cache<dim, spacedim> cache(*triangulation, *mapping);
+
+      // Gather the number of points per processor
+      auto n_particles_per_proc =
+        Utilities::MPI::all_gather(triangulation->get_communicator(),
+                                   positions.size());
+
+      // Calculate all starting points locally
+      std::vector<unsigned int> starting_points(n_cpus);
+
+      for (unsigned int i = 0; i < starting_points.size(); ++i)
+        {
+          starting_points[i] = std::accumulate(n_particles_per_proc.begin(),
+                                               n_particles_per_proc.begin() + i,
+                                               0u);
+        }
+
+      auto distributed_tuple =
+        GridTools::distributed_compute_point_locations(cache,
+                                                       positions,
+                                                       global_bounding_boxes);
+
+      // Finally create the particles
+      std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>
+                                           cell_iterators = std::get<0>(distributed_tuple);
+      std::vector<std::vector<Point<dim>>> dist_reference_points =
+        std::get<1>(distributed_tuple);
+      std::vector<std::vector<unsigned int>> dist_map =
+        std::get<2>(distributed_tuple);
+      std::vector<std::vector<Point<spacedim>>> dist_points =
+        std::get<3>(distributed_tuple);
+      std::vector<std::vector<unsigned int>> dist_procs =
+        std::get<4>(distributed_tuple);
+
+      // Create the multimap of particles
+      std::multimap<typename Triangulation<dim, spacedim>::active_cell_iterator,
+                    Particle<dim, spacedim>>
+        particles;
+
+      // Create the map of cpu to indices, indicating who sent us which
+      // points
+      std::map<unsigned int, IndexSet> cpu_to_indices;
+
+      for (unsigned int i_cell = 0; i_cell < cell_iterators.size(); ++i_cell)
+        {
+          for (unsigned int i_particle = 0;
+               i_particle < dist_points[i_cell].size();
+               ++i_particle)
+            {
+              const auto &local_id = dist_map[i_cell][i_particle];
+              const auto &cpu      = dist_procs[i_cell][i_particle];
+
+              const unsigned int particle_id = local_id + starting_points[cpu];
+
+              particles.emplace(cell_iterators[i_cell],
+                                Particle<dim, spacedim>(
+                                  dist_points[i_cell][i_particle],
+                                  dist_reference_points[i_cell][i_particle],
+                                  particle_id));
+
+              if (cpu_to_indices.find(cpu) == cpu_to_indices.end())
+                cpu_to_indices.insert(
+                  {cpu, IndexSet(n_particles_per_proc[cpu])});
+
+              cpu_to_indices[cpu].add_index(local_id);
+            }
+        }
+
+      this->insert_particles(particles);
+      for (auto &c : cpu_to_indices)
+        c.second.compress();
+
+      // Take care of properties, if the input vector contains them.
+      const auto sum_pro =
+        Utilities::MPI::sum(properties.size(),
+                            triangulation->get_communicator());
+      if (sum_pro)
+        {
+          // [TODO]: fix this in some_to_some, to allow communication from
+          // my cpu to my cpu.
+          auto cpu_to_indices_to_send = cpu_to_indices;
+          if (cpu_to_indices_to_send.find(my_cpu) !=
+              cpu_to_indices_to_send.end())
+            cpu_to_indices_to_send.erase(cpu_to_indices_to_send.find(my_cpu));
+
+          // Gather whom I sent my own particles to, to decide to whom to
+          // send the particle properties
+          auto send_to_cpu =
+            Utilities::MPI::some_to_some(triangulation->get_communicator(),
+                                         cpu_to_indices_to_send);
+          std::map<unsigned int, std::vector<double>>
+            non_locally_owned_properties;
+
+          // Prepare the vector of non_locally_owned properties.
+          for (const auto &it : send_to_cpu)
+            {
+              std::vector<double> properties_to_send;
+              properties_to_send.reserve(it.second.n_elements() *
+                                         n_properties_per_particle());
+
+              for (const auto &el : it.second)
+                properties_to_send.insert(
+                  properties_to_send.end(),
+                  properties.begin() + el * n_properties_per_particle(),
+                  properties.begin() + (el + 1) * n_properties_per_particle());
+
+              non_locally_owned_properties.insert(
+                {it.first, properties_to_send});
+            }
+
+          // Send the non locally owned properties to each mpi process
+          // that needs them
+          auto locally_owned_properties_from_other_cpus =
+            Utilities::MPI::some_to_some(triangulation->get_communicator(),
+                                         non_locally_owned_properties);
+
+          // Store all local properties in a single vector. This includes
+          // properties coming from my own mpi process, and properties that
+          // were sent to me in the call above.
+          std::vector<double> local_properties;
+          local_properties.reserve(n_locally_owned_particles() *
+                                   n_properties_per_particle());
+
+          // Compute the association between particle id and start of
+          // property data in the vector containing all local properties
+          std::map<types::particle_index, unsigned int> property_start;
+          for (const auto &it : cpu_to_indices)
+            if (it.first != my_cpu)
+              {
+                unsigned int sequential_index = 0;
+                // Process all properties coming from other mpi processes
+                for (const auto &el : it.second)
+                  {
+                    types::particle_index particle_id =
+                      el + starting_points[it.first];
+                    property_start.insert(
+                      {particle_id, local_properties.size()});
+
+                    local_properties.insert(
+                      local_properties.end(),
+                      locally_owned_properties_from_other_cpus.at(it.first)
+                          .begin() +
+                        sequential_index * n_properties_per_particle(),
+                      locally_owned_properties_from_other_cpus.at(it.first)
+                          .begin() +
+                        (sequential_index + 1) * n_properties_per_particle());
+                    sequential_index++;
+                  }
+              }
+            else
+              {
+                // Process all properties that we already own
+                for (const auto &el : it.second)
+                  {
+                    types::particle_index particle_id =
+                      el + starting_points[my_cpu];
+                    property_start.insert(
+                      {particle_id, local_properties.size()});
+
+                    local_properties.insert(local_properties.end(),
+                                            properties.begin() +
+                                              el * n_properties_per_particle(),
+                                            properties.begin() +
+                                              (el + 1) *
+                                                n_properties_per_particle());
+                  }
+              }
+          // Actually fill the property pool of each particle.
+          for (auto particle : *this)
+            {
+              particle.set_property_pool(get_property_pool());
+              const auto id = particle.get_id();
+              Assert(property_start.find(id) != property_start.end(),
+                     ExcInternalError());
+              const auto start = property_start[id];
+              particle.set_properties({local_properties.begin() + start,
+                                       local_properties.begin() + start +
+                                         n_properties_per_particle()});
+            }
+        }
+      return cpu_to_indices;
+    }
+
     /**
      * This function allows to register three additional functions that are
      * called every time a particle is transferred to another process
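
The properties argument of the new function is a flat vector: the
n_properties_per_particle() values for point i occupy the slots
[i*n_props, (i+1)*n_props). A short packing sketch, in the spirit of
tests/particles/particle_handler_13.cc below (values are illustrative):

  // Sketch: packing a flat properties vector for insert_global_particles().
  // Assumes the ParticleHandler was constructed with two properties per
  // particle, as in tests/particles/particle_handler_13.cc.
  const unsigned int n_props = particle_handler.n_properties_per_particle();
  std::vector<double> properties(n_points * n_props);
  for (unsigned int i = 0; i < n_points; ++i)
    {
      properties[i * n_props + 0] = my_cpu + 1000; // first property of point i
      properties[i * n_props + 1] = i + 100;       // second property of point i
    }

  // The properties travel with the points to whichever process ends up
  // owning each particle.
  const auto cpu_to_index =
    particle_handler.insert_global_particles(points,
                                             global_bounding_boxes,
                                             properties);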
diff --git a/tests/particles/particle_handler_09.cc b/tests/particles/particle_handler_09.cc
new file mode 100644 (file)
index 0000000..aea3d55
--- /dev/null
@@ -0,0 +1,98 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles. Make sure we don't lose particles
+// along the way.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include "../tests.h"
+
+template <int dim, int spacedim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global(2);
+
+  MappingQ<dim, spacedim> mapping(1);
+
+  Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping);
+
+  const unsigned int n_points = 10;
+  const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Testing::srand(my_cpu + 1);
+
+  // Distribute the local points to the processor that owns them
+  // on the triangulation
+  auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+    tr, IteratorFilters::LocallyOwnedCell());
+
+  auto global_bounding_boxes =
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
+  std::vector<Point<spacedim>> points(n_points);
+  for (auto &p : points)
+    p = random_point<spacedim>();
+
+  auto cpu_to_index =
+    particle_handler.insert_global_particles(points, global_bounding_boxes);
+
+  for (auto p : cpu_to_index)
+    if (my_cpu == p.first)
+      {
+        deallog << "On CPU " << my_cpu << ", we own             : ";
+        p.second.print(deallog);
+      }
+    else
+      {
+        deallog << "On CPU " << my_cpu << ", we received from " << p.first
+                << " : ";
+        p.second.print(deallog);
+      }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d/2d");
+  test<2, 2>();
+  deallog.pop();
+  // deallog.push("2d/3d");
+  // test<2, 3>();
+  // deallog.pop();
+  deallog.push("3d/3d");
+  test<3, 3>();
+  deallog.pop();
+}
diff --git a/tests/particles/particle_handler_09.with_p4est=true.mpirun=2.output b/tests/particles/particle_handler_09.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..decbf12
--- /dev/null
@@ -0,0 +1,11 @@
+
+DEAL:0:2d/2d::On CPU 0, we own             : {0, 2}
+DEAL:0:2d/2d::On CPU 0, we received from 1 : {[1,3], [5,7], 9}
+DEAL:0:3d/3d::On CPU 0, we own             : {[1,2], 6, [8,9]}
+DEAL:0:3d/3d::On CPU 0, we received from 1 : {[0,1], 3, 7, 9}
+
+DEAL:1:2d/2d::On CPU 1, we received from 0 : {1, [3,9]}
+DEAL:1:2d/2d::On CPU 1, we own             : {0, 4, 8}
+DEAL:1:3d/3d::On CPU 1, we received from 0 : {0, [3,5], 7}
+DEAL:1:3d/3d::On CPU 1, we own             : {2, [4,6], 8}
+
diff --git a/tests/particles/particle_handler_10.cc b/tests/particles/particle_handler_10.cc
new file mode 100644 (file)
index 0000000..0d85034
--- /dev/null
@@ -0,0 +1,99 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles. Make sure we don't lose particles
+// along the way. Test the case where all particles are owned by a
+// single mpi process.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include "../tests.h"
+
+template <int dim, int spacedim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global(2);
+
+  MappingQ<dim, spacedim> mapping(1);
+
+  Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping);
+
+  const unsigned int n_points = 10;
+  const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Testing::srand(my_cpu + 1);
+
+  // Distribute the local points to the processor that owns them
+  // on the triangulation
+  auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+    tr, IteratorFilters::LocallyOwnedCell());
+
+  auto global_bounding_boxes =
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
+  std::vector<Point<spacedim>> points(n_points);
+  for (auto &p : points)
+    p = random_point<spacedim>(0, 0.1);
+
+  auto cpu_to_index =
+    particle_handler.insert_global_particles(points, global_bounding_boxes);
+
+  for (auto p : cpu_to_index)
+    if (my_cpu == p.first)
+      {
+        deallog << "On CPU " << my_cpu << ", we own             : ";
+        p.second.print(deallog);
+      }
+    else
+      {
+        deallog << "On CPU " << my_cpu << ", we received from " << p.first
+                << " : ";
+        p.second.print(deallog);
+      }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d/2d");
+  test<2, 2>();
+  deallog.pop();
+  // deallog.push("2d/3d");
+  // test<2, 3>();
+  // deallog.pop();
+  deallog.push("3d/3d");
+  test<3, 3>();
+  deallog.pop();
+}
diff --git a/tests/particles/particle_handler_10.with_p4est=true.mpirun=2.output b/tests/particles/particle_handler_10.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..b9840a1
--- /dev/null
@@ -0,0 +1,7 @@
+
+DEAL:0:2d/2d::On CPU 0, we own             : {[0,9]}
+DEAL:0:2d/2d::On CPU 0, we received from 1 : {[0,9]}
+DEAL:0:3d/3d::On CPU 0, we own             : {[0,9]}
+DEAL:0:3d/3d::On CPU 0, we received from 1 : {[0,9]}
+
+
diff --git a/tests/particles/particle_handler_11.cc b/tests/particles/particle_handler_11.cc
new file mode 100644 (file)
index 0000000..ce58531
--- /dev/null
@@ -0,0 +1,99 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles. Make sure we don't lose particles
+// along the way. Test the case where we pass one property as well.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include <unistd.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
+template <int dim, int spacedim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global(2);
+
+  MappingQ<dim, spacedim> mapping(1);
+
+  Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping, 1);
+
+  const unsigned int n_points = 10;
+  const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Testing::srand(my_cpu + 1);
+
+  // Distribute the local points to the processor that owns them
+  // on the triangulation
+  auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+    tr, IteratorFilters::LocallyOwnedCell());
+
+  auto global_bounding_boxes =
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
+  std::vector<Point<spacedim>> points(n_points);
+  std::vector<double>          properties(n_points, my_cpu);
+
+  for (auto &p : points)
+    p = random_point<spacedim>();
+
+  auto cpu_to_index =
+    particle_handler.insert_global_particles(points,
+                                             global_bounding_boxes,
+                                             properties);
+
+  for (auto p : particle_handler)
+    {
+      deallog << "Particle : " << p.get_id() << ", property: "
+              << static_cast<unsigned int>(p.get_properties()[0]) << std::endl;
+    }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d/2d");
+  test<2, 2>();
+  deallog.pop();
+  // deallog.push("2d/3d");
+  // test<2, 3>();
+  // deallog.pop();
+  deallog.push("3d/3d");
+  test<3, 3>();
+  deallog.pop();
+}
diff --git a/tests/particles/particle_handler_11.with_p4est=true.mpirun=2.output b/tests/particles/particle_handler_11.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..69e2bc4
--- /dev/null
@@ -0,0 +1,43 @@
+
+DEAL:0:2d/2d::Particle : 11, property: 1
+DEAL:0:2d/2d::Particle : 16, property: 1
+DEAL:0:2d/2d::Particle : 12, property: 1
+DEAL:0:2d/2d::Particle : 13, property: 1
+DEAL:0:2d/2d::Particle : 2, property: 0
+DEAL:0:2d/2d::Particle : 0, property: 0
+DEAL:0:2d/2d::Particle : 15, property: 1
+DEAL:0:2d/2d::Particle : 17, property: 1
+DEAL:0:2d/2d::Particle : 19, property: 1
+DEAL:0:3d/3d::Particle : 19, property: 1
+DEAL:0:3d/3d::Particle : 8, property: 0
+DEAL:0:3d/3d::Particle : 11, property: 1
+DEAL:0:3d/3d::Particle : 6, property: 0
+DEAL:0:3d/3d::Particle : 9, property: 0
+DEAL:0:3d/3d::Particle : 2, property: 0
+DEAL:0:3d/3d::Particle : 10, property: 1
+DEAL:0:3d/3d::Particle : 17, property: 1
+DEAL:0:3d/3d::Particle : 1, property: 0
+DEAL:0:3d/3d::Particle : 13, property: 1
+
+DEAL:1:2d/2d::Particle : 9, property: 0
+DEAL:1:2d/2d::Particle : 4, property: 0
+DEAL:1:2d/2d::Particle : 5, property: 0
+DEAL:1:2d/2d::Particle : 6, property: 0
+DEAL:1:2d/2d::Particle : 18, property: 1
+DEAL:1:2d/2d::Particle : 3, property: 0
+DEAL:1:2d/2d::Particle : 14, property: 1
+DEAL:1:2d/2d::Particle : 8, property: 0
+DEAL:1:2d/2d::Particle : 10, property: 1
+DEAL:1:2d/2d::Particle : 1, property: 0
+DEAL:1:2d/2d::Particle : 7, property: 0
+DEAL:1:3d/3d::Particle : 7, property: 0
+DEAL:1:3d/3d::Particle : 14, property: 1
+DEAL:1:3d/3d::Particle : 15, property: 1
+DEAL:1:3d/3d::Particle : 12, property: 1
+DEAL:1:3d/3d::Particle : 3, property: 0
+DEAL:1:3d/3d::Particle : 16, property: 1
+DEAL:1:3d/3d::Particle : 0, property: 0
+DEAL:1:3d/3d::Particle : 4, property: 0
+DEAL:1:3d/3d::Particle : 18, property: 1
+DEAL:1:3d/3d::Particle : 5, property: 0
+
diff --git a/tests/particles/particle_handler_12.cc b/tests/particles/particle_handler_12.cc
new file mode 100644 (file)
index 0000000..bf180ca
--- /dev/null
@@ -0,0 +1,100 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles. Make sure we don't lose particles
+// along the way. Test the case where all particles are owned by a
+// single mpi process, and where we add one property per particle.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include <unistd.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
+template <int dim, int spacedim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global(2);
+
+  MappingQ<dim, spacedim> mapping(1);
+
+  Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping, 1);
+
+  const unsigned int n_points = 10;
+  const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Testing::srand(my_cpu + 1);
+
+  // Distribute the local points to the processor that owns them
+  // on the triangulation
+  auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+    tr, IteratorFilters::LocallyOwnedCell());
+
+  auto global_bounding_boxes =
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
+  std::vector<Point<spacedim>> points(n_points);
+  std::vector<double>          properties(n_points, my_cpu);
+
+  for (auto &p : points)
+    p = random_point<spacedim>(0, 0.1);
+
+  auto cpu_to_index =
+    particle_handler.insert_global_particles(points,
+                                             global_bounding_boxes,
+                                             properties);
+
+  for (auto p : particle_handler)
+    {
+      deallog << "Particle : " << p.get_id() << ", property: "
+              << static_cast<unsigned int>(p.get_properties()[0]) << std::endl;
+    }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d/2d");
+  test<2, 2>();
+  deallog.pop();
+  // deallog.push("2d/3d");
+  // test<2, 3>();
+  // deallog.pop();
+  deallog.push("3d/3d");
+  test<3, 3>();
+  deallog.pop();
+}
diff --git a/tests/particles/particle_handler_12.with_p4est=true.mpirun=2.output b/tests/particles/particle_handler_12.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..a117ddd
--- /dev/null
@@ -0,0 +1,43 @@
+
+DEAL:0:2d/2d::Particle : 0, property: 0
+DEAL:0:2d/2d::Particle : 1, property: 0
+DEAL:0:2d/2d::Particle : 2, property: 0
+DEAL:0:2d/2d::Particle : 3, property: 0
+DEAL:0:2d/2d::Particle : 4, property: 0
+DEAL:0:2d/2d::Particle : 5, property: 0
+DEAL:0:2d/2d::Particle : 6, property: 0
+DEAL:0:2d/2d::Particle : 7, property: 0
+DEAL:0:2d/2d::Particle : 8, property: 0
+DEAL:0:2d/2d::Particle : 9, property: 0
+DEAL:0:2d/2d::Particle : 10, property: 1
+DEAL:0:2d/2d::Particle : 11, property: 1
+DEAL:0:2d/2d::Particle : 12, property: 1
+DEAL:0:2d/2d::Particle : 13, property: 1
+DEAL:0:2d/2d::Particle : 14, property: 1
+DEAL:0:2d/2d::Particle : 15, property: 1
+DEAL:0:2d/2d::Particle : 16, property: 1
+DEAL:0:2d/2d::Particle : 17, property: 1
+DEAL:0:2d/2d::Particle : 18, property: 1
+DEAL:0:2d/2d::Particle : 19, property: 1
+DEAL:0:3d/3d::Particle : 0, property: 0
+DEAL:0:3d/3d::Particle : 1, property: 0
+DEAL:0:3d/3d::Particle : 2, property: 0
+DEAL:0:3d/3d::Particle : 3, property: 0
+DEAL:0:3d/3d::Particle : 4, property: 0
+DEAL:0:3d/3d::Particle : 5, property: 0
+DEAL:0:3d/3d::Particle : 6, property: 0
+DEAL:0:3d/3d::Particle : 7, property: 0
+DEAL:0:3d/3d::Particle : 8, property: 0
+DEAL:0:3d/3d::Particle : 9, property: 0
+DEAL:0:3d/3d::Particle : 10, property: 1
+DEAL:0:3d/3d::Particle : 11, property: 1
+DEAL:0:3d/3d::Particle : 12, property: 1
+DEAL:0:3d/3d::Particle : 13, property: 1
+DEAL:0:3d/3d::Particle : 14, property: 1
+DEAL:0:3d/3d::Particle : 15, property: 1
+DEAL:0:3d/3d::Particle : 16, property: 1
+DEAL:0:3d/3d::Particle : 17, property: 1
+DEAL:0:3d/3d::Particle : 18, property: 1
+DEAL:0:3d/3d::Particle : 19, property: 1
+
+
diff --git a/tests/particles/particle_handler_13.cc b/tests/particles/particle_handler_13.cc
new file mode 100644 (file)
index 0000000..a3ef323
--- /dev/null
@@ -0,0 +1,117 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles. Make sure we don't lose particles
+// along the way. Test the case where we add two properties per particle
+// and inspect the returned index sets.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include <unistd.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
+template <int dim, int spacedim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global(2);
+
+  MappingQ<dim, spacedim> mapping(1);
+
+  Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping, 2);
+
+  const unsigned int n_points = 3;
+  const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Testing::srand(my_cpu + 1);
+
+
+  // Distribute the local points to the processor that owns them
+  // on the triangulation
+  auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+    tr, IteratorFilters::LocallyOwnedCell());
+
+  auto global_bounding_boxes =
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
+  std::vector<Point<spacedim>> points(n_points);
+  std::vector<double>          properties(2 * n_points, my_cpu + 1000);
+
+  for (unsigned int i = 0; i < n_points; ++i)
+    properties[2 * i + 1] = i + 100;
+
+  for (auto &p : points)
+    p = random_point<spacedim>();
+
+  auto cpu_to_index =
+    particle_handler.insert_global_particles(points,
+                                             global_bounding_boxes,
+                                             properties);
+
+  for (const auto &c : cpu_to_index)
+    {
+      deallog << "From cpu: " << c.first << " I got : ";
+      c.second.print(deallog);
+    }
+
+  if (cpu_to_index.find(my_cpu) != cpu_to_index.end())
+    cpu_to_index.erase(cpu_to_index.find(my_cpu));
+  auto received = Utilities::MPI::some_to_some(MPI_COMM_WORLD, cpu_to_index);
+
+
+  for (const auto &c : received)
+    {
+      deallog << "To cpu : " << c.first << " I sent : ";
+      c.second.print(deallog);
+    }
+
+
+  for (auto p : particle_handler)
+    {
+      deallog << "Particle : " << p.get_id() << ", properties: "
+              << static_cast<unsigned int>(p.get_properties()[0]) << " - "
+              << static_cast<unsigned int>(p.get_properties()[1]) << std::endl;
+    }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d/2d");
+  test<2, 2>();
+  deallog.pop();
+}
diff --git a/tests/particles/particle_handler_13.with_p4est=true.mpirun=2.output b/tests/particles/particle_handler_13.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..549ecac
--- /dev/null
@@ -0,0 +1,15 @@
+
+DEAL:0:2d/2d::From cpu: 0 I got : {0, 2}
+DEAL:0:2d/2d::From cpu: 1 I got : {[1,2]}
+DEAL:0:2d/2d::To cpu : 1 I sent : {1}
+DEAL:0:2d/2d::Particle : 4, properties: 1001 - 101
+DEAL:0:2d/2d::Particle : 5, properties: 1001 - 102
+DEAL:0:2d/2d::Particle : 2, properties: 1000 - 102
+DEAL:0:2d/2d::Particle : 0, properties: 1000 - 100
+
+DEAL:1:2d/2d::From cpu: 0 I got : {1}
+DEAL:1:2d/2d::From cpu: 1 I got : {0}
+DEAL:1:2d/2d::To cpu : 0 I sent : {[1,2]}
+DEAL:1:2d/2d::Particle : 3, properties: 1001 - 100
+DEAL:1:2d/2d::Particle : 1, properties: 1000 - 101
+
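
As particle_handler_13.cc above illustrates, the map returned by
insert_global_particles() can be inverted with Utilities::MPI::some_to_some to
find out to which ranks the calling process's own points were sent; a
condensed sketch (names as in that test):

  // Sketch, condensed from tests/particles/particle_handler_13.cc above.
  auto cpu_to_index =
    particle_handler.insert_global_particles(points, global_bounding_boxes);

  // Drop the entry describing the points we kept ourselves ...
  if (cpu_to_index.find(my_cpu) != cpu_to_index.end())
    cpu_to_index.erase(cpu_to_index.find(my_cpu));

  // ... and exchange the remaining entries: received[r] now holds the local
  // indices of our own points that ended up on rank r.
  const auto received =
    Utilities::MPI::some_to_some(MPI_COMM_WORLD, cpu_to_index);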
