--- /dev/null
+New: The function Particles::ParticleHandler::insert_global_particles() now
+takes another optional argument that allows one to set particle ids
+arbitrarily. Moreover, the numbering of the ids is now correct even if the
+method is called more than once; newly added particles, if no ids are
+specified, correctly receive the first available ids.
+New: Added a new version of
+Particles::ParticleHandler::insert_global_particles() that takes a vector of
+Particles::Particle objects instead of just their positions. This can be used
+in conjunction with the signal
+Particles::ParticleHandler::Signals::particle_lost() to reinsert
+Particles::Particle objects that moved outside of the locally owned and
+ghost cells.
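+
+For example, one may choose the ids by hand before insertion. A minimal
+sketch (assuming <code>positions</code>, <code>properties</code>, and
+<code>global_bounding_boxes</code> have been set up by the caller):
+@code
+std::vector<types::particle_index> ids(positions.size());
+std::iota(ids.begin(), ids.end(), 100); // arbitrary user-chosen ids
+particle_handler.insert_global_particles(positions,
+                                         global_bounding_boxes,
+                                         properties,
+                                         ids);
+@endcode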
+<br> (Luca Heltai, 2020/06/11)
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles twice. Make sure we don't lose particles
+// along the way, and that the global ids don't overlap.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/std_cxx20/iota_view.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/grid_tools_cache.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include <unistd.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
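+// Helper function: every process generates n_points random points and
+// inserts them through insert_global_particles(). The first property
+// records the inserting rank (offset by 1000), the second one a user id
+// starting_id + my_cpu * n_points + i; the particle ids themselves are
+// assigned automatically by the particle handler.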
+template <int dim, int spacedim>
+void
+insert(
+ Particles::ParticleHandler<dim, spacedim> & particle_handler,
+  const unsigned int n_points,
+  const unsigned int starting_id,
+ const std::vector<std::vector<BoundingBox<spacedim>>> &global_bounding_boxes)
+{
+ const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ std::vector<Point<spacedim>> points(n_points);
+ std::vector<std::vector<double>> properties(n_points,
+ {my_cpu + 1000.0,
+ my_cpu + 1000.0});
+ std::vector<types::particle_index> ids(n_points);
+
+ for (unsigned int i = 0; i < n_points; ++i)
+ {
+ points[i] = random_point<spacedim>();
+ ids[i] = starting_id + my_cpu * n_points + i;
+ properties[i][1] = ids[i];
+ }
+
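+  // The returned map goes from each rank that passed points to this
+  // process to the indices, within that rank's input vector, of the
+  // points that landed in our locally owned cells.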
+ auto cpu_to_index =
+ particle_handler.insert_global_particles(points,
+ global_bounding_boxes,
+ properties);
+
+ for (const auto &c : cpu_to_index)
+ {
+ deallog << "From cpu: " << c.first << " I got : ";
+ c.second.print(deallog);
+ }
+
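+  // Mirror the information back: drop our own entry and exchange the
+  // index sets with some_to_some(), so that every process can also log
+  // which of its points were sent to which rank.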
+ if (cpu_to_index.find(my_cpu) != cpu_to_index.end())
+ cpu_to_index.erase(cpu_to_index.find(my_cpu));
+ auto received = Utilities::MPI::some_to_some(MPI_COMM_WORLD, cpu_to_index);
+
+ for (const auto &c : received)
+ {
+ deallog << "To cpu : " << c.first << " I sent : ";
+ c.second.print(deallog);
+ }
+}
+
+
+template <int dim, int spacedim>
+void
+test()
+{
+ parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(2);
+
+ MappingQ<dim, spacedim> mapping(1);
+
+ Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping, 2);
+
+ const unsigned int n_points = 3;
+ const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ Testing::srand(my_cpu + 1);
+
+
+ // Distribute the local points to the processor that owns them
+ // on the triangulation
+ auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+ tr, IteratorFilters::LocallyOwnedCell());
+
+ auto global_bounding_boxes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
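+  // Insert two batches of particles. The second call must continue the
+  // automatic id numbering where the first call left off, so that no ids
+  // overlap and no particles are lost.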
+ insert(particle_handler, n_points, 100, global_bounding_boxes);
+ insert(particle_handler, n_points, 200, global_bounding_boxes);
+
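+  // Print the locally owned particles. Across all ranks the ids should
+  // be unique, and the properties record the inserting rank (+1000) and
+  // the user id.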
+ for (auto p : particle_handler)
+ {
+ deallog << "Particle : " << p.get_id() << ", properties: "
+ << static_cast<unsigned int>(p.get_properties()[0]) << " - "
+ << static_cast<unsigned int>(p.get_properties()[1]) << std::endl;
+ }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::From cpu: 0 I got : {0, 2}
+DEAL:0:2d/2d::From cpu: 1 I got : {[1,2]}
+DEAL:0:2d/2d::To cpu : 1 I sent : {1}
+DEAL:0:2d/2d::From cpu: 1 I got : {0, 2}
+DEAL:0:2d/2d::To cpu : 1 I sent : {[0,2]}
+DEAL:0:2d/2d::Particle : 4, properties: 1001 - 104
+DEAL:0:2d/2d::Particle : 5, properties: 1001 - 105
+DEAL:0:2d/2d::Particle : 9, properties: 1001 - 203
+DEAL:0:2d/2d::Particle : 2, properties: 1000 - 102
+DEAL:0:2d/2d::Particle : 0, properties: 1000 - 100
+DEAL:0:2d/2d::Particle : 11, properties: 1001 - 205
+
+DEAL:1:2d/2d::From cpu: 0 I got : {1}
+DEAL:1:2d/2d::From cpu: 1 I got : {0}
+DEAL:1:2d/2d::To cpu : 0 I sent : {[1,2]}
+DEAL:1:2d/2d::From cpu: 0 I got : {[0,2]}
+DEAL:1:2d/2d::From cpu: 1 I got : {1}
+DEAL:1:2d/2d::To cpu : 0 I sent : {0, 2}
+DEAL:1:2d/2d::Particle : 7, properties: 1000 - 201
+DEAL:1:2d/2d::Particle : 8, properties: 1000 - 202
+DEAL:1:2d/2d::Particle : 6, properties: 1000 - 200
+DEAL:1:2d/2d::Particle : 10, properties: 1001 - 204
+DEAL:1:2d/2d::Particle : 3, properties: 1001 - 103
+DEAL:1:2d/2d::Particle : 1, properties: 1000 - 101
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test insert_global_particles twice. Make sure we don't lose particles
+// along the way, and that the global ids don't overlap. Test that we can
+// set ids arbitrarily.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/std_cxx20/iota_view.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/grid_tools_cache.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include <unistd.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
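+// Same helper as in the previous test, except that the user ids are now
+// passed explicitly to insert_global_particles().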
+template <int dim, int spacedim>
+void
+insert(
+ Particles::ParticleHandler<dim, spacedim> & particle_handler,
+  const unsigned int n_points,
+  const unsigned int starting_id,
+ const std::vector<std::vector<BoundingBox<spacedim>>> &global_bounding_boxes)
+{
+ const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ std::vector<Point<spacedim>> points(n_points);
+ std::vector<std::vector<double>> properties(n_points,
+ {my_cpu + 1000.0,
+ my_cpu + 1000.0});
+ std::vector<types::particle_index> ids(n_points);
+
+ for (unsigned int i = 0; i < n_points; ++i)
+ {
+ points[i] = random_point<spacedim>();
+ ids[i] = starting_id + my_cpu * n_points + i;
+ properties[i][1] = ids[i];
+ }
+
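+  // In contrast to the first test, the ids are passed explicitly here,
+  // exercising the new optional argument of insert_global_particles().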
+ auto cpu_to_index = particle_handler.insert_global_particles(
+ points, global_bounding_boxes, properties, ids);
+
+ for (const auto &c : cpu_to_index)
+ {
+ deallog << "From cpu: " << c.first << " I got : ";
+ c.second.print(deallog);
+ }
+
+ if (cpu_to_index.find(my_cpu) != cpu_to_index.end())
+ cpu_to_index.erase(cpu_to_index.find(my_cpu));
+ auto received = Utilities::MPI::some_to_some(MPI_COMM_WORLD, cpu_to_index);
+
+ for (const auto &c : received)
+ {
+ deallog << "To cpu : " << c.first << " I sent : ";
+ c.second.print(deallog);
+ }
+}
+
+
+template <int dim, int spacedim>
+void
+test()
+{
+ parallel::distributed::Triangulation<dim, spacedim> tr(MPI_COMM_WORLD);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(2);
+
+ MappingQ<dim, spacedim> mapping(1);
+
+ Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping, 2);
+
+ const unsigned int n_points = 3;
+ const unsigned int my_cpu = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ Testing::srand(my_cpu + 1);
+
+
+ // Distribute the local points to the processor that owns them
+ // on the triangulation
+ auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
+ tr, IteratorFilters::LocallyOwnedCell());
+
+ auto global_bounding_boxes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);
+
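+  // Insert two batches with explicitly chosen ids (starting at 100 and
+  // 200, respectively); no automatic numbering is involved.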
+ insert(particle_handler, n_points, 100, global_bounding_boxes);
+ insert(particle_handler, n_points, 200, global_bounding_boxes);
+
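+  // Print the locally owned particles. Each particle id should coincide
+  // with the user id stored in the second property.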
+ for (auto p : particle_handler)
+ {
+ deallog << "Particle : " << p.get_id() << ", properties: "
+ << static_cast<unsigned int>(p.get_properties()[0]) << " - "
+ << static_cast<unsigned int>(p.get_properties()[1]) << std::endl;
+ }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::From cpu: 0 I got : {0, 2}
+DEAL:0:2d/2d::From cpu: 1 I got : {[1,2]}
+DEAL:0:2d/2d::To cpu : 1 I sent : {1}
+DEAL:0:2d/2d::From cpu: 1 I got : {0, 2}
+DEAL:0:2d/2d::To cpu : 1 I sent : {[0,2]}
+DEAL:0:2d/2d::Particle : 104, properties: 1001 - 104
+DEAL:0:2d/2d::Particle : 105, properties: 1001 - 105
+DEAL:0:2d/2d::Particle : 203, properties: 1001 - 203
+DEAL:0:2d/2d::Particle : 102, properties: 1000 - 102
+DEAL:0:2d/2d::Particle : 100, properties: 1000 - 100
+DEAL:0:2d/2d::Particle : 205, properties: 1001 - 205
+
+DEAL:1:2d/2d::From cpu: 0 I got : {1}
+DEAL:1:2d/2d::From cpu: 1 I got : {0}
+DEAL:1:2d/2d::To cpu : 0 I sent : {[1,2]}
+DEAL:1:2d/2d::From cpu: 0 I got : {[0,2]}
+DEAL:1:2d/2d::From cpu: 1 I got : {1}
+DEAL:1:2d/2d::To cpu : 0 I sent : {0, 2}
+DEAL:1:2d/2d::Particle : 201, properties: 1000 - 201
+DEAL:1:2d/2d::Particle : 202, properties: 1000 - 202
+DEAL:1:2d/2d::Particle : 200, properties: 1000 - 200
+DEAL:1:2d/2d::Particle : 204, properties: 1001 - 204
+DEAL:1:2d/2d::Particle : 103, properties: 1001 - 103
+DEAL:1:2d/2d::Particle : 101, properties: 1000 - 101
+