--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// like particle_handler_04, but for shared triangulations in
+// parallel computations. The shared triangulation is partitioned
+// along a z-order curve to maximize compatibility (no external
+// partitioner such as METIS or Zoltan is required)
+
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+#include "../tests.h"
+
+template <int dim, int spacedim>
+void
+test()
+{
+ {
+ const MPI_Comm comm = MPI_COMM_WORLD;
+
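+    // Build the shared triangulation with artificial cells allowed and
+    // partitioned along a z-order space-filling curve, so that the
+    // resulting cell ownership does not depend on an external partitioner
+    // being available.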
+ parallel::shared::Triangulation<dim, spacedim> tria_shared(
+      comm,
+ typename Triangulation<dim, spacedim>::MeshSmoothing(
+ Triangulation<dim, spacedim>::limit_level_difference_at_vertices),
+ true,
+ typename parallel::shared::Triangulation<dim, spacedim>::Settings(
+ parallel::shared::Triangulation<dim, spacedim>::partition_zorder));
+
+ GridGenerator::hyper_cube(tria_shared);
+ tria_shared.refine_global(2);
+ MappingQ<dim, spacedim> mapping(1);
+
+
+ // both processes create a particle handler,
+ // but only the first creates particles
+ Particles::ParticleHandler<dim, spacedim> particle_handler(tria_shared,
+ mapping);
+
+ if (Utilities::MPI::this_mpi_process(tria_shared.get_communicator()) == 0)
+ {
+ std::vector<Point<spacedim>> position(2);
+ std::vector<Point<dim>> reference_position(2);
+
+ for (unsigned int i = 0; i < dim; ++i)
+ {
+ position[0](i) = 0.125;
+ position[1](i) = 0.525;
+ }
+
+ Particles::Particle<dim, spacedim> particle1(position[0],
+ reference_position[0],
+ 0);
+ Particles::Particle<dim, spacedim> particle2(position[1],
+ reference_position[1],
+ 1);
+
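+        // Deliberately associate both particles with the first active cell
+        // on level 2; the later call to
+        // sort_particles_into_subdomains_and_cells() has to find the correct
+        // cells (and owning processes) on its own.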
+ typename Triangulation<dim, spacedim>::active_cell_iterator cell1(
+ &tria_shared, 2, 0);
+ typename Triangulation<dim, spacedim>::active_cell_iterator cell2(
+ &tria_shared, 2, 0);
+
+ particle_handler.insert_particle(particle1, cell1);
+ particle_handler.insert_particle(particle2, cell2);
+
+ for (const auto &particle : particle_handler)
+ deallog << "Before sort particle id " << particle.get_id()
+ << " is in cell "
+ << particle.get_surrounding_cell(tria_shared)
+ << " on process "
+ << Utilities::MPI::this_mpi_process(
+ tria_shared.get_communicator())
+ << std::flush << std::endl;
+ }
+
+
+
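+  // Sort the particles into the cells and subdomains they actually lie in;
+  // particles located in cells owned by another process are transferred to
+  // that process.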
+ particle_handler.sort_particles_into_subdomains_and_cells();
+
+ for (const auto &particle : particle_handler)
+ deallog << "After sort particle id " << particle.get_id()
+ << " is in cell " << particle.get_surrounding_cell(tria_shared)
+ << " on process "
+ << Utilities::MPI::this_mpi_process(
+ tria_shared.get_communicator())
+ << std::flush << std::endl;
+
+  // Move all points up by 0.5. This will change the cell of particle1
+  // (id 0) and will move particle2 (id 1) out of the domain. Note that we
+  // need to shift coordinate dim-1 even though the points are
+  // spacedim-dimensional.
+ Point<spacedim> shift;
+ shift(dim - 1) = 0.5;
+ for (auto &particle : particle_handler)
+ particle.set_location(particle.get_location() + shift);
+
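+  // Sort again: shifted particles may now belong to cells owned by a
+  // different process, and particles that have left the domain are removed.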
+ particle_handler.sort_particles_into_subdomains_and_cells();
+ for (const auto &particle : particle_handler)
+ deallog << "After shift particle id " << particle.get_id()
+ << " is in cell " << particle.get_surrounding_cell(tria_shared)
+ << " on process "
+ << Utilities::MPI::this_mpi_process(
+ tria_shared.get_communicator())
+ << std::flush << std::endl;
+ }
+
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
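+  // Write rank-prefixed log output for every MPI process.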
+ MPILogInitAll all;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+ deallog.push("2d/3d");
+ test<2, 3>();
+ deallog.pop();
+ deallog.push("3d/3d");
+ test<3, 3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::Before sort particle id 0 is in cell 2.0 on process 0
+DEAL:0:2d/2d::Before sort particle id 1 is in cell 2.0 on process 0
+DEAL:0:2d/2d::After sort particle id 0 is in cell 2.0 on process 0
+DEAL:0:2d/2d::OK
+DEAL:0:2d/3d::Before sort particle id 0 is in cell 2.0 on process 0
+DEAL:0:2d/3d::Before sort particle id 1 is in cell 2.0 on process 0
+DEAL:0:2d/3d::After sort particle id 0 is in cell 2.0 on process 0
+DEAL:0:2d/3d::OK
+DEAL:0:3d/3d::Before sort particle id 0 is in cell 2.0 on process 0
+DEAL:0:3d/3d::Before sort particle id 1 is in cell 2.0 on process 0
+DEAL:0:3d/3d::After sort particle id 0 is in cell 2.0 on process 0
+DEAL:0:3d/3d::OK
+
+DEAL:1:2d/2d::After sort particle id 1 is in cell 2.12 on process 1
+DEAL:1:2d/2d::After shift particle id 0 is in cell 2.8 on process 1
+DEAL:1:2d/2d::OK
+DEAL:1:2d/3d::After sort particle id 1 is in cell 2.12 on process 1
+DEAL:1:2d/3d::After shift particle id 0 is in cell 2.8 on process 1
+DEAL:1:2d/3d::OK
+DEAL:1:3d/3d::After sort particle id 1 is in cell 2.56 on process 1
+DEAL:1:3d/3d::After shift particle id 0 is in cell 2.32 on process 1
+DEAL:1:3d/3d::OK
+