--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_particles_utilities
+#define dealii_particles_utilities
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/point.h>
+#include <deal.II/base/quadrature.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/component_mask.h>
+#include <deal.II/fe/fe.h>
+#include <deal.II/fe/mapping_q1.h>
+
+#include <deal.II/grid/grid_tools_cache.h>
+
+#include <deal.II/lac/affine_constraints.h>
+
+#include <deal.II/particles/particle_handler.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Particles
+{
+ /**
+   * A namespace for functions offering tools to handle ParticleHandler
+   * objects and their coupling with DoFHandler objects.
+   */
+ namespace Utilities
+ {
+ /**
+ * Create an interpolation sparsity pattern for particles.
+ *
+     * Given a triangulation representing the domain $\Omega$, a particle
+     * handler of particles in $\Omega$, and a scalar finite element space
+     * $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the sparsity pattern
+     * that would be necessary to assemble the matrix
+     * \f[
+     * M_{ij} \dealcoloneq v_j(x_i),
+     * \f]
+ * where $V(\Omega)$ is the finite element space associated with the
+ * `space_dh`, and the index `i` is given by the particle id whose position
+ * is `x_i`.
+ *
+     * In the case of vector-valued finite element spaces, the components on
+     * which interpolation must be performed can be selected using a component
+     * mask. Only primitive finite element spaces are supported.
+     *
+     * When selecting more than one component, the resulting sparsity pattern
+     * will have a number of rows equal to `particle_handler.n_global_particles()
+     * * mask.n_selected_components()`, and a number of columns equal to
+     * `space_dh.n_dofs()`. The corresponding matrix entries are given by
+     * \f[
+     * M_{(i \, n_{\text{comps}} + k), j} \dealcoloneq v_j(x_i) \cdot
+     * e_{\text{comp}_j},
+     * \f]
+     * where `comp_j` is the only nonzero component of the vector-valued basis
+     * function `v_j` (equal to `fe.system_to_component_index(j).first`), and
+     * `k` is the index of `comp_j` within the selected components of the mask.
+     * For example, if the mask selects only components 1 and 2 of a
+     * three-component space, then component 1 maps to `k = 0` and component 2
+     * to `k = 1`.
+ *
+ * The `sparsity` is filled by locating the position of the particle with
+ * index `i` within the particle handler with respect to the embedding
+ * triangulation $\Omega$, and coupling it with all the local degrees of
+     * freedom specified in the component mask @p space_comps, following the
+     * ordering in which they are selected in the mask.
+     *
+     * If a particle does not fall within $\Omega$, it is ignored, and the
+     * corresponding rows of the sparsity pattern will be empty.
+ *
+ * Constraints of the form supported by the AffineConstraints class may be
+ * supplied with the @p constraints argument. The method
+ * AffineConstraints::add_entries_local_to_global() is used to fill the
+ * final sparsity pattern.
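+     *
+     * A minimal usage sketch (hypothetical setup; it assumes a DoFHandler
+     * `space_dh` and a ParticleHandler `particle_handler` that have already
+     * been set up, as in the tests of this function):
+     * @code
+     * DynamicSparsityPattern dsp(particle_handler.n_global_particles(),
+     *                            space_dh.n_dofs());
+     * AffineConstraints<double> constraints;
+     * Particles::Utilities::create_interpolation_sparsity_pattern(
+     *   space_dh, particle_handler, dsp, constraints);
+     * @endcode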
+ *
+ * @author Bruno Blais, Luca Heltai, 2019
+ */
+ template <int dim,
+ int spacedim,
+ typename SparsityType,
+ typename number = double>
+ void
+ create_interpolation_sparsity_pattern(
+ const DoFHandler<dim, spacedim> & space_dh,
+ const Particles::ParticleHandler<dim, spacedim> &particle_handler,
+ SparsityType & sparsity,
+ const AffineConstraints<number> & constraints =
+ AffineConstraints<number>(),
+ const ComponentMask &space_comps = ComponentMask());
+
+ /**
+ * Create an interpolation matrix for particles.
+ *
+     * Given a triangulation representing the domain $\Omega$, a particle
+     * handler of particles in $\Omega$, and a scalar finite element space
+     * $V(\Omega) = \text{span}\{v_j\}_{j=0}^n$, compute the matrix
+     * \f[
+     * M_{ij} \dealcoloneq v_j(x_i),
+     * \f]
+ * where $V(\Omega)$ is the finite element space associated with the
+ * `space_dh`, and the index `i` is given by the particle id whose position
+ * is `x_i`.
+ *
+     * In the case of vector-valued finite element spaces, the components on
+     * which interpolation must be performed can be selected using a component
+     * mask. Only primitive finite element spaces are supported.
+     *
+     * When selecting more than one component, the resulting matrix will have
+     * a number of rows equal to `particle_handler.n_global_particles()
+     * * mask.n_selected_components()`, and a number of columns equal to
+     * `space_dh.n_dofs()`. The corresponding matrix entries are given by
+     * \f[
+     * M_{(i \, n_{\text{comps}} + k), j} \dealcoloneq v_j(x_i) \cdot
+     * e_{\text{comp}_j},
+     * \f]
+     * where `comp_j` is the only nonzero component of the vector-valued basis
+     * function `v_j` (equal to `fe.system_to_component_index(j).first`), and
+     * `k` is the index of `comp_j` within the selected components of the mask.
+ *
+ * The matrix is filled by locating the position of the particle with
+ * index `i` within the particle handler with respect to the embedding
+ * triangulation $\Omega$, and coupling it with all the local degrees of
+     * freedom specified in the component mask @p space_comps, following the
+     * ordering in which they are selected in the mask.
+ *
+ * If a particle does not fall within $\Omega$, it is ignored, and the
+ * corresponding rows of the matrix will be zero.
+ *
+ * Constraints of the form supported by the AffineConstraints class may be
+ * supplied with the @p constraints argument. The method
+ * AffineConstraints::distribute_local_to_global() is used to distribute
+ * the entries of the matrix to respect the given constraints.
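+     *
+     * A minimal usage sketch (hypothetical setup; it assumes `dsp` was
+     * filled by create_interpolation_sparsity_pattern() as above, and that
+     * the matrix sizes match the resulting sparsity pattern):
+     * @code
+     * SparsityPattern sparsity;
+     * sparsity.copy_from(dsp);
+     * SparseMatrix<double> matrix;
+     * matrix.reinit(sparsity);
+     * Particles::Utilities::create_interpolation_matrix(
+     *   space_dh, particle_handler, matrix, constraints);
+     * @endcode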
+ *
+ * @author Bruno Blais, Luca Heltai, 2019
+ */
+ template <int dim, int spacedim, typename MatrixType>
+ void
+ create_interpolation_matrix(
+ const DoFHandler<dim, spacedim> & space_dh,
+ const Particles::ParticleHandler<dim, spacedim> &particle_handler,
+ MatrixType & matrix,
+ const AffineConstraints<typename MatrixType::value_type> &constraints =
+ AffineConstraints<typename MatrixType::value_type>(),
+ const ComponentMask &space_comps = ComponentMask());
+ } // namespace Utilities
+} // namespace Particles
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
SET(_src
+ data_out.cc
particle.cc
particle_accessor.cc
particle_iterator.cc
particle_handler.cc
generators.cc
property_pool.cc
- data_out.cc
+ utilities.cc
)
SET(_inst
+ data_out.inst.in
particle.inst.in
particle_accessor.inst.in
particle_iterator.inst.in
particle_handler.inst.in
generators.inst.in
- data_out.inst.in
+ utilities.inst.in
)
FILE(GLOB _header
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/lac/generic_linear_algebra.h>
+
+#include <deal.II/particles/utilities.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Particles
+{
+ namespace Utilities
+ {
+ template <int dim, int spacedim, typename SparsityType, typename number>
+ void
+ create_interpolation_sparsity_pattern(
+ const DoFHandler<dim, spacedim> & space_dh,
+ const Particles::ParticleHandler<dim, spacedim> &particle_handler,
+ SparsityType & sparsity,
+ const AffineConstraints<number> & constraints,
+ const ComponentMask & space_comps)
+ {
+ if (particle_handler.n_locally_owned_particles() == 0)
+ return; // nothing to do here
+
+ const auto &tria = space_dh.get_triangulation();
+ const auto &fe = space_dh.get_fe();
+ const auto max_particles_per_cell =
+ particle_handler.n_global_max_particles_per_cell();
+
+
+ // Take care of components
+ const ComponentMask comps =
+ (space_comps.size() == 0 ? ComponentMask(fe.n_components(), true) :
+ space_comps);
+ AssertDimension(comps.size(), fe.n_components());
+
+ const auto n_comps = comps.n_selected_components();
+
+ // Global to local indices
+ std::vector<unsigned int> space_gtl(fe.n_components(),
+ numbers::invalid_unsigned_int);
+ for (unsigned int i = 0, j = 0; i < space_gtl.size(); ++i)
+ if (comps[i])
+ space_gtl[i] = j++;
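+
+      // For example, with fe.n_components() == 3 and a mask that selects
+      // only the last component, space_gtl ends up as
+      // {invalid_unsigned_int, invalid_unsigned_int, 0}.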
+
+ // [TODO]: when the add_entries_local_to_global below will implement
+ // the version with the dof_mask, this should be uncommented.
+ // // Construct a dof_mask, used to distribute entries to the sparsity
+ // Table<2, bool> dof_mask(max_particles_per_cell * n_comps,
+ // fe.dofs_per_cell);
+ // dof_mask.fill(false);
+ // for (unsigned int i = 0; i < space_fe.dofs_per_cell; ++i)
+ // {
+ // const auto comp_i = space_fe.system_to_component_index(i).first;
+ // if (space_gtl[comp_i] != numbers::invalid_unsigned_int)
+ // for (unsigned int j = 0; j < max_particles_per_cell; ++j)
+ // dof_mask(i, j * n_comps + space_gtl[comp_i]) = true;
+ // }
+
+ std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
+ std::vector<types::particle_index> particle_indices(
+ max_particles_per_cell * n_comps);
+
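+      // Loop over all local particles, one populated cell at a time: the
+      // inner loop below advances the `particle` iterator through all
+      // particles of the current cell, so each pass of this while loop
+      // starts on the first particle of the next cell.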
+ auto particle = particle_handler.begin();
+ while (particle != particle_handler.end())
+ {
+ const auto &cell = particle->get_surrounding_cell(tria);
+ const auto dh_cell =
+ typename DoFHandler<dim, spacedim>::cell_iterator(*cell, &space_dh);
+ dh_cell->get_dof_indices(dof_indices);
+ const auto pic = particle_handler.particles_in_cell(cell);
+ const auto n_particles = particle_handler.n_particles_in_cell(cell);
+ particle_indices.resize(n_particles * n_comps);
+ Assert(pic.begin() == particle, ExcInternalError());
+ for (unsigned int i = 0; particle != pic.end(); ++particle, ++i)
+ {
+ const auto p_id = particle->get_id();
+ for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
+ {
+ const auto comp_j =
+ space_gtl[fe.system_to_component_index(j).first];
+ if (comp_j != numbers::invalid_unsigned_int)
+ constraints.add_entries_local_to_global(
+ {p_id * n_comps + comp_j}, {dof_indices[j]}, sparsity);
+ }
+ }
+ // [TODO]: when this works, use this:
+ // constraints.add_entries_local_to_global(particle_indices,
+ // dof_indices,
+ // sparsity,
+ // dof_mask);
+ }
+ }
+
+
+
+ template <int dim, int spacedim, typename MatrixType>
+ void
+ create_interpolation_matrix(
+ const DoFHandler<dim, spacedim> & space_dh,
+ const Particles::ParticleHandler<dim, spacedim> &particle_handler,
+ MatrixType & matrix,
+ const AffineConstraints<typename MatrixType::value_type> &constraints,
+ const ComponentMask & space_comps)
+ {
+ if (particle_handler.n_locally_owned_particles() == 0)
+ {
+ matrix.compress(VectorOperation::add);
+ return; // nothing else to do here
+ }
+
+ AssertDimension(matrix.n(), space_dh.n_dofs());
+
+ const auto &tria = space_dh.get_triangulation();
+ const auto &fe = space_dh.get_fe();
+ const auto max_particles_per_cell =
+ particle_handler.n_global_max_particles_per_cell();
+
+ // Take care of components
+ const ComponentMask comps =
+ (space_comps.size() == 0 ? ComponentMask(fe.n_components(), true) :
+ space_comps);
+ AssertDimension(comps.size(), fe.n_components());
+ const auto n_comps = comps.n_selected_components();
+
+ AssertDimension(matrix.m(),
+ particle_handler.n_global_particles() * n_comps);
+
+
+ // Global to local indices
+ std::vector<unsigned int> space_gtl(fe.n_components(),
+ numbers::invalid_unsigned_int);
+ for (unsigned int i = 0, j = 0; i < space_gtl.size(); ++i)
+ if (comps[i])
+ space_gtl[i] = j++;
+
+ // [TODO]: when the add_entries_local_to_global below will implement
+ // the version with the dof_mask, this should be uncommented.
+ // // Construct a dof_mask, used to distribute entries to the sparsity
+ // Table<2, bool> dof_mask(max_particles_per_cell * n_comps,
+ // fe.dofs_per_cell);
+ // dof_mask.fill(false);
+ // for (unsigned int i = 0; i < space_fe.dofs_per_cell; ++i)
+ // {
+ // const auto comp_i = space_fe.system_to_component_index(i).first;
+ // if (space_gtl[comp_i] != numbers::invalid_unsigned_int)
+ // for (unsigned int j = 0; j < max_particles_per_cell; ++j)
+ // dof_mask(i, j * n_comps + space_gtl[comp_i]) = true;
+ // }
+
+ std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
+ std::vector<types::particle_index> particle_indices(
+ max_particles_per_cell * n_comps);
+
+ FullMatrix<typename MatrixType::value_type> local_matrix(
+ max_particles_per_cell * n_comps, fe.dofs_per_cell);
+
+ auto particle = particle_handler.begin();
+ while (particle != particle_handler.end())
+ {
+ const auto &cell = particle->get_surrounding_cell(tria);
+          const auto dh_cell =
+ typename DoFHandler<dim, spacedim>::cell_iterator(*cell, &space_dh);
+ dh_cell->get_dof_indices(dof_indices);
+ const auto pic = particle_handler.particles_in_cell(cell);
+ const auto n_particles = particle_handler.n_particles_in_cell(cell);
+ particle_indices.resize(n_particles * n_comps);
+ local_matrix.reinit({n_particles * n_comps, fe.dofs_per_cell});
+ Assert(pic.begin() == particle, ExcInternalError());
+ for (unsigned int i = 0; particle != pic.end(); ++particle, ++i)
+ {
+ const auto &reference_location =
+ particle->get_reference_location();
+
+ for (unsigned int d = 0; d < n_comps; ++d)
+ particle_indices[i * n_comps + d] =
+ particle->get_id() * n_comps + d;
+
+ for (unsigned int j = 0; j < fe.dofs_per_cell; ++j)
+ {
+ const auto comp_j =
+ space_gtl[fe.system_to_component_index(j).first];
+ if (comp_j != numbers::invalid_unsigned_int)
+ local_matrix(i * n_comps + comp_j, j) =
+ fe.shape_value(j, reference_location);
+ }
+ }
+ constraints.distribute_local_to_global(local_matrix,
+ particle_indices,
+ dof_indices,
+ matrix);
+ }
+ matrix.compress(VectorOperation::add);
+ }
+
+#include "utilities.inst"
+
+ } // namespace Utilities
+} // namespace Particles
+DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS;
+ Sparsity : SPARSITY_PATTERNS;
+ scalar : REAL_SCALARS)
+ {
+#if deal_II_dimension <= deal_II_space_dimension && deal_II_dimension > 1
+ template void create_interpolation_sparsity_pattern<deal_II_dimension,
+ deal_II_space_dimension,
+ Sparsity,
+ scalar>(
+ const DoFHandler<deal_II_dimension, deal_II_space_dimension> &space_dh,
+ const Particles::ParticleHandler<deal_II_dimension,
+ deal_II_space_dimension>
+ & particle_handler,
+ Sparsity & sparsity,
+ const AffineConstraints<scalar> &constraints,
+ const ComponentMask & space_comps);
+#endif
+ }
+
+
+for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS;
+ Matrix : SPARSE_MATRICES)
+ {
+#if deal_II_dimension <= deal_II_space_dimension && deal_II_dimension > 1
+ template void create_interpolation_matrix<deal_II_dimension,
+ deal_II_space_dimension,
+ Matrix>(
+ const DoFHandler<deal_II_dimension, deal_II_space_dimension> &space_dh,
+ const Particles::ParticleHandler<deal_II_dimension,
+ deal_II_space_dimension>
+ & particle_handler,
+ Matrix & matrix,
+ const AffineConstraints<Matrix::value_type> &constraints,
+ const ComponentMask & space_comps);
+#endif
+ }
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test that an interpolation sparsity pattern can be constructed for a
+// single particle per processor for every valid dimension pair that exists
+
+#include <deal.II/base/point.h>
+#include <deal.II/base/tensor.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/particles/generators.h>
+#include <deal.II/particles/particle_handler.h>
+#include <deal.II/particles/utilities.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim, int spacedim>
+void
+test()
+{
+ const unsigned int my_mpi_id =
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim, spacedim> space_tria(
+ MPI_COMM_WORLD);
+ MappingQ1<dim, spacedim> mapping;
+ GridGenerator::hyper_cube(space_tria, -1, 1);
+ space_tria.refine_global(2);
+
+ // Start by creating a particle handler that contains one particle per
+ // processor
+ Particles::ParticleHandler<dim, spacedim> particle_handler(space_tria,
+ mapping);
+
+ const unsigned int estimated_n_particles_per_processor = 1;
+
+ Particles::Generators::probabilistic_locations<dim, spacedim>(
+ space_tria,
+ ConstantFunction<spacedim, double>(1.),
+ true,
+ Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) *
+ estimated_n_particles_per_processor,
+ particle_handler);
+
+
+ FE_Q<dim, spacedim> space_fe(1);
+
+ ComponentMask space_mask(space_fe.n_components(), true);
+
+ const auto n_comps = space_mask.n_selected_components();
+
+ DoFHandler<dim, spacedim> space_dh(space_tria);
+ space_dh.distribute_dofs(space_fe);
+
+ IndexSet locally_owned_dofs = space_dh.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs(space_dh, locally_relevant_dofs);
+
+ if (my_mpi_id == 0)
+ deallog << "dim: " << dim << ", spacedim: " << spacedim << std::endl
+ << "Total number of particles: "
+ << particle_handler.n_global_particles() << std::endl
+
+ << "Space FE: " << space_fe.get_name() << std::endl
+ << "Space dofs: " << space_dh.n_dofs() << std::endl;
+
+ DynamicSparsityPattern dsp(particle_handler.n_global_particles() * n_comps,
+ space_dh.n_dofs());
+
+ AffineConstraints<double> constraints;
+
+ // Build the interpolation sparsity
+ Particles::Utilities::create_interpolation_sparsity_pattern(
+ space_dh, particle_handler, dsp, constraints, space_mask);
+
+ const auto n_local_particles_dofs =
+ particle_handler.n_locally_owned_particles() * n_comps;
+
+ auto particle_sizes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ const auto my_start = std::accumulate(particle_sizes.begin(),
+ particle_sizes.begin() + my_mpi_id,
+ 0u);
+
+ IndexSet local_particle_index_set(particle_handler.n_global_particles() *
+ n_comps);
+
+ local_particle_index_set.add_range(my_start,
+ my_start + n_local_particles_dofs);
+
+ auto global_particles_index_set =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ global_particles_index_set,
+ MPI_COMM_WORLD,
+ local_particle_index_set);
+ // Actually log the sparsity
+ {
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ sparsity_pattern.print_gnuplot(deallog.get_file_stream());
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll init;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+ deallog.push("2d/3d");
+ test<2, 3>();
+ deallog.pop();
+ deallog.push("3d/3d");
+ test<3, 3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 1
+DEAL:0:2d/2d::Space FE: FE_Q<2>(1)
+DEAL:0:2d/2d::Space dofs: 25
+8 0
+13 0
+17 0
+21 0
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 1
+DEAL:0:2d/3d::Space FE: FE_Q<2,3>(1)
+DEAL:0:2d/3d::Space dofs: 25
+8 0
+13 0
+17 0
+21 0
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 1
+DEAL:0:3d/3d::Space FE: FE_Q<3>(1)
+DEAL:0:3d/3d::Space dofs: 125
+58 0
+59 0
+61 0
+62 0
+106 0
+107 0
+109 0
+110 0
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 2
+DEAL:0:2d/2d::Space FE: FE_Q<2>(1)
+DEAL:0:2d/2d::Space dofs: 25
+5 0
+8 0
+10 0
+13 0
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 2
+DEAL:0:2d/3d::Space FE: FE_Q<2,3>(1)
+DEAL:0:2d/3d::Space dofs: 25
+5 0
+8 0
+10 0
+13 0
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 2
+DEAL:0:3d/3d::Space FE: FE_Q<3>(1)
+DEAL:0:3d/3d::Space dofs: 125
+35 0
+36 0
+37 0
+38 0
+63 0
+64 0
+65 0
+66 0
+
+7 -1
+8 -1
+16 -1
+17 -1
+7 -1
+8 -1
+16 -1
+17 -1
+78 -1
+80 -1
+82 -1
+83 -1
+87 -1
+89 -1
+91 -1
+92 -1
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test that an interpolation sparsity pattern can be constructed for a
+// single particle per processor for every valid dimension pair that exists
+// when the DoFHandler associated with the triangulation has more than one
+// component and only one component is selected
+
+#include <deal.II/base/point.h>
+#include <deal.II/base/tensor.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/particles/generators.h>
+#include <deal.II/particles/particle_handler.h>
+#include <deal.II/particles/utilities.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim, int spacedim>
+void
+test()
+{
+ const unsigned int my_mpi_id =
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim, spacedim> space_tria(
+ MPI_COMM_WORLD);
+ MappingQ1<dim, spacedim> mapping;
+ GridGenerator::hyper_cube(space_tria, -1, 1);
+ space_tria.refine_global(2);
+
+ // Start by creating a particle handler that contains one particle per
+ // processor
+ Particles::ParticleHandler<dim, spacedim> particle_handler(space_tria,
+ mapping);
+
+ const unsigned int estimated_n_particles_per_processor = 1;
+
+ Particles::Generators::probabilistic_locations<dim, spacedim>(
+ space_tria,
+ ConstantFunction<spacedim, double>(1.),
+ true,
+ Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) *
+ estimated_n_particles_per_processor,
+ particle_handler);
+
+
+ FESystem<dim, spacedim> space_fe(FE_Q<dim, spacedim>(1), 3);
+
+ ComponentMask space_mask({false, false, true});
+
+ const auto n_comps = space_mask.n_selected_components();
+
+ DoFHandler<dim, spacedim> space_dh(space_tria);
+ space_dh.distribute_dofs(space_fe);
+
+ IndexSet locally_owned_dofs = space_dh.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs(space_dh, locally_relevant_dofs);
+
+ if (my_mpi_id == 0)
+ deallog << "dim: " << dim << ", spacedim: " << spacedim << std::endl
+ << "Total number of particles: "
+ << particle_handler.n_global_particles() << std::endl
+
+ << "Space FE: " << space_fe.get_name() << std::endl
+ << "Space dofs: " << space_dh.n_dofs() << std::endl;
+
+ DynamicSparsityPattern dsp(particle_handler.n_global_particles() * n_comps,
+ space_dh.n_dofs());
+
+ AffineConstraints<double> constraints;
+
+ // Build the interpolation sparsity
+ Particles::Utilities::create_interpolation_sparsity_pattern(
+ space_dh, particle_handler, dsp, constraints, space_mask);
+
+ const auto n_local_particles_dofs =
+ particle_handler.n_locally_owned_particles() * n_comps;
+
+ auto particle_sizes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ const auto my_start = std::accumulate(particle_sizes.begin(),
+ particle_sizes.begin() + my_mpi_id,
+ 0u);
+
+ IndexSet local_particle_index_set(particle_handler.n_global_particles() *
+ n_comps);
+
+ local_particle_index_set.add_range(my_start,
+ my_start + n_local_particles_dofs);
+
+ auto global_particles_index_set =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ global_particles_index_set,
+ MPI_COMM_WORLD,
+ local_particle_index_set);
+ // Actually log the sparsity
+ {
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ sparsity_pattern.print_gnuplot(deallog.get_file_stream());
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll init;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+ deallog.push("2d/3d");
+ test<2, 3>();
+ deallog.pop();
+ deallog.push("3d/3d");
+ test<3, 3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 1
+DEAL:0:2d/2d::Space FE: FESystem<2>[FE_Q<2>(1)^3]
+DEAL:0:2d/2d::Space dofs: 75
+26 0
+41 0
+53 0
+65 0
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 1
+DEAL:0:2d/3d::Space FE: FESystem<2,3>[FE_Q<2,3>(1)^3]
+DEAL:0:2d/3d::Space dofs: 75
+26 0
+41 0
+53 0
+65 0
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 1
+DEAL:0:3d/3d::Space FE: FESystem<3>[FE_Q<3>(1)^3]
+DEAL:0:3d/3d::Space dofs: 375
+176 0
+179 0
+185 0
+188 0
+320 0
+323 0
+329 0
+332 0
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 2
+DEAL:0:2d/2d::Space FE: FESystem<2>[FE_Q<2>(1)^3]
+DEAL:0:2d/2d::Space dofs: 75
+17 0
+26 0
+32 0
+41 0
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 2
+DEAL:0:2d/3d::Space FE: FESystem<2,3>[FE_Q<2,3>(1)^3]
+DEAL:0:2d/3d::Space dofs: 75
+17 0
+26 0
+32 0
+41 0
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 2
+DEAL:0:3d/3d::Space FE: FESystem<3>[FE_Q<3>(1)^3]
+DEAL:0:3d/3d::Space dofs: 375
+107 0
+110 0
+113 0
+116 0
+191 0
+194 0
+197 0
+200 0
+
+23 -1
+26 -1
+50 -1
+53 -1
+23 -1
+26 -1
+50 -1
+53 -1
+236 -1
+242 -1
+248 -1
+251 -1
+263 -1
+269 -1
+275 -1
+278 -1
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test that an interpolation sparsity pattern can be constructed for a
+// single particle per processor for every valid dimension pair that exists
+// when the DoFHandler associated with the triangulation has more than one
+// component
+
+#include <deal.II/base/point.h>
+#include <deal.II/base/tensor.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/particles/generators.h>
+#include <deal.II/particles/particle_handler.h>
+#include <deal.II/particles/utilities.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim, int spacedim>
+void
+test()
+{
+ const unsigned int my_mpi_id =
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim, spacedim> space_tria(
+ MPI_COMM_WORLD);
+ MappingQ1<dim, spacedim> mapping;
+ GridGenerator::hyper_cube(space_tria, -1, 1);
+ space_tria.refine_global(2);
+
+ // Start by creating a particle handler that contains one particle per
+ // processor
+ Particles::ParticleHandler<dim, spacedim> particle_handler(space_tria,
+ mapping);
+
+ const unsigned int estimated_n_particles_per_processor = 1;
+
+ Particles::Generators::probabilistic_locations<dim, spacedim>(
+ space_tria,
+ ConstantFunction<spacedim, double>(1.),
+ true,
+ Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) *
+ estimated_n_particles_per_processor,
+ particle_handler);
+
+
+ FESystem<dim, spacedim> space_fe(FE_Q<dim, spacedim>(1), 3);
+
+ ComponentMask space_mask(space_fe.n_components(), true);
+
+ const auto n_comps = space_mask.n_selected_components();
+
+ DoFHandler<dim, spacedim> space_dh(space_tria);
+ space_dh.distribute_dofs(space_fe);
+
+ IndexSet locally_owned_dofs = space_dh.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs(space_dh, locally_relevant_dofs);
+
+ if (my_mpi_id == 0)
+ deallog << "dim: " << dim << ", spacedim: " << spacedim << std::endl
+ << "Total number of particles: "
+ << particle_handler.n_global_particles() << std::endl
+
+ << "Space FE: " << space_fe.get_name() << std::endl
+ << "Space dofs: " << space_dh.n_dofs() << std::endl;
+
+ DynamicSparsityPattern dsp(particle_handler.n_global_particles() * n_comps,
+ space_dh.n_dofs());
+
+ AffineConstraints<double> constraints;
+
+ // Build the interpolation sparsity
+ Particles::Utilities::create_interpolation_sparsity_pattern(
+ space_dh, particle_handler, dsp, constraints, space_mask);
+
+ const auto n_local_particles_dofs =
+ particle_handler.n_locally_owned_particles() * n_comps;
+
+ auto particle_sizes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ const auto my_start = std::accumulate(particle_sizes.begin(),
+ particle_sizes.begin() + my_mpi_id,
+ 0u);
+
+ IndexSet local_particle_index_set(particle_handler.n_global_particles() *
+ n_comps);
+
+ local_particle_index_set.add_range(my_start,
+ my_start + n_local_particles_dofs);
+
+ auto global_particles_index_set =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ global_particles_index_set,
+ MPI_COMM_WORLD,
+ local_particle_index_set);
+ // Actually log the sparsity
+ {
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ sparsity_pattern.print_gnuplot(deallog.get_file_stream());
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll init;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+ deallog.push("2d/3d");
+ test<2, 3>();
+ deallog.pop();
+ deallog.push("3d/3d");
+ test<3, 3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 1
+DEAL:0:2d/2d::Space FE: FESystem<2>[FE_Q<2>(1)^3]
+DEAL:0:2d/2d::Space dofs: 75
+24 0
+39 0
+51 0
+63 0
+25 -1
+40 -1
+52 -1
+64 -1
+26 -2
+41 -2
+53 -2
+65 -2
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 1
+DEAL:0:2d/3d::Space FE: FESystem<2,3>[FE_Q<2,3>(1)^3]
+DEAL:0:2d/3d::Space dofs: 75
+24 0
+39 0
+51 0
+63 0
+25 -1
+40 -1
+52 -1
+64 -1
+26 -2
+41 -2
+53 -2
+65 -2
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 1
+DEAL:0:3d/3d::Space FE: FESystem<3>[FE_Q<3>(1)^3]
+DEAL:0:3d/3d::Space dofs: 375
+174 0
+177 0
+183 0
+186 0
+318 0
+321 0
+327 0
+330 0
+175 -1
+178 -1
+184 -1
+187 -1
+319 -1
+322 -1
+328 -1
+331 -1
+176 -2
+179 -2
+185 -2
+188 -2
+320 -2
+323 -2
+329 -2
+332 -2
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 2
+DEAL:0:2d/2d::Space FE: FESystem<2>[FE_Q<2>(1)^3]
+DEAL:0:2d/2d::Space dofs: 75
+15 0
+24 0
+30 0
+39 0
+16 -1
+25 -1
+31 -1
+40 -1
+17 -2
+26 -2
+32 -2
+41 -2
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 2
+DEAL:0:2d/3d::Space FE: FESystem<2,3>[FE_Q<2,3>(1)^3]
+DEAL:0:2d/3d::Space dofs: 75
+15 0
+24 0
+30 0
+39 0
+16 -1
+25 -1
+31 -1
+40 -1
+17 -2
+26 -2
+32 -2
+41 -2
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 2
+DEAL:0:3d/3d::Space FE: FESystem<3>[FE_Q<3>(1)^3]
+DEAL:0:3d/3d::Space dofs: 375
+105 0
+108 0
+111 0
+114 0
+189 0
+192 0
+195 0
+198 0
+106 -1
+109 -1
+112 -1
+115 -1
+190 -1
+193 -1
+196 -1
+199 -1
+107 -2
+110 -2
+113 -2
+116 -2
+191 -2
+194 -2
+197 -2
+200 -2
+
+21 -3
+24 -3
+48 -3
+51 -3
+22 -4
+25 -4
+49 -4
+52 -4
+23 -5
+26 -5
+50 -5
+53 -5
+21 -3
+24 -3
+48 -3
+51 -3
+22 -4
+25 -4
+49 -4
+52 -4
+23 -5
+26 -5
+50 -5
+53 -5
+234 -3
+240 -3
+246 -3
+249 -3
+261 -3
+267 -3
+273 -3
+276 -3
+235 -4
+241 -4
+247 -4
+250 -4
+262 -4
+268 -4
+274 -4
+277 -4
+236 -5
+242 -5
+248 -5
+251 -5
+263 -5
+269 -5
+275 -5
+278 -5
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test that an interpolation matrix can be constructed for a single
+// particle per vertex (yielding a square matrix), and check that we obtain
+// the identity matrix
+
+#include <deal.II/base/point.h>
+#include <deal.II/base/tensor.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include <deal.II/particles/generators.h>
+#include <deal.II/particles/particle_handler.h>
+#include <deal.II/particles/utilities.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim, int spacedim>
+void
+test()
+{
+ const unsigned int my_mpi_id =
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim, spacedim> space_tria(
+ MPI_COMM_WORLD);
+ MappingQ1<dim, spacedim> mapping;
+ GridGenerator::hyper_cube(space_tria, -1, 1);
+ // space_tria.refine_global(1);
+
+  // Create a particle handler that will contain one particle per support
+  // point of the finite element space
+ Particles::ParticleHandler<dim, spacedim> particle_handler(space_tria,
+ mapping);
+
+ FE_Q<dim, spacedim> space_fe(1);
+
+ ComponentMask space_mask(space_fe.n_components(), true);
+
+ Particles::Generators::regular_reference_locations<dim, spacedim>(
+ space_tria, space_fe.get_unit_support_points(), particle_handler);
+
+ const auto n_comps = space_mask.n_selected_components();
+
+ DoFHandler<dim, spacedim> space_dh(space_tria);
+ space_dh.distribute_dofs(space_fe);
+
+ IndexSet locally_owned_dofs = space_dh.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs(space_dh, locally_relevant_dofs);
+
+ if (my_mpi_id == 0)
+ deallog << "dim: " << dim << ", spacedim: " << spacedim << std::endl
+ << "Total number of particles: "
+ << particle_handler.n_global_particles() << std::endl
+
+ << "Space FE: " << space_fe.get_name() << std::endl
+ << "Space dofs: " << space_dh.n_dofs() << std::endl;
+
+ DynamicSparsityPattern dsp(particle_handler.n_global_particles() * n_comps,
+ space_dh.n_dofs());
+
+ AffineConstraints<double> constraints;
+
+ // Build the interpolation sparsity
+ Particles::Utilities::create_interpolation_sparsity_pattern(
+ space_dh, particle_handler, dsp, constraints, space_mask);
+
+ const auto n_local_particles_dofs =
+ particle_handler.n_locally_owned_particles() * n_comps;
+
+ auto particle_sizes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ const auto my_start = std::accumulate(particle_sizes.begin(),
+ particle_sizes.begin() + my_mpi_id,
+ 0u);
+
+ IndexSet local_particle_index_set(particle_handler.n_global_particles() *
+ n_comps);
+
+ local_particle_index_set.add_range(my_start,
+ my_start + n_local_particles_dofs);
+
+ auto global_particles_index_set =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ global_particles_index_set,
+ MPI_COMM_WORLD,
+ local_particle_index_set);
+  // Finalize the sparsity pattern
+
+ SparsityPattern sparsity_pattern;
+ sparsity_pattern.copy_from(dsp);
+ // Create a SparseMatrix
+ SparseMatrix<double> matrix;
+ matrix.reinit(sparsity_pattern);
+
+ // Build the interpolation matrix
+ Particles::Utilities::create_interpolation_matrix(
+ space_dh, particle_handler, matrix, constraints, space_mask);
+
+ matrix.print(deallog.get_file_stream());
+}
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll init;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+ deallog.push("2d/3d");
+ test<2, 3>();
+ deallog.pop();
+ deallog.push("3d/3d");
+ test<3, 3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 4
+DEAL:0:2d/2d::Space FE: FE_Q<2>(1)
+DEAL:0:2d/2d::Space dofs: 4
+(0,0) 1.00000
+(0,1) 0.00000
+(0,2) 0.00000
+(0,3) 0.00000
+(1,1) 1.00000
+(1,0) 0.00000
+(1,2) 0.00000
+(1,3) 0.00000
+(2,2) 1.00000
+(2,0) 0.00000
+(2,1) 0.00000
+(2,3) 0.00000
+(3,3) 1.00000
+(3,0) 0.00000
+(3,1) 0.00000
+(3,2) 0.00000
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 4
+DEAL:0:2d/3d::Space FE: FE_Q<2,3>(1)
+DEAL:0:2d/3d::Space dofs: 4
+(0,0) 1.00000
+(0,1) 0.00000
+(0,2) 0.00000
+(0,3) 0.00000
+(1,1) 1.00000
+(1,0) 0.00000
+(1,2) 0.00000
+(1,3) 0.00000
+(2,2) 1.00000
+(2,0) 0.00000
+(2,1) 0.00000
+(2,3) 0.00000
+(3,3) 1.00000
+(3,0) 0.00000
+(3,1) 0.00000
+(3,2) 0.00000
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 8
+DEAL:0:3d/3d::Space FE: FE_Q<3>(1)
+DEAL:0:3d/3d::Space dofs: 8
+(0,0) 1.00000
+(0,1) 0.00000
+(0,2) 0.00000
+(0,3) 0.00000
+(0,4) 0.00000
+(0,5) 0.00000
+(0,6) 0.00000
+(0,7) 0.00000
+(1,1) 1.00000
+(1,0) 0.00000
+(1,2) 0.00000
+(1,3) 0.00000
+(1,4) 0.00000
+(1,5) 0.00000
+(1,6) 0.00000
+(1,7) 0.00000
+(2,2) 1.00000
+(2,0) 0.00000
+(2,1) 0.00000
+(2,3) 0.00000
+(2,4) 0.00000
+(2,5) 0.00000
+(2,6) 0.00000
+(2,7) 0.00000
+(3,3) 1.00000
+(3,0) 0.00000
+(3,1) 0.00000
+(3,2) 0.00000
+(3,4) 0.00000
+(3,5) 0.00000
+(3,6) 0.00000
+(3,7) 0.00000
+(4,4) 1.00000
+(4,0) 0.00000
+(4,1) 0.00000
+(4,2) 0.00000
+(4,3) 0.00000
+(4,5) 0.00000
+(4,6) 0.00000
+(4,7) 0.00000
+(5,5) 1.00000
+(5,0) 0.00000
+(5,1) 0.00000
+(5,2) 0.00000
+(5,3) 0.00000
+(5,4) 0.00000
+(5,6) 0.00000
+(5,7) 0.00000
+(6,6) 1.00000
+(6,0) 0.00000
+(6,1) 0.00000
+(6,2) 0.00000
+(6,3) 0.00000
+(6,4) 0.00000
+(6,5) 0.00000
+(6,7) 0.00000
+(7,7) 1.00000
+(7,0) 0.00000
+(7,1) 0.00000
+(7,2) 0.00000
+(7,3) 0.00000
+(7,4) 0.00000
+(7,5) 0.00000
+(7,6) 0.00000
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test that an interpolation matrix can be constructed to interpolate
+// at Gauss quadrature points, and check the interpolation of a linear
+// function using Trilinos
+
+#include <deal.II/base/function_lib.h>
+#include <deal.II/base/point.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/tensor.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+#include <deal.II/particles/generators.h>
+#include <deal.II/particles/particle_handler.h>
+#include <deal.II/particles/utilities.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim, int spacedim>
+void
+test()
+{
+ const unsigned int my_mpi_id =
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim, spacedim> space_tria(
+ MPI_COMM_WORLD);
+ MappingQ1<dim, spacedim> mapping;
+ GridGenerator::hyper_cube(space_tria, -1, 1);
+ space_tria.refine_global(2);
+
+  // Create a particle handler that will contain one particle per Gauss
+  // quadrature point of every cell
+ Particles::ParticleHandler<dim, spacedim> particle_handler(space_tria,
+ mapping);
+
+ FE_Q<dim, spacedim> space_fe(1);
+
+ ComponentMask space_mask(space_fe.n_components(), true);
+
+ Particles::Generators::regular_reference_locations<dim, spacedim>(
+ space_tria, QGauss<dim>(2).get_points(), particle_handler);
+
+ const auto n_comps = space_mask.n_selected_components();
+
+ DoFHandler<dim, spacedim> space_dh(space_tria);
+ space_dh.distribute_dofs(space_fe);
+
+ IndexSet locally_owned_dofs = space_dh.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs(space_dh, locally_relevant_dofs);
+
+ if (my_mpi_id == 0)
+ deallog << "dim: " << dim << ", spacedim: " << spacedim << std::endl
+ << "Total number of particles: "
+ << particle_handler.n_global_particles() << std::endl
+
+ << "Space FE: " << space_fe.get_name() << std::endl
+ << "Space dofs: " << space_dh.n_dofs() << std::endl;
+
+ DynamicSparsityPattern dsp(particle_handler.n_global_particles() * n_comps,
+ space_dh.n_dofs());
+
+ AffineConstraints<double> constraints;
+
+ // Build the interpolation sparsity
+ Particles::Utilities::create_interpolation_sparsity_pattern(
+ space_dh, particle_handler, dsp, constraints, space_mask);
+
+ const auto n_local_particles_dofs =
+ particle_handler.n_locally_owned_particles() * n_comps;
+
+ auto particle_sizes =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ const auto my_start = std::accumulate(particle_sizes.begin(),
+ particle_sizes.begin() + my_mpi_id,
+ 0u);
+
+ IndexSet local_particle_index_set(particle_handler.n_global_particles() *
+ n_comps);
+
+ local_particle_index_set.add_range(my_start,
+ my_start + n_local_particles_dofs);
+
+ auto global_particles_index_set =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, n_local_particles_dofs);
+
+ SparsityTools::distribute_sparsity_pattern(dsp,
+ global_particles_index_set,
+ MPI_COMM_WORLD,
+ local_particle_index_set);
+  // Create a distributed Trilinos sparse matrix
+ TrilinosWrappers::SparseMatrix matrix;
+ matrix.reinit(local_particle_index_set,
+ locally_owned_dofs,
+ dsp,
+ MPI_COMM_WORLD);
+
+ // Build the interpolation matrix
+ Particles::Utilities::create_interpolation_matrix(
+ space_dh, particle_handler, matrix, constraints, space_mask);
+
+ // Create dofs and particle vectors
+ TrilinosWrappers::MPI::Vector field(locally_owned_dofs, MPI_COMM_WORLD);
+ TrilinosWrappers::MPI::Vector interpolation(local_particle_index_set,
+ MPI_COMM_WORLD);
+
+ Tensor<1, spacedim> exponents;
+ for (unsigned int i = 0; i < spacedim; ++i)
+ exponents[i] = 1;
+ Functions::Monomial<spacedim> linear(exponents, space_mask.size());
+
+ VectorTools::interpolate(space_dh, linear, field);
+ matrix.vmult(interpolation, field);
+
+ Vector<double> values(space_mask.size());
+
+ for (auto particle : particle_handler)
+ {
+ const auto &location = particle.get_location();
+ const auto &id = particle.get_id();
+ linear.vector_value(location, values);
+ for (unsigned int i = 0, j = 0; i < values.size(); ++i)
+ if (space_mask[i])
+ if (std::abs(values[i] - interpolation(id * n_comps + (j++))) > 1e-10)
+ deallog << "NOT OK" << std::endl;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll init;
+
+ deallog.push("2d/2d");
+ test<2, 2>();
+ deallog.pop();
+ deallog.push("2d/3d");
+ test<2, 3>();
+ deallog.pop();
+ deallog.push("3d/3d");
+ test<3, 3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 64
+DEAL:0:2d/2d::Space FE: FE_Q<2>(1)
+DEAL:0:2d/2d::Space dofs: 25
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 64
+DEAL:0:2d/3d::Space FE: FE_Q<2,3>(1)
+DEAL:0:2d/3d::Space dofs: 25
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 512
+DEAL:0:3d/3d::Space FE: FE_Q<3>(1)
+DEAL:0:3d/3d::Space dofs: 125
--- /dev/null
+
+DEAL:0:2d/2d::dim: 2, spacedim: 2
+DEAL:0:2d/2d::Total number of particles: 64
+DEAL:0:2d/2d::Space FE: FE_Q<2>(1)
+DEAL:0:2d/2d::Space dofs: 25
+DEAL:0:2d/3d::dim: 2, spacedim: 3
+DEAL:0:2d/3d::Total number of particles: 64
+DEAL:0:2d/3d::Space FE: FE_Q<2,3>(1)
+DEAL:0:2d/3d::Space dofs: 25
+DEAL:0:3d/3d::dim: 3, spacedim: 3
+DEAL:0:3d/3d::Total number of particles: 512
+DEAL:0:3d/3d::Space FE: FE_Q<3>(1)
+DEAL:0:3d/3d::Space dofs: 125
+
+