<h1>Results</h1>
Since the main purpose of this tutorial is to demonstrate how to use the
-CUDAWrappers::MatrixFree interface, not to compute anything useful in
+Portable::MatrixFree interface, not to compute anything useful in
itself, we just show the expected output here:
@code
Cycle 0
// The following headers provide the data structures for the
// implementation of matrix-free methods on the GPU:
-#include <deal.II/base/cuda.h>
-
-#include <deal.II/matrix_free/cuda_fe_evaluation.h>
-#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/portable_fe_evaluation.h>
+#include <deal.II/matrix_free/portable_matrix_free.h>
#include <deal.II/matrix_free/operators.h>
#include <fstream>
// Next, we define a class that implements the varying coefficients
// we want to use in the Helmholtz operator. Later, we want to pass
- // an object of this type to a CUDAWrappers::MatrixFree
+ // an object of this type to a Portable::MatrixFree
// object that expects the class to have an `operator()` that fills the
// values provided in the constructor for a given cell. This operator
// needs to run on the device, so it needs to be marked as
: coef(coefficient)
{}
- DEAL_II_HOST_DEVICE void operator()(
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- const unsigned int cell,
- const unsigned int q) const;
+ DEAL_II_HOST_DEVICE void
+ operator()(const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ const unsigned int cell,
+ const unsigned int q) const;
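  // As an aside (an illustrative sketch, not part of the original
  // program): an object of this type is later handed to
  // Portable::MatrixFree::evaluate_coefficients(), roughly like
  //
  //   VaryingCoefficientFunctor<dim, fe_degree> functor(coef.get_values());
  //   mf_data.evaluate_coefficients(functor);
  //
  // where `coef` and `mf_data` are the members of the operator class that
  // appears further below.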
- // Since CUDAWrappers::MatrixFree::Data doesn't know about the size of its
+ // Since Portable::MatrixFree::Data doesn't know about the size of its
// arrays, we need to store the number of quadrature points and the
// number of degrees of freedom in this class to do necessary index
// conversions.
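  // (The corresponding declarations are elided in this diff; as an
  // illustration, they typically look like
  //
  //   static const unsigned int n_dofs_1d  = fe_degree + 1;
  //   static const unsigned int n_q_points =
  //     dealii::Utilities::pow(fe_degree + 1, dim);
  //
  // matching the constants used by the classes below.)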
template <int dim, int fe_degree>
DEAL_II_HOST_DEVICE void
VaryingCoefficientFunctor<dim, fe_degree>::operator()(
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- const unsigned int cell,
- const unsigned int q) const
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ const unsigned int cell,
+ const unsigned int q) const
{
const unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q);
const Point<dim> q_point = gpu_data->get_quadrature_point(cell, q);
{
public:
DEAL_II_HOST_DEVICE HelmholtzOperatorQuad(
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- double *coef,
- int cell)
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ double *coef,
+ int cell)
: gpu_data(gpu_data)
, coef(coef)
, cell(cell)
{}
DEAL_II_HOST_DEVICE void operator()(
- CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
- *fe_eval,
+ Portable::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double> *fe_eval,
const int q_point) const;
static const unsigned int n_q_points =
dealii::Utilities::pow(fe_degree + 1, dim);
private:
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data;
- double *coef;
- int cell;
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data;
+ double *coef;
+ int cell;
};
// here:
template <int dim, int fe_degree>
DEAL_II_HOST_DEVICE void HelmholtzOperatorQuad<dim, fe_degree>::operator()(
- CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
- *fe_eval,
+ Portable::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double> *fe_eval,
const int q_point) const
{
const unsigned int pos =
class LocalHelmholtzOperator
{
public:
- // Again, the CUDAWrappers::MatrixFree object doesn't know about the number
+ // Again, the Portable::MatrixFree object doesn't know about the number
// of degrees of freedom and the number of quadrature points so we need
// to store these for index calculations in the call operator.
static constexpr unsigned int n_dofs_1d = fe_degree + 1;
: coef(coefficient)
{}
- DEAL_II_HOST_DEVICE void operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, double> *shared_data,
- const double *src,
- double *dst) const;
+ DEAL_II_HOST_DEVICE void
+ operator()(const unsigned int cell,
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ Portable::SharedData<dim, double> *shared_data,
+ const double *src,
+ double *dst) const;
private:
double *coef;
// vector.
template <int dim, int fe_degree>
DEAL_II_HOST_DEVICE void LocalHelmholtzOperator<dim, fe_degree>::operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, double> *shared_data,
- const double *src,
- double *dst) const
+ const unsigned int cell,
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ Portable::SharedData<dim, double> *shared_data,
+ const double *src,
+ double *dst) const
{
- CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
- fe_eval(gpu_data, shared_data);
+ Portable::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double> fe_eval(
+ gpu_data, shared_data);
fe_eval.read_dof_values(src);
fe_eval.evaluate(EvaluationFlags::values | EvaluationFlags::gradients);
fe_eval.apply_for_each_quad_point(
const;
private:
- CUDAWrappers::MatrixFree<dim, double> mf_data;
+ Portable::MatrixFree<dim, double> mf_data;
LinearAlgebra::distributed::Vector<double, MemorySpace::Default> coef;
};
const AffineConstraints<double> &constraints)
{
MappingQ<dim> mapping(fe_degree);
- typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData
- additional_data;
+ typename Portable::MatrixFree<dim, double>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
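  // A minimal sketch of how the pieces above are usually tied together
  // (illustrative only; the surrounding lines are elided in this diff, and
  // `dof_handler` stands for the DoFHandler member of the problem class):
  //
  //   const QGauss<1> quad(fe_degree + 1);
  //   mf_data.reinit(mapping, dof_handler, constraints, quad, additional_data);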
// This solve() function finally contains the calls to the new classes
  // previously discussed. Here we don't use any preconditioner, i.e., we
  // precondition by the identity matrix, to focus just on the peculiarities of
- // the CUDAWrappers::MatrixFree framework. Of course, in a real application
+ // the Portable::MatrixFree framework. Of course, in a real application
// the choice of a suitable preconditioner is crucial but we have at least the
// same restrictions as in step-37 since matrix entries are computed on the
// fly and not stored.
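  // As a hedged illustration (names such as `system_matrix_dev`,
  // `solution_dev`, and `system_rhs_dev` are placeholders, not taken
  // verbatim from the program), an unpreconditioned conjugate-gradient
  // solve on device vectors looks roughly like this:
  //
  //   PreconditionIdentity preconditioner;
  //   SolverControl solver_control(system_rhs_dev.size(),
  //                                1e-12 * system_rhs_dev.l2_norm());
  //   SolverCG<LinearAlgebra::distributed::Vector<double, MemorySpace::Default>>
  //     cg(solver_control);
  //   cg.solve(system_matrix_dev, solution_dev, system_rhs_dev, preconditioner);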
#ifndef dealii_cuda_fe_evaluation_h
#define dealii_cuda_fe_evaluation_h
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/memory_space.h>
-#include <deal.II/base/tensor.h>
-#include <deal.II/base/utilities.h>
-
-#include <deal.II/lac/cuda_atomic.h>
-#include <deal.II/lac/cuda_vector.h>
-
-#include <deal.II/matrix_free/cuda_hanging_nodes_internal.h>
-#include <deal.II/matrix_free/cuda_matrix_free.h>
-#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
-#include <deal.II/matrix_free/cuda_tensor_product_kernels.h>
-#include <deal.II/matrix_free/evaluation_flags.h>
-
-#include <Kokkos_Core.hpp>
+#include <deal.II/matrix_free/portable_fe_evaluation.h>
DEAL_II_NAMESPACE_OPEN
*/
namespace CUDAWrappers
{
- /**
- * This class provides all the functions necessary to evaluate functions at
- * quadrature points and cell integrations. In functionality, this class is
- * similar to FEValues<dim>.
- *
- * This class has five template arguments:
- *
- * @tparam dim Dimension in which this class is to be used
- *
- * @tparam fe_degree Degree of the tensor prodict finite element with fe_degree+1
- * degrees of freedom per coordinate direction
- *
- * @tparam n_q_points_1d Number of points in the quadrature formular in 1d,
- * defaults to fe_degree+1
- *
- * @tparam n_components Number of vector components when solving a system of
- * PDEs. If the same operation is applied to several components of a PDE (e.g.
- * a vector Laplace equation), they can be applied simultaneously with one
- * call (and often more efficiently). Defaults to 1
- *
- * @tparam Number Number format, @p double or @p float. Defaults to @p
- * double.
- *
- * @ingroup CUDAWrappers
- */
- template <int dim,
- int fe_degree,
- int n_q_points_1d = fe_degree + 1,
- int n_components_ = 1,
- typename Number = double>
- class FEEvaluation
- {
- public:
- /**
- * An alias for scalar quantities.
- */
- using value_type = Number;
-
- /**
- * An alias for vectorial quantities.
- */
- using gradient_type = Tensor<1, dim, Number>;
-
- /**
- * An alias to kernel specific information.
- */
- using data_type = typename MatrixFree<dim, Number>::Data;
-
- /**
- * Dimension.
- */
- static constexpr unsigned int dimension = dim;
-
- /**
- * Number of components.
- */
- static constexpr unsigned int n_components = n_components_;
-
- /**
- * Number of quadrature points per cell.
- */
- static constexpr unsigned int n_q_points =
- Utilities::pow(n_q_points_1d, dim);
-
- /**
- * Number of tensor degrees of freedoms per cell.
- */
- static constexpr unsigned int tensor_dofs_per_cell =
- Utilities::pow(fe_degree + 1, dim);
-
- /**
- * Constructor.
- */
- DEAL_II_HOST_DEVICE
- FEEvaluation(const data_type *data, SharedData<dim, Number> *shdata);
-
- /**
- * For the vector @p src, read out the values on the degrees of freedom of
- * the current cell, and store them internally. Similar functionality as
- * the function DoFAccessor::get_interpolated_dof_values when no
- * constraints are present, but it also includes constraints from hanging
- * nodes, so once can see it as a similar function to
- * AffineConstraints::read_dof_values() as well.
- */
- DEAL_II_HOST_DEVICE void
- read_dof_values(const Number *src);
-
- /**
- * Take the value stored internally on dof values of the current cell and
- * sum them into the vector @p dst. The function also applies constraints
- * during the write operation. The functionality is hence similar to the
- * function AffineConstraints::distribute_local_to_global.
- */
- DEAL_II_HOST_DEVICE void
- distribute_local_to_global(Number *dst) const;
-
- /**
- * Evaluate the function values and the gradients of the FE function given
- * at the DoF values in the input vector at the quadrature points on the
- * unit cell. The function arguments specify which parts shall actually be
- * computed. This function needs to be called before the functions
- * @p get_value() or @p get_gradient() give useful information.
- */
- DEAL_II_HOST_DEVICE void
- evaluate(const EvaluationFlags::EvaluationFlags evaluate_flag);
-
- /**
- * Evaluate the function values and the gradients of the FE function given
- * at the DoF values in the input vector at the quadrature points on the
- * unit cell. The function arguments specify which parts shall actually be
- * computed. This function needs to be called before the functions
- * @p get_value() or @p get_gradient() give useful information.
- */
- DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
- "Use the version taking EvaluationFlags.")
- DEAL_II_HOST_DEVICE
- void
- evaluate(const bool evaluate_val, const bool evaluate_grad);
-
- /**
- * This function takes the values and/or gradients that are stored on
- * quadrature points, tests them by all the basis functions/gradients on
- * the cell and performs the cell integration. The two function arguments
- * @p integrate_val and @p integrate_grad are used to enable/disable some
- * of the values or the gradients.
- */
- DEAL_II_HOST_DEVICE void
- integrate(const EvaluationFlags::EvaluationFlags integration_flag);
-
- /**
- * This function takes the values and/or gradients that are stored on
- * quadrature points, tests them by all the basis functions/gradients on
- * the cell and performs the cell integration. The two function arguments
- * @p integrate_val and @p integrate_grad are used to enable/disable some
- * of the values or the gradients.
- */
- DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
- "Use the version taking EvaluationFlags.")
- DEAL_II_HOST_DEVICE
- void
- integrate(const bool integrate_val, const bool integrate_grad);
-
- /**
- * Same as above, except that the quadrature point is computed from thread
- * id.
- */
- DEAL_II_HOST_DEVICE value_type
- get_value(int q_point) const;
-
- /**
- * Same as above, except that the local dof index is computed from the
- * thread id.
- */
- DEAL_II_HOST_DEVICE value_type
- get_dof_value(int q_point) const;
-
- /**
- * Same as above, except that the quadrature point is computed from the
- * thread id.
- */
- DEAL_II_HOST_DEVICE void
- submit_value(const value_type &val_in, int q_point);
-
- /**
- * Same as above, except that the local dof index is computed from the
- * thread id.
- */
- DEAL_II_HOST_DEVICE void
- submit_dof_value(const value_type &val_in, int q_point);
-
- /**
- * Same as above, except that the quadrature point is computed from the
- * thread id.
- */
- DEAL_II_HOST_DEVICE gradient_type
- get_gradient(int q_point) const;
-
- /**
- * Same as above, except that the quadrature point is computed from the
- * thread id.
- */
- DEAL_II_HOST_DEVICE void
- submit_gradient(const gradient_type &grad_in, int q_point);
-
- // clang-format off
- /**
- * Same as above, except that the functor @p func only takes a single input
- * argument (fe_eval) and computes the quadrature point from the thread id.
- *
- * @p func needs to define
- * \code
- * DEAL_II_HOST_DEVICE void operator()(
- * CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, n_components, Number> *fe_eval) const;
- * \endcode
- */
- // clang-format on
- template <typename Functor>
- DEAL_II_HOST_DEVICE void
- apply_for_each_quad_point(const Functor &func);
-
- private:
- const data_type *data;
- SharedData<dim, Number> *shared_data;
- int cell_id;
- };
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- FEEvaluation(const data_type *data, SharedData<dim, Number> *shdata)
- : data(data)
- , shared_data(shdata)
- , cell_id(shared_data->team_member.league_rank())
- {}
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- read_dof_values(const Number *src)
- {
- static_assert(n_components_ == 1, "This function only supports FE with one \
- components");
- // Populate the scratch memory
- Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
- n_q_points),
- [&](const int &i) {
- shared_data->values(i) =
- src[data->local_to_global(cell_id, i)];
- });
- shared_data->team_member.team_barrier();
-
- internal::resolve_hanging_nodes<dim, fe_degree, false>(
- shared_data->team_member,
- data->constraint_weights,
- data->constraint_mask(cell_id),
- shared_data->values);
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- distribute_local_to_global(Number *dst) const
- {
- static_assert(n_components_ == 1, "This function only supports FE with one \
- components");
-
- internal::resolve_hanging_nodes<dim, fe_degree, true>(
- shared_data->team_member,
- data->constraint_weights,
- data->constraint_mask(cell_id),
- shared_data->values);
-
- if (data->use_coloring)
- {
- Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
- n_q_points),
- [&](const int &i) {
- dst[data->local_to_global(cell_id, i)] +=
- shared_data->values(i);
- });
- }
- else
- {
- Kokkos::parallel_for(
- Kokkos::TeamThreadRange(shared_data->team_member, n_q_points),
- [&](const int &i) {
- Kokkos::atomic_add(&dst[data->local_to_global(cell_id, i)],
- shared_data->values(i));
- });
- }
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::evaluate(
- const EvaluationFlags::EvaluationFlags evaluate_flag)
- {
- // First evaluate the gradients because it requires values that will be
- // changed if evaluate_val is true
- internal::EvaluatorTensorProduct<
- internal::EvaluatorVariant::evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>
- evaluator_tensor_product(shared_data->team_member,
- data->shape_values,
- data->shape_gradients,
- data->co_shape_gradients);
-
- if ((evaluate_flag & EvaluationFlags::values) &&
- (evaluate_flag & EvaluationFlags::gradients))
- {
- evaluator_tensor_product.evaluate_values_and_gradients(
- shared_data->values, shared_data->gradients);
- shared_data->team_member.team_barrier();
- }
- else if (evaluate_flag & EvaluationFlags::gradients)
- {
- evaluator_tensor_product.evaluate_gradients(shared_data->values,
- shared_data->gradients);
- shared_data->team_member.team_barrier();
- }
- else if (evaluate_flag & EvaluationFlags::values)
- {
- evaluator_tensor_product.evaluate_values(shared_data->values);
- shared_data->team_member.team_barrier();
- }
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::evaluate(
- const bool evaluate_val,
- const bool evaluate_grad)
- {
- evaluate(
- (evaluate_val ? EvaluationFlags::values : EvaluationFlags::nothing) |
- (evaluate_grad ? EvaluationFlags::gradients : EvaluationFlags::nothing));
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::integrate(
- const EvaluationFlags::EvaluationFlags integration_flag)
- {
- internal::EvaluatorTensorProduct<
- internal::EvaluatorVariant::evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>
- evaluator_tensor_product(shared_data->team_member,
- data->shape_values,
- data->shape_gradients,
- data->co_shape_gradients);
-
- if ((integration_flag & EvaluationFlags::values) &&
- (integration_flag & EvaluationFlags::gradients))
- {
- evaluator_tensor_product.integrate_values_and_gradients(
- shared_data->values, shared_data->gradients);
- }
- else if (integration_flag & EvaluationFlags::values)
- {
- evaluator_tensor_product.integrate_values(shared_data->values);
- shared_data->team_member.team_barrier();
- }
- else if (integration_flag & EvaluationFlags::gradients)
- {
- evaluator_tensor_product.template integrate_gradients<false>(
- shared_data->values, shared_data->gradients);
- shared_data->team_member.team_barrier();
- }
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::integrate(
- const bool integrate_val,
- const bool integrate_grad)
- {
- integrate(
- (integrate_val ? EvaluationFlags::values : EvaluationFlags::nothing) |
- (integrate_grad ? EvaluationFlags::gradients : EvaluationFlags::nothing));
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE typename FEEvaluation<dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- Number>::value_type
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::get_value(
- int q_point) const
- {
- return shared_data->values(q_point);
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE typename FEEvaluation<dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- Number>::value_type
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- get_dof_value(int q_point) const
- {
- return shared_data->values(q_point);
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- submit_value(const value_type &val_in, int q_point)
- {
- shared_data->values(q_point) = val_in * data->JxW(cell_id, q_point);
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- submit_dof_value(const value_type &val_in, int q_point)
- {
- shared_data->values(q_point) = val_in;
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE typename FEEvaluation<dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- Number>::gradient_type
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- get_gradient(int q_point) const
- {
- static_assert(n_components_ == 1, "This function only supports FE with one \
- components");
-
- gradient_type grad;
- for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
- {
- Number tmp = 0.;
- for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
- tmp += data->inv_jacobian(cell_id, q_point, d_2, d_1) *
- shared_data->gradients(q_point, d_2);
- grad[d_1] = tmp;
- }
-
- return grad;
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- submit_gradient(const gradient_type &grad_in, int q_point)
- {
- for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
- {
- Number tmp = 0.;
- for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
- tmp += data->inv_jacobian(cell_id, q_point, d_1, d_2) * grad_in[d_2];
- shared_data->gradients(q_point, d_1) =
- tmp * data->JxW(cell_id, q_point);
- }
- }
-
-
-
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- template <typename Functor>
- DEAL_II_HOST_DEVICE void
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- apply_for_each_quad_point(const Functor &func)
- {
- Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
- n_q_points),
- [&](const int &i) { func(this, i); });
- shared_data->team_member.team_barrier();
- }
-
-
-
-#ifndef DOXYGEN
- template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components_,
- typename Number>
- constexpr unsigned int
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- n_q_points;
-#endif
+ using namespace Portable;
} // namespace CUDAWrappers
DEAL_II_NAMESPACE_CLOSE
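What used to be the full implementation of CUDAWrappers::FEEvaluation thus
reduces to an include of the new header plus a using-directive that makes the
Portable names visible under the old namespace. As a rough illustration
(assuming the default template parameters are unchanged), code written against
the old spelling keeps compiling:
@code
// Both names now refer to the same class template:
CUDAWrappers::FEEvaluation<dim, fe_degree> fe_eval_old(gpu_data, shared_data);
Portable::FEEvaluation<dim, fe_degree>     fe_eval_new(gpu_data, shared_data);
@endcode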
#ifndef dealii_cuda_hanging_nodes_internal_h
#define dealii_cuda_hanging_nodes_internal_h
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/cuda_size.h>
-
-#include <deal.II/matrix_free/hanging_nodes_internal.h>
-
-#include <Kokkos_Macros.hpp>
+#include <deal.II/matrix_free/portable_hanging_nodes_internal.h>
DEAL_II_NAMESPACE_OPEN
namespace CUDAWrappers
{
- namespace internal
- {
- //------------------------------------------------------------------------//
- // Functions for resolving the hanging node constraints on the GPU //
- //------------------------------------------------------------------------//
- template <unsigned int size>
- DEAL_II_HOST_DEVICE inline unsigned int
- index2(unsigned int i, unsigned int j)
- {
- return i + size * j;
- }
-
-
-
- template <unsigned int size>
- DEAL_II_HOST_DEVICE inline unsigned int
- index3(unsigned int i, unsigned int j, unsigned int k)
- {
- return i + size * j + size * size * k;
- }
-
-
-
- template <unsigned int fe_degree, unsigned int direction>
- DEAL_II_HOST_DEVICE inline bool
- is_constrained_dof_2d(
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- &constraint_mask,
- const unsigned int x_idx,
- const unsigned int y_idx)
- {
- return ((direction == 0) &&
- (((constraint_mask & dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::subcell_y) !=
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- unconstrained) ?
- (y_idx == 0) :
- (y_idx == fe_degree))) ||
- ((direction == 1) &&
- (((constraint_mask & dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::subcell_x) !=
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- unconstrained) ?
- (x_idx == 0) :
- (x_idx == fe_degree)));
- }
-
- template <unsigned int fe_degree, unsigned int direction>
- DEAL_II_HOST_DEVICE inline bool
- is_constrained_dof_3d(
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- &constraint_mask,
- const unsigned int x_idx,
- const unsigned int y_idx,
- const unsigned int z_idx,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1_type,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2_type,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds edge)
- {
- const unsigned int face1_idx = (direction == 0) ? y_idx :
- (direction == 1) ? z_idx :
- x_idx;
- const unsigned int face2_idx = (direction == 0) ? z_idx :
- (direction == 1) ? x_idx :
- y_idx;
-
- const bool on_face1 = ((constraint_mask & face1_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) ?
- (face1_idx == 0) :
- (face1_idx == fe_degree);
- const bool on_face2 = ((constraint_mask & face2_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) ?
- (face2_idx == 0) :
- (face2_idx == fe_degree);
- return (
- (((constraint_mask & face1) != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- on_face1) ||
- (((constraint_mask & face2) != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- on_face2) ||
- (((constraint_mask & edge) != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- on_face1 && on_face2));
- }
-
-
-
- template <unsigned int fe_degree,
- unsigned int direction,
- bool transpose,
- typename Number>
- DEAL_II_HOST_DEVICE inline void
- interpolate_boundary_2d(
- const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- constraint_weights,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- &constraint_mask,
- Kokkos::View<Number *,
- MemorySpace::Default::kokkos_space::execution_space::
- scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
- {
- constexpr unsigned int n_q_points_1d = fe_degree + 1;
- constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2);
-
- // Flag is true if dof is constrained for the given direction and the
- // given face.
- const bool constrained_face =
- (constraint_mask &
- (((direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_y :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- unconstrained) |
- ((direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_x :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- unconstrained))) !=
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::unconstrained;
-
- Number tmp[n_q_points];
- Kokkos::parallel_for(
- Kokkos::TeamThreadRange(team_member, n_q_points),
- [&](const int &q_point) {
- const unsigned int x_idx = q_point % n_q_points_1d;
- const unsigned int y_idx = q_point / n_q_points_1d;
-
- const auto this_type =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- subcell_x :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y;
-
- const unsigned int interp_idx = (direction == 0) ? x_idx : y_idx;
- tmp[q_point] = 0;
-
- // Flag is true if for the given direction, the dof is constrained
- // with the right type and is on the correct side (left (= 0) or right
- // (= fe_degree))
- const bool constrained_dof =
- is_constrained_dof_2d<fe_degree, direction>(constraint_mask,
- x_idx,
- y_idx);
-
- if (constrained_face && constrained_dof)
- {
- const bool type = (constraint_mask & this_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained;
-
- if (type)
- {
- for (unsigned int i = 0; i <= fe_degree; ++i)
- {
- const unsigned int real_idx =
- (direction == 0) ? index2<n_q_points_1d>(i, y_idx) :
- index2<n_q_points_1d>(x_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[i * n_q_points_1d + interp_idx] :
- constraint_weights[interp_idx * n_q_points_1d + i];
- tmp[q_point] += w * values[real_idx];
- }
- }
- else
- {
- for (unsigned int i = 0; i <= fe_degree; ++i)
- {
- const unsigned int real_idx =
- (direction == 0) ? index2<n_q_points_1d>(i, y_idx) :
- index2<n_q_points_1d>(x_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[(fe_degree - i) * n_q_points_1d +
- fe_degree - interp_idx] :
- constraint_weights[(fe_degree - interp_idx) *
- n_q_points_1d +
- fe_degree - i];
- tmp[q_point] += w * values[real_idx];
- }
- }
- }
- });
-
- // The synchronization is done for all the threads in one team with
- // each team being assigned to one element.
- team_member.team_barrier();
- Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points),
- [&](const int &q_point) {
- const unsigned int x_idx = q_point % n_q_points_1d;
- const unsigned int y_idx = q_point / n_q_points_1d;
- const bool constrained_dof =
- is_constrained_dof_2d<fe_degree, direction>(
- constraint_mask, x_idx, y_idx);
- if (constrained_face && constrained_dof)
- values[index2<fe_degree + 1>(x_idx, y_idx)] =
- tmp[q_point];
- });
-
- team_member.team_barrier();
- }
-
-
-
- template <unsigned int fe_degree,
- unsigned int direction,
- bool transpose,
- typename Number>
- DEAL_II_HOST_DEVICE inline void
- interpolate_boundary_3d(
- const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- constraint_weights,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- constraint_mask,
- Kokkos::View<Number *,
- MemorySpace::Default::kokkos_space::execution_space::
- scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
- {
- constexpr unsigned int n_q_points_1d = fe_degree + 1;
- constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3);
-
- const auto this_type =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x :
- (direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_z;
- const auto face1_type =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y :
- (direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_z :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x;
- const auto face2_type =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_z :
- (direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y;
-
- // If computing in x-direction, need to match against face_y or
- // face_z
- const auto face1 =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_y :
- (direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_z :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_x;
- const auto face2 =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_z :
- (direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_x :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_y;
- const auto edge =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_x :
- (direction == 1) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_y :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_z;
- const auto constrained_face = constraint_mask & (face1 | face2 | edge);
-
- Number tmp[n_q_points];
- Kokkos::parallel_for(
- Kokkos::TeamThreadRange(team_member, n_q_points),
- [&](const int &q_point) {
- const unsigned int x_idx = q_point % n_q_points_1d;
- const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d;
- const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d);
-
- const unsigned int interp_idx = (direction == 0) ? x_idx :
- (direction == 1) ? y_idx :
- z_idx;
- const bool constrained_dof =
- is_constrained_dof_3d<fe_degree, direction>(constraint_mask,
- x_idx,
- y_idx,
- z_idx,
- face1_type,
- face2_type,
- face1,
- face2,
- edge);
- tmp[q_point] = 0;
- if ((constrained_face != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- constrained_dof)
- {
- const bool type = (constraint_mask & this_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained;
- if (type)
- {
- for (unsigned int i = 0; i <= fe_degree; ++i)
- {
- const unsigned int real_idx =
- (direction == 0) ?
- index3<fe_degree + 1>(i, y_idx, z_idx) :
- (direction == 1) ?
- index3<fe_degree + 1>(x_idx, i, z_idx) :
- index3<fe_degree + 1>(x_idx, y_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[i * n_q_points_1d + interp_idx] :
- constraint_weights[interp_idx * n_q_points_1d + i];
- tmp[q_point] += w * values[real_idx];
- }
- }
- else
- {
- for (unsigned int i = 0; i <= fe_degree; ++i)
- {
- const unsigned int real_idx =
- (direction == 0) ?
- index3<n_q_points_1d>(i, y_idx, z_idx) :
- (direction == 1) ?
- index3<n_q_points_1d>(x_idx, i, z_idx) :
- index3<n_q_points_1d>(x_idx, y_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[(fe_degree - i) * n_q_points_1d +
- fe_degree - interp_idx] :
- constraint_weights[(fe_degree - interp_idx) *
- n_q_points_1d +
- fe_degree - i];
- tmp[q_point] += w * values[real_idx];
- }
- }
- }
- });
-
- // The synchronization is done for all the threads in one team with
- // each team being assigned to one element.
- team_member.team_barrier();
-
- Kokkos::parallel_for(
- Kokkos::TeamThreadRange(team_member, n_q_points),
- [&](const int &q_point) {
- const unsigned int x_idx = q_point % n_q_points_1d;
- const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d;
- const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d);
- const bool constrained_dof =
- is_constrained_dof_3d<fe_degree, direction>(constraint_mask,
- x_idx,
- y_idx,
- z_idx,
- face1_type,
- face2_type,
- face1,
- face2,
- edge);
- if ((constrained_face != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- constrained_dof)
- values[index3<fe_degree + 1>(x_idx, y_idx, z_idx)] = tmp[q_point];
- });
-
- team_member.team_barrier();
- }
-
-
-
- /**
- * This function resolves the hanging nodes using tensor product.
- *
- * The implementation of this class is explained in Section 3 of
- * @cite ljungkvist2017matrix and in Section 3.4 of
- * @cite kronbichler2019multigrid.
- */
- template <int dim, int fe_degree, bool transpose, typename Number>
- DEAL_II_HOST_DEVICE void
- resolve_hanging_nodes(
- const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- constraint_weights,
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- constraint_mask,
- Kokkos::View<Number *,
- MemorySpace::Default::kokkos_space::execution_space::
- scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
- {
- if (dim == 2)
- {
- interpolate_boundary_2d<fe_degree, 0, transpose>(team_member,
- constraint_weights,
- constraint_mask,
- values);
-
- interpolate_boundary_2d<fe_degree, 1, transpose>(team_member,
- constraint_weights,
- constraint_mask,
- values);
- }
- else if (dim == 3)
- {
- // Interpolate y and z faces (x-direction)
- interpolate_boundary_3d<fe_degree, 0, transpose>(team_member,
- constraint_weights,
- constraint_mask,
- values);
- // Interpolate x and z faces (y-direction)
- interpolate_boundary_3d<fe_degree, 1, transpose>(team_member,
- constraint_weights,
- constraint_mask,
- values);
- // Interpolate x and y faces (z-direction)
- interpolate_boundary_3d<fe_degree, 2, transpose>(team_member,
- constraint_weights,
- constraint_mask,
- values);
- }
- }
- } // namespace internal
+ using namespace Portable;
} // namespace CUDAWrappers
DEAL_II_NAMESPACE_CLOSE
#ifndef dealii_cuda_matrix_free_h
#define dealii_cuda_matrix_free_h
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/cuda_size.h>
-#include <deal.II/base/memory_space.h>
-#include <deal.II/base/mpi_stub.h>
-#include <deal.II/base/partitioner.h>
-#include <deal.II/base/quadrature.h>
-#include <deal.II/base/tensor.h>
-#include <deal.II/base/utilities.h>
-
-#include <deal.II/dofs/dof_handler.h>
-
-#include <deal.II/fe/fe_update_flags.h>
-#include <deal.II/fe/mapping.h>
-
-#include <deal.II/grid/filtered_iterator.h>
-
-#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/lac/cuda_vector.h>
-#include <deal.II/lac/la_parallel_vector.h>
-
-#include <Kokkos_Core.hpp>
-
-
+#include <deal.II/matrix_free/portable_matrix_free.h>
DEAL_II_NAMESPACE_OPEN
-// Forward declaration
-namespace internal
-{
- namespace MatrixFreeFunctions
- {
- enum class ConstraintKinds : std::uint16_t;
- }
-} // namespace internal
-
namespace CUDAWrappers
{
- // forward declaration
-#ifndef DOXYGEN
- namespace internal
- {
- template <int dim, typename Number>
- class ReinitHelper;
- }
-#endif
-
- /**
- * This class collects all the data that is stored for the matrix free
- * implementation. The storage scheme is tailored towards several loops
- * performed with the same data, i.e., typically doing many matrix-vector
- * products or residual computations on the same mesh.
- *
- * This class does not implement any operations involving finite element basis
- * functions, i.e., regarding the operation performed on the cells. For these
- * operations, the class FEEvaluation is designed to use the data collected in
- * this class.
- *
- * This class implements a loop over all cells (cell_loop()). This loop is
- * scheduled in such a way that cells that share degrees of freedom
- * are not worked on simultaneously, which implies that it is possible to
- * write to vectors in parallel without having to explicitly synchronize
- * access to these vectors and matrices. This class does not implement any
- * shape values, all it does is to cache the respective data. To implement
- * finite element operations, use the class CUDAWrappers::FEEvaluation.
- *
- * This class traverse the cells in a different order than the usual
- * Triangulation class in deal.II.
- *
- * @note Only float and double are supported.
- *
- * @ingroup CUDAWrappers
- */
- template <int dim, typename Number = double>
- class MatrixFree : public Subscriptor
- {
- public:
- using jacobian_type = Tensor<2, dim, Tensor<1, dim, Number>>;
- using point_type = Point<dim, Number>;
- using CellFilter =
- FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
-
- /**
- * Standardized data struct to pipe additional data to MatrixFree.
- */
- struct AdditionalData
- {
- /**
- * Constructor.
- */
- AdditionalData(const UpdateFlags mapping_update_flags =
- update_gradients | update_JxW_values |
- update_quadrature_points,
- const bool use_coloring = false,
- const bool overlap_communication_computation = false)
- : mapping_update_flags(mapping_update_flags)
- , use_coloring(use_coloring)
- , overlap_communication_computation(overlap_communication_computation)
- {
-#ifndef DEAL_II_MPI_WITH_DEVICE_SUPPORT
- AssertThrow(
- overlap_communication_computation == false,
- ExcMessage(
- "Overlapping communication and computation requires CUDA-aware MPI."));
-#endif
- if (overlap_communication_computation == true)
- AssertThrow(
- use_coloring == false || overlap_communication_computation == false,
- ExcMessage(
- "Overlapping communication and coloring are incompatible options. Only one of them can be enabled."));
- }
- /**
- * This flag is used to determine which quantities should be cached. This
- * class can cache data needed for gradient computations (inverse
- * Jacobians), Jacobian determinants (JxW), quadrature points as well as
- * data for Hessians (derivative of Jacobians). By default, only data for
- * gradients and Jacobian determinants times quadrature weights, JxW, are
- * cached. If quadrature points of second derivatives are needed, they
- * must be specified by this field.
- */
- UpdateFlags mapping_update_flags;
-
- /**
- * If true, use graph coloring. Otherwise, use atomic operations. Graph
- * coloring ensures bitwise reproducibility but is slower on Pascal and
- * newer architectures.
- */
- bool use_coloring;
-
- /**
- * Overlap MPI communications with computation. This requires CUDA-aware
- * MPI and use_coloring must be false.
- */
- bool overlap_communication_computation;
- };
-
- /**
- * Structure which is passed to the kernel. It is used to pass all the
- * necessary information from the CPU to the GPU.
- */
- struct Data
- {
- /**
- * Kokkos::View of the quadrature points.
- */
- Kokkos::View<point_type **, MemorySpace::Default::kokkos_space> q_points;
-
- /**
- * Map the position in the local vector to the position in the global
- * vector.
- */
- Kokkos::View<types::global_dof_index **,
- MemorySpace::Default::kokkos_space>
- local_to_global;
-
- /**
- * Kokkos::View of the inverse Jacobian.
- */
- Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>
- inv_jacobian;
-
- /**
- * Kokkos::View of the Jacobian times the weights.
- */
- Kokkos::View<Number **, MemorySpace::Default::kokkos_space> JxW;
-
- /**
- * Mask deciding where constraints are set on a given cell.
- */
- Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
- MemorySpace::Default::kokkos_space>
- constraint_mask;
-
- /**
- * Values of the shape functions.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
-
- /**
- * Gradients of the shape functions.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_gradients;
-
- /**
- * Gradients of the shape functions for collocation methods.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- co_shape_gradients;
-
- /**
- * Weights used when resolving hanginf nodes.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- constraint_weights;
-
- /**
- * Number of cells.
- */
- unsigned int n_cells;
-
- /**
- * Length of the padding.
- */
- unsigned int padding_length;
-
- /**
- * Row start (including padding).
- */
- unsigned int row_start;
-
- /**
- * If true, use graph coloring has been used and we can simply add into
- * the destingation vector. Otherwise, use atomic operations.
- */
- bool use_coloring;
-
- /**
- * Return the quadrature point index local. The index is
- * only unique for a given MPI process.
- */
- DEAL_II_HOST_DEVICE unsigned int
- local_q_point_id(const unsigned int cell,
- const unsigned int n_q_points,
- const unsigned int q_point) const
- {
- return (row_start / padding_length + cell) * n_q_points + q_point;
- }
-
-
- /**
- * Return the quadrature point.
- */
- DEAL_II_HOST_DEVICE
- typename CUDAWrappers::MatrixFree<dim, Number>::point_type &
- get_quadrature_point(const unsigned int cell,
- const unsigned int q_point) const
- {
- return q_points(cell, q_point);
- }
- };
-
- /**
- * Default constructor.
- */
- MatrixFree();
-
- /**
- * Return the length of the padding.
- */
- unsigned int
- get_padding_length() const;
-
- /**
- * Extracts the information needed to perform loops over cells. The
- * DoFHandler and AffineConstraints objects describe the layout of
- * degrees of freedom, the DoFHandler and the mapping describe the
- * transformation from unit to real cell, and the finite element
- * underlying the DoFHandler together with the quadrature formula
- * describe the local operations. This function takes an IteratorFilters
- * object (predicate) to loop over a subset of the active cells. When using
- * MPI, the predicate should filter out non locally owned cells.
- */
- template <typename IteratorFiltersType>
- void
- reinit(const Mapping<dim> &mapping,
- const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const IteratorFiltersType &iterator_filter,
- const AdditionalData &additional_data = AdditionalData());
-
- /**
- * Same as above using Iterators::LocallyOwnedCell() as predicate.
- */
- void
- reinit(const Mapping<dim> &mapping,
- const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const AdditionalData &additional_data = AdditionalData());
-
- /**
- * Initializes the data structures. Same as above but using a Q1 mapping.
- */
- void
- reinit(const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const AdditionalData &additional_data = AdditionalData());
-
- /**
- * Return the Data structure associated with @p color.
- */
- Data
- get_data(unsigned int color) const;
-
- // clang-format off
- /**
- * This method runs the loop over all cells and apply the local operation on
- * each element in parallel. @p func is a functor which is applied on each color.
- *
- * @p func needs to define
- * \code
- * DEAL_II_HOST_DEVICE void operator()(
- * const unsigned int cell,
- * const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- * CUDAWrappers::SharedData<dim, Number> * shared_data,
- * const Number * src,
- * Number * dst) const;
- * static const unsigned int n_dofs_1d;
- * static const unsigned int n_local_dofs;
- * static const unsigned int n_q_points;
- * \endcode
- */
- // clang-format on
- template <typename Functor, typename VectorType>
- void
- cell_loop(const Functor &func,
- const VectorType &src,
- VectorType &dst) const;
-
- /**
- * This method runs the loop over all cells and apply the local operation on
- * each element in parallel. This function is very similar to cell_loop()
- * but it uses a simpler functor.
- *
- * @p func needs to define
- * \code
- * DEAL_II_HOST_DEVICE void operator()(
- * const unsigned int cell,
- * const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data);
- * static const unsigned int n_dofs_1d;
- * static const unsigned int n_local_dofs;
- * static const unsigned int n_q_points;
- * \endcode
- */
- template <typename Functor>
- void
- evaluate_coefficients(Functor func) const;
-
- /**
- * Copy the values of the constrained entries from @p src to @p dst. This is
- * used to impose zero Dirichlet boundary condition.
- */
- template <typename VectorType>
- void
- copy_constrained_values(const VectorType &src, VectorType &dst) const;
-
- /**
- * Set the entries in @p dst corresponding to constrained values to @p val.
- * The main purpose of this function is to set the constrained entries of
- * the source vector used in cell_loop() to zero.
- */
- template <typename VectorType>
- void
- set_constrained_values(const Number val, VectorType &dst) const;
-
-#ifdef DEAL_II_WITH_CUDA
- /**
- * Initialize a serial vector. The size corresponds to the number of degrees
- * of freedom in the DoFHandler object.
- */
- void
- initialize_dof_vector(
- LinearAlgebra::CUDAWrappers::Vector<Number> &vec) const;
-#endif
-
- /**
- * Initialize a distributed vector. The local elements correspond to the
- * locally owned degrees of freedom and the ghost elements correspond to the
- * (additional) locally relevant dofs.
- */
- void
- initialize_dof_vector(
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &vec)
- const;
-
- /**
- * Return the colored graph of locally owned active cells.
- */
- const std::vector<std::vector<CellFilter>> &
- get_colored_graph() const;
-
- /**
- * Return the partitioner that represents the locally owned data and the
- * ghost indices where access is needed to for the cell loop. The
- * partitioner is constructed from the locally owned dofs and ghost dofs
- * given by the respective fields. If you want to have specific information
- * about these objects, you can query them with the respective access
- * functions. If you just want to initialize a (parallel) vector, you should
- * usually prefer this data structure as the data exchange information can
- * be reused from one vector to another.
- */
- const std::shared_ptr<const Utilities::MPI::Partitioner> &
- get_vector_partitioner() const;
-
- /**
- * Return the DoFHandler.
- */
- const DoFHandler<dim> &
- get_dof_handler() const;
-
- /**
- * Return an approximation of the memory consumption of this class in bytes.
- */
- std::size_t
- memory_consumption() const;
-
- private:
- /**
- * Initializes the data structures.
- */
- template <typename IteratorFiltersType>
- void
- internal_reinit(const Mapping<dim> &mapping,
- const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const IteratorFiltersType &iterator_filter,
- const std::shared_ptr<const MPI_Comm> &comm,
- const AdditionalData additional_data);
-
- /**
- * Helper function. Loop over all the cells and apply the functor on each
- * element in parallel. This function is used when MPI is not used.
- */
- template <typename Functor, typename VectorType>
- void
- serial_cell_loop(const Functor &func,
- const VectorType &src,
- VectorType &dst) const;
-
- /**
- * Helper function. Loop over all the cells and apply the functor on each
- * element in parallel. This function is used when MPI is used.
- */
- template <typename Functor>
- void
- distributed_cell_loop(
- const Functor &func,
- const LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
- &src,
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &dst)
- const;
-
-#ifdef DEAL_II_WITH_CUDA
- /**
- * This function should never be called. Calling it results in an internal
- * error. This function exists only because cell_loop needs
- * distributed_cell_loop() to exist for LinearAlgebra::CUDAWrappers::Vector.
- */
- template <typename Functor>
- void
- distributed_cell_loop(
- const Functor &func,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
- LinearAlgebra::CUDAWrappers::Vector<Number> &dst) const;
-#endif
-
- /**
- * Unique ID associated with the object.
- */
- int my_id;
-
- /**
- * If true, use graph coloring. Otherwise, use atomic operations. Graph
- * coloring ensures bitwise reproducibility but is slower on Pascal and
- * newer architectures.
- */
- bool use_coloring;
-
- /**
- * Overlap MPI communications with computation. This requires CUDA-aware
- * MPI and use_coloring must be false.
- */
- bool overlap_communication_computation;
-
- /**
- * Total number of degrees of freedom.
- */
- types::global_dof_index n_dofs;
-
- /**
- * Degree of the finite element used.
- */
- unsigned int fe_degree;
-
- /**
- * Number of degrees of freedom per cell.
- */
- unsigned int dofs_per_cell;
-
- /**
- * Number of constrained degrees of freedom.
- */
- unsigned int n_constrained_dofs;
-
- /**
- * Number of quadrature points per cells.
- */
- unsigned int q_points_per_cell;
-
- /**
- * Number of colors produced by the graph coloring algorithm.
- */
- unsigned int n_colors;
-
- /**
- * Number of cells in each color.
- */
- std::vector<unsigned int> n_cells;
-
- /**
- * Vector of Kokkos::View to the quadrature points associated to the cells
- * of each color.
- */
- std::vector<Kokkos::View<point_type **, MemorySpace::Default::kokkos_space>>
- q_points;
-
- /**
- * Map the position in the local vector to the position in the global
- * vector.
- */
- std::vector<Kokkos::View<types::global_dof_index **,
- MemorySpace::Default::kokkos_space>>
- local_to_global;
-
- /**
- * Vector of Kokkos::View of the inverse Jacobian associated to the cells of
- * each color.
- */
- std::vector<
- Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>>
- inv_jacobian;
-
- /**
- * Vector of Kokkos::View to the Jacobian times the weights associated to
- * the cells of each color.
- */
- std::vector<Kokkos::View<Number **, MemorySpace::Default::kokkos_space>>
- JxW;
-
- /**
- * Kokkos::View to the constrained degrees of freedom.
- */
- Kokkos::View<types::global_dof_index *, MemorySpace::Default::kokkos_space>
- constrained_dofs;
-
- /**
- * Mask deciding where constraints are set on a given cell.
- */
- std::vector<
- Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
- MemorySpace::Default::kokkos_space>>
- constraint_mask;
-
- /**
- * Values of the shape functions.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
-
- /**
- * Gradients of the shape functions.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_gradients;
-
- /**
- * Gradients of the shape functions for collocation methods.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- co_shape_gradients;
-
- /**
- * Weights used when resolving hanginf nodes.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- constraint_weights;
-
- /**
- * Shared pointer to a Partitioner for distributed Vectors used in
- * cell_loop. When MPI is not used the pointer is null.
- */
- std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
-
-
- /**
- * Length of the padding (closest power of two larger than or equal to
- * the number of thread).
- */
- unsigned int padding_length;
-
- /**
- * Row start of each color.
- */
- std::vector<unsigned int> row_start;
-
- /**
- * Pointer to the DoFHandler associated with the object.
- */
- const DoFHandler<dim> *dof_handler;
-
- /**
- * Colored graphed of locally owned active cells.
- */
- std::vector<std::vector<CellFilter>> graph;
-
- friend class internal::ReinitHelper<dim, Number>;
- };
-
-
-
- template <int dim, typename Number>
- struct SharedData
- {
- using TeamHandle = Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type;
-
- using SharedView1D = Kokkos::View<
- Number *,
- MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
- using SharedView2D = Kokkos::View<
- Number *[dim],
- MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
-
- DEAL_II_HOST_DEVICE
- SharedData(const TeamHandle &team_member,
- const SharedView1D &values,
- const SharedView2D &gradients)
- : team_member(team_member)
- , values(values)
- , gradients(gradients)
- {}
-
- /**
- * TeamPolicy handle.
- */
- TeamHandle team_member;
-
- /**
- * Memory for dof and quad values.
- */
- SharedView1D values;
-
- /**
- * Memory for computed gradients in reference coordinate system.
- */
- SharedView2D gradients;
- };
-
-
-
- /**
- * Structure which is passed to the kernel. It is used to pass all the
- * necessary information from the CPU to the GPU.
- */
- template <int dim, typename Number>
- struct DataHost
- {
- /**
- * Kokkos::View of quadrature points on the host.
- */
- typename Kokkos::View<Point<dim, Number> **,
- MemorySpace::Default::kokkos_space>::HostMirror
- q_points;
-
- /**
- * Map the position in the local vector to the position in the global
- * vector.
- */
- typename Kokkos::View<types::global_dof_index **,
- MemorySpace::Default::kokkos_space>::HostMirror
- local_to_global;
-
- /**
- * Kokkos::View of inverse Jacobians on the host.
- */
- typename Kokkos::View<Number **[dim][dim],
- MemorySpace::Default::kokkos_space>::HostMirror
- inv_jacobian;
-
- /**
- * Kokkos::View of Jacobian times the weights on the host.
- */
- typename Kokkos::View<Number **,
- MemorySpace::Default::kokkos_space>::HostMirror JxW;
-
- /**
- * Number of cells.
- */
- unsigned int n_cells;
-
- /**
- * Length of the padding.
- */
- unsigned int padding_length;
-
- /**
- * Row start (including padding).
- */
- unsigned int row_start;
-
- /**
- * Mask deciding where constraints are set on a given cell.
- */
- typename Kokkos::View<
- dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
- MemorySpace::Default::kokkos_space>::HostMirror constraint_mask;
-
- /**
- * If true, use graph coloring has been used and we can simply add into
- * the destingation vector. Otherwise, use atomic operations.
- */
- bool use_coloring;
-
-
-
- /**
- * This function is the host version of local_q_point_id().
- */
- unsigned int
- local_q_point_id(const unsigned int cell,
- const unsigned int n_q_points,
- const unsigned int q_point) const
- {
- return (row_start / padding_length + cell) * n_q_points + q_point;
- }
-
-
-
- /**
- * This function is the host version of get_quadrature_point().
- */
- Point<dim, Number>
- get_quadrature_point(const unsigned int cell,
- const unsigned int q_point) const
- {
- return q_points(cell, q_point);
- }
- };
-
-
-
- /**
- * Copy @p data from the @ref GlossDevice "device" to the host. @p update_flags should be
- * identical to the one used in MatrixFree::AdditionalData.
- *
- * @relates CUDAWrappers::MatrixFree
- */
- template <int dim, typename Number>
- DataHost<dim, Number>
- copy_mf_data_to_host(
- const typename dealii::CUDAWrappers::MatrixFree<dim, Number>::Data &data,
- const UpdateFlags &update_flags)
- {
- DataHost<dim, Number> data_host;
-
- data_host.n_cells = data.n_cells;
- data_host.padding_length = data.padding_length;
- data_host.row_start = data.row_start;
- data_host.use_coloring = data.use_coloring;
-
- if (update_flags & update_quadrature_points)
- {
- data_host.q_points = Kokkos::create_mirror(data.q_points);
- Kokkos::deep_copy(data_host.q_points, data.q_points);
- }
-
- data_host.local_to_global = Kokkos::create_mirror(data.local_to_global);
- Kokkos::deep_copy(data_host.local_to_global, data.local_to_global);
-
- if (update_flags & update_gradients)
- {
- data_host.inv_jacobian = Kokkos::create_mirror(data.inv_jacobian);
- Kokkos::deep_copy(data_host.inv_jacobian, data.inv_jacobian);
- }
-
- if (update_flags & update_JxW_values)
- {
- data_host.JxW = Kokkos::create_mirror(data.JxW);
- Kokkos::deep_copy(data_host.JxW, data.JxW);
- }
-
- data_host.constraint_mask = Kokkos::create_mirror(data.constraint_mask);
- Kokkos::deep_copy(data_host.constraint_mask, data.constraint_mask);
-
- return data_host;
- }
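The host mirror produced by copy_mf_data_to_host() makes it possible to revisit
quadrature-point data on the CPU, for instance when post-processing a
coefficient that was filled on the device. A minimal sketch of the access
pattern is given below; `mf_data`, `fe_degree`, the chosen color, and the use
of `get_data()` are assumptions made purely for illustration:
@code
const unsigned int n_q_points = Utilities::pow(fe_degree + 1, dim);

// The update flags must match those used in MatrixFree::AdditionalData.
const auto gpu_data  = mf_data.get_data(/*color=*/0);
const auto host_data = CUDAWrappers::copy_mf_data_to_host<dim, double>(
  gpu_data, update_quadrature_points | update_JxW_values);

for (unsigned int cell = 0; cell < host_data.n_cells; ++cell)
  for (unsigned int q = 0; q < n_q_points; ++q)
    {
      const unsigned int pos =
        host_data.local_q_point_id(cell, n_q_points, q);
      const Point<dim> p = host_data.get_quadrature_point(cell, q);
      // use pos to index a host-side array and p for point-dependent data
    }
@endcode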
-
-
- /*----------------------- Inline functions ---------------------------------*/
-
-#ifndef DOXYGEN
-
- template <int dim, typename Number>
- inline const std::vector<std::vector<
- FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>>> &
- MatrixFree<dim, Number>::get_colored_graph() const
- {
- return graph;
- }
-
-
-
- template <int dim, typename Number>
- inline const std::shared_ptr<const Utilities::MPI::Partitioner> &
- MatrixFree<dim, Number>::get_vector_partitioner() const
- {
- return partitioner;
- }
-
-
-
- template <int dim, typename Number>
- inline const DoFHandler<dim> &
- MatrixFree<dim, Number>::get_dof_handler() const
- {
- Assert(dof_handler != nullptr, ExcNotInitialized());
-
- return *dof_handler;
- }
-
-#endif
-
+ using namespace Portable;
} // namespace CUDAWrappers
DEAL_II_NAMESPACE_CLOSE
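Note that the compatibility `using namespace Portable;` re-exports the new
implementation into the old namespace, so code written against the old names
keeps compiling. A hedged sketch of what this permits, assuming the deprecated
`cuda_matrix_free.h` header is still installed:
@code
#include <deal.II/matrix_free/cuda_matrix_free.h>

#include <type_traits>

// The old and the new spelling now refer to the same class template.
static_assert(
  std::is_same_v<dealii::CUDAWrappers::MatrixFree<3, double>,
                 dealii::Portable::MatrixFree<3, double>>,
  "CUDAWrappers::MatrixFree now refers to Portable::MatrixFree");
@endcode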
#ifndef dealii_cuda_matrix_free_templates_h
#define dealii_cuda_matrix_free_templates_h
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/cuda.h>
-#include <deal.II/base/cuda_size.h>
-#include <deal.II/base/graph_coloring.h>
-#include <deal.II/base/memory_space.h>
-
-#include <deal.II/dofs/dof_tools.h>
-
-#include <deal.II/fe/fe_dgq.h>
-#include <deal.II/fe/fe_values.h>
-#include <deal.II/fe/mapping_q1.h>
-
-#include <deal.II/matrix_free/cuda_hanging_nodes_internal.h>
-#include <deal.II/matrix_free/cuda_matrix_free.h>
-#include <deal.II/matrix_free/shape_info.h>
-
-#include <Kokkos_Core.hpp>
-
-#include <cmath>
-#include <functional>
-#include <string>
-
+#include <deal.II/matrix_free/portable_matrix_free.templates.h>
DEAL_II_NAMESPACE_OPEN
namespace CUDAWrappers
{
- namespace internal
- {
- /**
- * Helper class to (re)initialize MatrixFree object.
- */
- template <int dim, typename Number>
- class ReinitHelper
- {
- public:
- ReinitHelper(
- MatrixFree<dim, Number> *data,
- const Mapping<dim> &mapping,
- const FiniteElement<dim, dim> &fe,
- const Quadrature<1> &quad,
- const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number>
- &shape_info,
- const DoFHandler<dim> &dof_handler,
- const UpdateFlags &update_flags);
-
- void
- resize(const unsigned int n_colors);
-
- template <typename CellFilter>
- void
- fill_data(
- const unsigned int color,
- const std::vector<CellFilter> &graph,
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
-
- private:
- MatrixFree<dim, Number> *data;
- // Local buffer
- std::vector<types::global_dof_index> local_dof_indices;
- FEValues<dim> fe_values;
- // Convert the default dof numbering to a lexicographic one
- const std::vector<unsigned int> &lexicographic_inv;
- std::vector<types::global_dof_index> lexicographic_dof_indices;
- const unsigned int fe_degree;
- const unsigned int dofs_per_cell;
- const unsigned int q_points_per_cell;
- const UpdateFlags &update_flags;
- const unsigned int padding_length;
- dealii::internal::MatrixFreeFunctions::HangingNodes<dim> hanging_nodes;
- };
-
-
-
- template <int dim, typename Number>
- ReinitHelper<dim, Number>::ReinitHelper(
- MatrixFree<dim, Number> *data,
- const Mapping<dim> &mapping,
- const FiniteElement<dim> &fe,
- const Quadrature<1> &quad,
- const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number>
- &shape_info,
- const DoFHandler<dim> &dof_handler,
- const UpdateFlags &update_flags)
- : data(data)
- , fe_values(mapping,
- fe,
- Quadrature<dim>(quad),
- update_inverse_jacobians | update_quadrature_points |
- update_values | update_gradients | update_JxW_values)
- , lexicographic_inv(shape_info.lexicographic_numbering)
- , fe_degree(data->fe_degree)
- , dofs_per_cell(data->dofs_per_cell)
- , q_points_per_cell(data->q_points_per_cell)
- , update_flags(update_flags)
- , padding_length(data->get_padding_length())
- , hanging_nodes(dof_handler.get_triangulation())
- {
- local_dof_indices.resize(data->dofs_per_cell);
- lexicographic_dof_indices.resize(dofs_per_cell);
- fe_values.always_allow_check_for_cell_similarity(true);
- }
-
-
-
- template <int dim, typename Number>
- void
- ReinitHelper<dim, Number>::resize(const unsigned int n_colors)
- {
- // We need at least three colors when we are using CUDA-aware MPI and
- // overlapping the communication
- data->n_cells.resize(std::max(n_colors, 3U), 0);
- data->local_to_global.resize(n_colors);
- data->constraint_mask.resize(n_colors);
-
- data->row_start.resize(n_colors);
-
- if (update_flags & update_quadrature_points)
- data->q_points.resize(n_colors);
-
- if (update_flags & update_JxW_values)
- data->JxW.resize(n_colors);
-
- if (update_flags & update_gradients)
- data->inv_jacobian.resize(n_colors);
- }
-
-
-
- template <int dim, typename Number>
- template <typename CellFilter>
- void
- ReinitHelper<dim, Number>::fill_data(
- const unsigned int color,
- const std::vector<CellFilter> &graph,
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
- {
- const unsigned int n_cells = data->n_cells[color];
-
- // Create the Views
- data->local_to_global[color] =
- Kokkos::View<types::global_dof_index **,
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("local_to_global_" + std::to_string(color),
- Kokkos::WithoutInitializing),
- n_cells,
- dofs_per_cell);
-
- if (update_flags & update_quadrature_points)
- data->q_points[color] =
- Kokkos::View<Point<dim, Number> **,
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("q_points_" + std::to_string(color),
- Kokkos::WithoutInitializing),
- n_cells,
- q_points_per_cell);
-
- if (update_flags & update_JxW_values)
- data->JxW[color] =
- Kokkos::View<Number **, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("JxW_" + std::to_string(color),
- Kokkos::WithoutInitializing),
- n_cells,
- dofs_per_cell);
-
- if (update_flags & update_gradients)
- data->inv_jacobian[color] =
- Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("inv_jacobian_" + std::to_string(color),
- Kokkos::WithoutInitializing),
- n_cells,
- dofs_per_cell);
-
- // Initialize to zero, i.e., unconstrained cell
- data->constraint_mask[color] =
- Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
- MemorySpace::Default::kokkos_space>(
- "constraint_mask_" + std::to_string(color), n_cells);
-
-      // Create the host mirror Views and fill them
- auto constraint_mask_host =
- Kokkos::create_mirror_view(data->constraint_mask[color]);
-
- typename std::remove_reference_t<
- decltype(data->q_points[color])>::HostMirror q_points_host;
- typename std::remove_reference_t<decltype(data->JxW[color])>::HostMirror
- JxW_host;
- typename std::remove_reference_t<
- decltype(data->inv_jacobian[color])>::HostMirror inv_jacobian_host;
-#if KOKKOS_VERSION >= 30600
- auto local_to_global_host =
- Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
- data->local_to_global[color]);
- if (update_flags & update_quadrature_points)
- q_points_host = Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
- data->q_points[color]);
- if (update_flags & update_JxW_values)
- JxW_host = Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
- data->JxW[color]);
- if (update_flags & update_gradients)
- inv_jacobian_host =
- Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
- data->inv_jacobian[color]);
-#else
- auto local_to_global_host =
- Kokkos::create_mirror_view(data->local_to_global[color]);
- if (update_flags & update_quadrature_points)
- q_points_host = Kokkos::create_mirror_view(data->q_points[color]);
- if (update_flags & update_JxW_values)
- JxW_host = Kokkos::create_mirror_view(data->JxW[color]);
- if (update_flags & update_gradients)
- inv_jacobian_host =
- Kokkos::create_mirror_view(data->inv_jacobian[color]);
-#endif
-
- auto cell = graph.cbegin(), end_cell = graph.cend();
- for (unsigned int cell_id = 0; cell != end_cell; ++cell, ++cell_id)
- {
- (*cell)->get_dof_indices(local_dof_indices);
- // When using MPI, we need to transform the local_dof_indices, which
- // contain global numbers of dof indices in the MPI universe, to get
- // local (to the current MPI process) dof indices.
- if (partitioner)
- for (auto &index : local_dof_indices)
- index = partitioner->global_to_local(index);
-
- for (unsigned int i = 0; i < dofs_per_cell; ++i)
- lexicographic_dof_indices[i] =
- local_dof_indices[lexicographic_inv[i]];
-
- const ArrayView<
- dealii::internal::MatrixFreeFunctions::ConstraintKinds>
- cell_id_view(constraint_mask_host[cell_id]);
-
- hanging_nodes.setup_constraints(*cell,
- partitioner,
- {lexicographic_inv},
- lexicographic_dof_indices,
- cell_id_view);
-
- for (unsigned int i = 0; i < dofs_per_cell; ++i)
- local_to_global_host(cell_id, i) = lexicographic_dof_indices[i];
-
- fe_values.reinit(*cell);
-
- // Quadrature points
- if (update_flags & update_quadrature_points)
- {
- for (unsigned int i = 0; i < q_points_per_cell; ++i)
- q_points_host(cell_id, i) = fe_values.quadrature_point(i);
- }
-
- if (update_flags & update_JxW_values)
- {
- for (unsigned int i = 0; i < q_points_per_cell; ++i)
- JxW_host(cell_id, i) = fe_values.JxW(i);
- }
-
- if (update_flags & update_gradients)
- {
- for (unsigned int i = 0; i < q_points_per_cell; ++i)
- for (unsigned int d = 0; d < dim; ++d)
- for (unsigned int e = 0; e < dim; ++e)
- inv_jacobian_host(cell_id, i, d, e) =
- fe_values.inverse_jacobian(i)[d][e];
- }
- }
-
- // Copy the data to the device
- Kokkos::deep_copy(data->constraint_mask[color], constraint_mask_host);
- Kokkos::deep_copy(data->local_to_global[color], local_to_global_host);
- if (update_flags & update_quadrature_points)
- Kokkos::deep_copy(data->q_points[color], q_points_host);
- if (update_flags & update_JxW_values)
- Kokkos::deep_copy(data->JxW[color], JxW_host);
- if (update_flags & update_gradients)
- Kokkos::deep_copy(data->inv_jacobian[color], inv_jacobian_host);
- }
-
-
-
- template <int dim, typename number>
- std::vector<types::global_dof_index>
- get_conflict_indices(
- const FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
- &cell,
- const AffineConstraints<number> &constraints)
- {
- std::vector<types::global_dof_index> local_dof_indices(
- cell->get_fe().n_dofs_per_cell());
- cell->get_dof_indices(local_dof_indices);
- constraints.resolve_indices(local_dof_indices);
-
- return local_dof_indices;
- }
-
-
-
- template <typename VectorType>
- struct VectorLocalSize
- {
- static unsigned int
- get(const VectorType &vec)
- {
- return vec.locally_owned_size();
- }
- };
-
-#ifdef DEAL_II_WITH_CUDA
- template <>
- struct VectorLocalSize<LinearAlgebra::CUDAWrappers::Vector<double>>
- {
- static unsigned int
- get(const LinearAlgebra::CUDAWrappers::Vector<double> &vec)
- {
- return vec.size();
- }
- };
-
- template <>
- struct VectorLocalSize<LinearAlgebra::CUDAWrappers::Vector<float>>
- {
- static unsigned int
- get(const LinearAlgebra::CUDAWrappers::Vector<float> &vec)
- {
- return vec.size();
- }
- };
-#endif
-
-
-
- template <int dim, typename Number, typename Functor>
- struct ApplyKernel
- {
- using TeamHandle = Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type;
- using SharedView1D =
- Kokkos::View<Number *,
- MemorySpace::Default::kokkos_space::execution_space::
- scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
- using SharedView2D =
- Kokkos::View<Number *[dim],
- MemorySpace::Default::kokkos_space::execution_space::
- scratch_memory_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
-
- ApplyKernel(Functor func,
- const typename MatrixFree<dim, Number>::Data gpu_data,
- Number *const src,
- Number *dst)
- : func(func)
- , gpu_data(gpu_data)
- , src(src)
- , dst(dst)
- {}
-
- Functor func;
- const typename MatrixFree<dim, Number>::Data gpu_data;
- Number *const src;
- Number *dst;
-
-
- // Provide the shared memory capacity. This function takes the team_size
- // as an argument, which allows team_size dependent allocations.
- size_t
- team_shmem_size(int /*team_size*/) const
- {
- return SharedView1D::shmem_size(Functor::n_local_dofs) +
- SharedView2D::shmem_size(Functor::n_local_dofs);
- }
-
-
- DEAL_II_HOST_DEVICE
- void
- operator()(const TeamHandle &team_member) const
- {
- // Get the scratch memory
- SharedView1D values(team_member.team_shmem(), Functor::n_local_dofs);
- SharedView2D gradients(team_member.team_shmem(), Functor::n_local_dofs);
-
- SharedData<dim, Number> shared_data(team_member, values, gradients);
- func(team_member.league_rank(), &gpu_data, &shared_data, src, dst);
- }
- };
- } // namespace internal
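As a sanity check on the scratch memory requested by `team_shmem_size()`: each
team allocates one `Number` per local degree of freedom for the values and
`dim` further `Number`s per local degree of freedom for the gradients. A
worked sketch for an assumed configuration:
@code
constexpr int dim       = 3; // assumption for illustration
constexpr int fe_degree = 2; // assumption for illustration
constexpr int n_local_dofs =
  (fe_degree + 1) * (fe_degree + 1) * (fe_degree + 1); // = 27

constexpr std::size_t bytes_values    = n_local_dofs * sizeof(double);       // 216
constexpr std::size_t bytes_gradients = n_local_dofs * dim * sizeof(double); // 648

// team_shmem_size() requests roughly bytes_values + bytes_gradients
// (~864 bytes here); Kokkos::View::shmem_size() may add alignment padding.
@endcode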
-
-
-
- template <int dim, typename Number>
- MatrixFree<dim, Number>::MatrixFree()
- : my_id(-1)
- , n_dofs(0)
- , padding_length(0)
- , dof_handler(nullptr)
- {}
-
-
-
- template <int dim, typename Number>
- template <typename IteratorFiltersType>
- void
- MatrixFree<dim, Number>::reinit(const Mapping<dim> &mapping,
- const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const IteratorFiltersType &iterator_filter,
- const AdditionalData &additional_data)
- {
- const auto &triangulation = dof_handler.get_triangulation();
- if (const auto parallel_triangulation =
- dynamic_cast<const parallel::TriangulationBase<dim> *>(
- &triangulation))
- internal_reinit(mapping,
- dof_handler,
- constraints,
- quad,
- iterator_filter,
- std::make_shared<const MPI_Comm>(
- parallel_triangulation->get_communicator()),
- additional_data);
- else
- internal_reinit(mapping,
- dof_handler,
- constraints,
- quad,
- iterator_filter,
- nullptr,
- additional_data);
- }
-
-
-
- template <int dim, typename Number>
- void
- MatrixFree<dim, Number>::reinit(const Mapping<dim> &mapping,
- const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const AdditionalData &additional_data)
- {
- IteratorFilters::LocallyOwnedCell locally_owned_cell_filter;
- reinit(mapping,
- dof_handler,
- constraints,
- quad,
- locally_owned_cell_filter,
- additional_data);
- }
-
-
-
- template <int dim, typename Number>
- void
- MatrixFree<dim, Number>::reinit(const DoFHandler<dim> &dof_handler,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const AdditionalData &additional_data)
- {
- reinit(StaticMappingQ1<dim>::mapping,
- dof_handler,
- constraints,
- quad,
- additional_data);
- }
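Seen from the user side, the reinit() overloads above are typically driven as
in the following hedged sketch, in which `dof_handler`, `constraints`, and
`fe_degree` are assumptions made for illustration:
@code
CUDAWrappers::MatrixFree<dim, double> mf_data;

typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
                                       update_JxW_values |
                                       update_quadrature_points;

// n_q_points_1d must equal fe_degree+1 (see the Assert in internal_reinit()).
const QGauss<1> quadrature(fe_degree + 1);
mf_data.reinit(dof_handler, constraints, quadrature, additional_data);

// A device-compatible vector sized according to the resulting partitioner:
LinearAlgebra::distributed::Vector<double, MemorySpace::Default> solution;
mf_data.initialize_dof_vector(solution);
@endcode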
-
-
-
- template <int dim, typename Number>
- typename MatrixFree<dim, Number>::Data
- MatrixFree<dim, Number>::get_data(unsigned int color) const
- {
- Data data_copy;
- if (q_points.size() > 0)
- data_copy.q_points = q_points[color];
- if (inv_jacobian.size() > 0)
- data_copy.inv_jacobian = inv_jacobian[color];
- if (JxW.size() > 0)
- data_copy.JxW = JxW[color];
- data_copy.local_to_global = local_to_global[color];
- data_copy.constraint_mask = constraint_mask[color];
- data_copy.shape_values = shape_values;
- data_copy.shape_gradients = shape_gradients;
- data_copy.co_shape_gradients = co_shape_gradients;
- data_copy.constraint_weights = constraint_weights;
- data_copy.n_cells = n_cells[color];
- data_copy.padding_length = padding_length;
- data_copy.row_start = row_start[color];
- data_copy.use_coloring = use_coloring;
-
- return data_copy;
- }
-
-
-
- template <int dim, typename Number>
- template <typename VectorType>
- void
- MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
- VectorType &dst) const
- {
- static_assert(
- std::is_same_v<Number, typename VectorType::value_type>,
- "VectorType::value_type and Number should be of the same type.");
- Assert(src.size() == dst.size(),
- ExcMessage("src and dst vectors have different size."));
- // FIXME When using C++17, we can use KOKKOS_CLASS_LAMBDA and this
- // work-around can be removed.
- auto constr_dofs = constrained_dofs;
- const unsigned int size = internal::VectorLocalSize<VectorType>::get(dst);
- const Number *src_ptr = src.get_values();
- Number *dst_ptr = dst.get_values();
- Kokkos::parallel_for(
- "dealii::copy_constrained_values",
- Kokkos::RangePolicy<MemorySpace::Default::kokkos_space::execution_space>(
- 0, n_constrained_dofs),
- KOKKOS_LAMBDA(int dof) {
- // When working with distributed vectors, the constrained dofs are
- // computed for ghosted vectors but we want to copy the values of the
- // constrained dofs of non-ghosted vectors.
- const auto constrained_dof = constr_dofs[dof];
- if (constrained_dof < size)
- dst_ptr[constrained_dof] = src_ptr[constrained_dof];
- });
- }
-
-
-
- template <int dim, typename Number>
- template <typename VectorType>
- void
- MatrixFree<dim, Number>::set_constrained_values(Number val,
- VectorType &dst) const
- {
- static_assert(
- std::is_same_v<Number, typename VectorType::value_type>,
- "VectorType::value_type and Number should be of the same type.");
- Number *dst_ptr = dst.get_values();
- // FIXME When using C++17, we can use KOKKOS_CLASS_LAMBDA and this
- // work-around can be removed.
- auto constr_dofs = constrained_dofs;
- // When working with distributed vectors, the constrained dofs are
- // computed for ghosted vectors but we want to set the values of the
- // constrained dofs of non-ghosted vectors.
- const unsigned int size =
- partitioner ? dst.locally_owned_size() : dst.size();
- Kokkos::parallel_for(
- "dealii::set_constrained_values",
- Kokkos::RangePolicy<MemorySpace::Default::kokkos_space::execution_space>(
- 0, n_constrained_dofs),
- KOKKOS_LAMBDA(int dof) {
- if (constr_dofs[dof] < size)
- dst_ptr[constr_dofs[dof]] = val;
- });
- }
-
-
-#ifdef DEAL_II_WITH_CUDA
- template <int dim, typename Number>
- void
- MatrixFree<dim, Number>::initialize_dof_vector(
- LinearAlgebra::CUDAWrappers::Vector<Number> &vec) const
- {
- vec.reinit(n_dofs);
- }
-#endif
-
-
-
- template <int dim, typename Number>
- void
- MatrixFree<dim, Number>::initialize_dof_vector(
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &vec) const
- {
- if (partitioner)
- vec.reinit(partitioner);
- else
- vec.reinit(n_dofs);
- }
-
-
-
- template <int dim, typename Number>
- unsigned int
- MatrixFree<dim, Number>::get_padding_length() const
- {
- return padding_length;
- }
-
-
-
- template <int dim, typename Number>
- template <typename Functor, typename VectorType>
- void
- MatrixFree<dim, Number>::cell_loop(const Functor &func,
- const VectorType &src,
- VectorType &dst) const
- {
- if (partitioner)
- distributed_cell_loop(func, src, dst);
- else
- serial_cell_loop(func, src, dst);
- }
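Within an operator class, cell_loop() is usually wrapped in a vmult() that
treats constrained degrees of freedom separately. The following is a hedged
sketch; the operator class, its cell-local functor `LocalOperator`, and the
member `mf_data` are assumptions made for illustration:
@code
template <int dim, int fe_degree>
void MyMatrixFreeOperator<dim, fe_degree>::vmult(
  LinearAlgebra::distributed::Vector<double, MemorySpace::Default>       &dst,
  const LinearAlgebra::distributed::Vector<double, MemorySpace::Default> &src)
  const
{
  dst = 0.;

  LocalOperator<dim, fe_degree> local_operator(/* coefficient data ... */);
  mf_data.cell_loop(local_operator, src, dst);

  // Constrained entries are not touched by the cell loop; make the operator
  // act as the identity on them.
  mf_data.copy_constrained_values(src, dst);
}
@endcode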
-
-
-
- template <int dim, typename Number>
- template <typename Functor>
- void
- MatrixFree<dim, Number>::evaluate_coefficients(Functor func) const
- {
- for (unsigned int i = 0; i < n_colors; ++i)
- if (n_cells[i] > 0)
- {
- MemorySpace::Default::kokkos_space::execution_space exec;
- auto color_data = get_data(i);
- Kokkos::parallel_for(
- "dealii::MatrixFree::evaluate_coeff",
- Kokkos::MDRangePolicy<
- MemorySpace::Default::kokkos_space::execution_space,
- Kokkos::Rank<2>>(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- {0, 0},
- {n_cells[i], Functor::n_q_points}),
- KOKKOS_LAMBDA(const int cell, const int q) {
- func(&color_data, cell, q);
- });
- }
- }
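Note that evaluate_coefficients() only requires the functor to expose a static
`n_q_points` member (used as the extent of the MDRangePolicy above) and a
device-callable `operator()` taking the per-color Data pointer, a cell index,
and a quadrature-point index. A minimal call-site sketch, with
`coefficient_functor` assumed to be such an object:
@code
// coefficient_functor must provide
//   static const unsigned int n_q_points;
//   DEAL_II_HOST_DEVICE void operator()(const MatrixFree<dim, double>::Data *,
//                                       unsigned int cell,
//                                       unsigned int q) const;
mf_data.evaluate_coefficients(coefficient_functor);
@endcode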
-
-
-
- template <int dim, typename Number>
- std::size_t
- MatrixFree<dim, Number>::memory_consumption() const
- {
- // First compute the size of n_cells, row_starts, kernel launch parameters,
- // and constrained_dofs
- std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
- n_constrained_dofs * sizeof(unsigned int);
-
- // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
- // FIXME
- for (unsigned int i = 0; i < n_colors; ++i)
- {
- bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
- n_cells[i] * padding_length * dim * dim * sizeof(Number) +
- n_cells[i] * padding_length * sizeof(Number) +
- n_cells[i] * padding_length * sizeof(point_type) +
- n_cells[i] * sizeof(unsigned int);
- }
-
- return bytes;
- }
-
-
-
- template <int dim, typename Number>
- template <typename IteratorFiltersType>
- void
- MatrixFree<dim, Number>::internal_reinit(
- const Mapping<dim> &mapping,
- const DoFHandler<dim> &dof_handler_,
- const AffineConstraints<Number> &constraints,
- const Quadrature<1> &quad,
- const IteratorFiltersType &iterator_filter,
- const std::shared_ptr<const MPI_Comm> &comm,
- const AdditionalData additional_data)
- {
- dof_handler = &dof_handler_;
-
- UpdateFlags update_flags = additional_data.mapping_update_flags;
- if (update_flags & update_gradients)
- update_flags |= update_JxW_values;
-
- this->use_coloring = additional_data.use_coloring;
- this->overlap_communication_computation =
- additional_data.overlap_communication_computation;
-
- n_dofs = dof_handler->n_dofs();
-
- const FiniteElement<dim> &fe = dof_handler->get_fe();
-
- fe_degree = fe.degree;
- // TODO this should be a templated parameter
- const unsigned int n_dofs_1d = fe_degree + 1;
- const unsigned int n_q_points_1d = quad.size();
-
- Assert(n_dofs_1d == n_q_points_1d,
- ExcMessage("n_q_points_1d must be equal to fe_degree+1."));
-
- // Set padding length to the closest power of two larger than or equal to
- // the number of threads.
- padding_length = 1 << static_cast<unsigned int>(
- std::ceil(dim * std::log2(fe_degree + 1.)));
-
- dofs_per_cell = fe.n_dofs_per_cell();
- q_points_per_cell = Utilities::fixed_power<dim>(n_q_points_1d);
-
- ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(quad,
- fe);
-
- unsigned int size_shape_values = n_dofs_1d * n_q_points_1d;
-
- shape_values = Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing),
- size_shape_values);
- Kokkos::deep_copy(shape_values,
- Kokkos::View<Number *, Kokkos::HostSpace>(
- shape_info.data.front().shape_values.data(),
- size_shape_values));
-
- if (update_flags & update_gradients)
- {
- shape_gradients =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("shape_gradients", Kokkos::WithoutInitializing),
- size_shape_values);
- Kokkos::deep_copy(shape_gradients,
- Kokkos::View<Number *, Kokkos::HostSpace>(
- shape_info.data.front().shape_gradients.data(),
- size_shape_values));
-
-
- co_shape_gradients =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("co_shape_gradients",
- Kokkos::WithoutInitializing),
- n_q_points_1d * n_q_points_1d);
- Kokkos::deep_copy(
- co_shape_gradients,
- Kokkos::View<Number *, Kokkos::HostSpace>(
- shape_info.data.front().shape_gradients_collocation.data(),
- n_q_points_1d * n_q_points_1d));
- }
-
- internal::ReinitHelper<dim, Number> helper(
- this, mapping, fe, quad, shape_info, *dof_handler, update_flags);
-
- const unsigned int constraint_weights_size =
- shape_info.data.front().subface_interpolation_matrices[0].size();
- constraint_weights =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("constraint_weights", Kokkos::WithoutInitializing),
- constraint_weights_size);
- auto constraint_weights_host =
- Kokkos::create_mirror_view(constraint_weights);
- for (unsigned int i = 0; i < constraint_weights_size; ++i)
- {
- constraint_weights_host[i] =
- shape_info.data.front().subface_interpolation_matrices[0][i];
- }
- Kokkos::deep_copy(constraint_weights, constraint_weights_host);
-
- // Create a graph coloring
- CellFilter begin(iterator_filter, dof_handler->begin_active());
- CellFilter end(iterator_filter, dof_handler->end());
-
- if (begin != end)
- {
- if (additional_data.use_coloring)
- {
- const auto fun = [&](const CellFilter &filter) {
- return internal::get_conflict_indices<dim, Number>(filter,
- constraints);
- };
- graph = GraphColoring::make_graph_coloring(begin, end, fun);
- }
- else
- {
- graph.clear();
- if (additional_data.overlap_communication_computation)
- {
- // We create one color (1) with the cells on the boundary of the
- // local domain and two colors (0 and 2) with the interior
- // cells.
- graph.resize(3, std::vector<CellFilter>());
-
- std::vector<bool> ghost_vertices(
- dof_handler->get_triangulation().n_vertices(), false);
-
- for (const auto &cell :
- dof_handler->get_triangulation().active_cell_iterators())
- if (cell->is_ghost())
- for (unsigned int i = 0;
- i < GeometryInfo<dim>::vertices_per_cell;
- i++)
- ghost_vertices[cell->vertex_index(i)] = true;
-
- std::vector<dealii::FilteredIterator<dealii::TriaActiveIterator<
- dealii::DoFCellAccessor<dim, dim, false>>>>
- inner_cells;
-
- for (auto cell = begin; cell != end; ++cell)
- {
- bool ghost_vertex = false;
-
- for (unsigned int i = 0;
- i < GeometryInfo<dim>::vertices_per_cell;
- i++)
- if (ghost_vertices[cell->vertex_index(i)])
- {
- ghost_vertex = true;
- break;
- }
-
- if (ghost_vertex)
- graph[1].emplace_back(cell);
- else
- inner_cells.emplace_back(cell);
- }
- for (unsigned i = 0; i < inner_cells.size(); ++i)
- if (i < inner_cells.size() / 2)
- graph[0].emplace_back(inner_cells[i]);
- else
- graph[2].emplace_back(inner_cells[i]);
- }
- else
- {
- // If we are not using coloring, all the cells belong to the
- // same color.
- graph.resize(1, std::vector<CellFilter>());
- for (auto cell = begin; cell != end; ++cell)
- graph[0].emplace_back(cell);
- }
- }
- }
- n_colors = graph.size();
-
- helper.resize(n_colors);
-
- IndexSet locally_relevant_dofs;
- if (comm)
- {
- locally_relevant_dofs =
- DoFTools::extract_locally_relevant_dofs(*dof_handler);
- partitioner = std::make_shared<Utilities::MPI::Partitioner>(
- dof_handler->locally_owned_dofs(), locally_relevant_dofs, *comm);
- }
- for (unsigned int i = 0; i < n_colors; ++i)
- {
- n_cells[i] = graph[i].size();
- helper.fill_data(i, graph[i], partitioner);
- }
-
- // Setup row starts
- if (n_colors > 0)
- row_start[0] = 0;
- for (unsigned int i = 1; i < n_colors; ++i)
- row_start[i] = row_start[i - 1] + n_cells[i - 1] * get_padding_length();
-
- // Constrained indices
- n_constrained_dofs = constraints.n_constraints();
-
- if (n_constrained_dofs != 0)
- {
- std::vector<dealii::types::global_dof_index> constrained_dofs_host(
- n_constrained_dofs);
-
- if (partitioner)
- {
- const unsigned int n_local_dofs =
- locally_relevant_dofs.n_elements();
- unsigned int i_constraint = 0;
- for (unsigned int i = 0; i < n_local_dofs; ++i)
- {
- // is_constrained uses a global dof id but
- // constrained_dofs_host works on the local id
- if (constraints.is_constrained(partitioner->local_to_global(i)))
- {
- constrained_dofs_host[i_constraint] = i;
- ++i_constraint;
- }
- }
- }
- else
- {
- const unsigned int n_local_dofs = dof_handler->n_dofs();
- unsigned int i_constraint = 0;
- for (unsigned int i = 0; i < n_local_dofs; ++i)
- {
- if (constraints.is_constrained(i))
- {
- constrained_dofs_host[i_constraint] = i;
- ++i_constraint;
- }
- }
- }
-
- constrained_dofs = Kokkos::View<types::global_dof_index *,
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("constrained_dofs", Kokkos::WithoutInitializing),
- n_constrained_dofs);
-
- Kokkos::View<types::global_dof_index *,
- MemorySpace::Default::kokkos_space,
- Kokkos::MemoryTraits<Kokkos::Unmanaged>>
- constrained_dofs_host_view(constrained_dofs_host.data(),
- constrained_dofs_host.size());
- Kokkos::deep_copy(constrained_dofs, constrained_dofs_host_view);
- }
- }
-
-
-
- template <int dim, typename Number>
- template <typename Functor, typename VectorType>
- void
- MatrixFree<dim, Number>::serial_cell_loop(const Functor &func,
- const VectorType &src,
- VectorType &dst) const
- {
- // Execute the loop on the cells
- for (unsigned int color = 0; color < n_colors; ++color)
- if (n_cells[color] > 0)
- {
- MemorySpace::Default::kokkos_space::execution_space exec;
- Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>
- team_policy(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- n_cells[color],
- Kokkos::AUTO);
-
- internal::ApplyKernel<dim, Number, Functor> apply_kernel(
- func, get_data(color), src.get_values(), dst.get_values());
-
- Kokkos::parallel_for("dealii::MatrixFree::serial_cell_loop",
- team_policy,
- apply_kernel);
- }
- Kokkos::fence();
- }
-
-
-
- template <int dim, typename Number>
- template <typename Functor>
- void
- MatrixFree<dim, Number>::distributed_cell_loop(
- const Functor &func,
- const LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &src,
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &dst) const
- {
- MemorySpace::Default::kokkos_space::execution_space exec;
-
- // in case we have compatible partitioners, we can simply use the provided
- // vectors
- if (src.get_partitioner().get() == partitioner.get() &&
- dst.get_partitioner().get() == partitioner.get())
- {
-        // This code is inspired by the code in TaskInfo::loop.
- if (overlap_communication_computation)
- {
- src.update_ghost_values_start(0);
-
- // In parallel, it's possible that some processors do not own any
- // cells.
- if (n_cells[0] > 0)
- {
- Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>
- team_policy(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- n_cells[0],
- Kokkos::AUTO);
-
- internal::ApplyKernel<dim, Number, Functor> apply_kernel(
- func, get_data(0), src.get_values(), dst.get_values());
-
- Kokkos::parallel_for(
- "dealii::MatrixFree::distributed_cell_loop_0",
- team_policy,
- apply_kernel);
- }
- src.update_ghost_values_finish();
-
- // In serial this color does not exist because there are no ghost
- // cells
- if (n_cells[1] > 0)
- {
- Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>
- team_policy(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- n_cells[1],
- Kokkos::AUTO);
-
- internal::ApplyKernel<dim, Number, Functor> apply_kernel(
- func, get_data(1), src.get_values(), dst.get_values());
-
- Kokkos::parallel_for(
- "dealii::MatrixFree::distributed_cell_loop_1",
- team_policy,
- apply_kernel);
-
- // We need a synchronization point because we don't want
- // CUDA-aware MPI to start the MPI communication until the
- // kernel is done.
- Kokkos::fence();
- }
-
- dst.compress_start(0, VectorOperation::add);
- // When the mesh is coarse it is possible that some processors do
- // not own any cells
- if (n_cells[2] > 0)
- {
- Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>
- team_policy(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- n_cells[2],
- Kokkos::AUTO);
-
- internal::ApplyKernel<dim, Number, Functor> apply_kernel(
- func, get_data(2), src.get_values(), dst.get_values());
-
- Kokkos::parallel_for(
- "dealii::MatrixFree::distributed_cell_loop_2",
- team_policy,
- apply_kernel);
- }
- dst.compress_finish(VectorOperation::add);
- }
- else
- {
- src.update_ghost_values();
- std::vector<
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>>
- values_colors(n_colors);
- std::vector<
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>>
- gradients_colors(n_colors);
-
- // Execute the loop on the cells
- for (unsigned int i = 0; i < n_colors; ++i)
- if (n_cells[i] > 0)
- {
- Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>
- team_policy(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- n_cells[i],
- Kokkos::AUTO);
-
- internal::ApplyKernel<dim, Number, Functor> apply_kernel(
- func, get_data(i), src.get_values(), dst.get_values());
-
- Kokkos::parallel_for(
- "dealii::MatrixFree::distributed_cell_loop_" +
- std::to_string(i),
- team_policy,
- apply_kernel);
- }
- dst.compress(VectorOperation::add);
- }
- src.zero_out_ghost_values();
- }
- else
- {
- // Create the ghosted source and the ghosted destination
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
- ghosted_src(partitioner);
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
- ghosted_dst(ghosted_src);
- ghosted_src = src;
- ghosted_dst = dst;
- ghosted_dst.zero_out_ghost_values();
-
- // Execute the loop on the cells
- for (unsigned int i = 0; i < n_colors; ++i)
- if (n_cells[i] > 0)
- {
- Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>
- team_policy(
-#if KOKKOS_VERSION >= 20900
- exec,
-#endif
- n_cells[i],
- Kokkos::AUTO);
-
- internal::ApplyKernel<dim, Number, Functor> apply_kernel(
- func,
- get_data(i),
- ghosted_src.get_values(),
- ghosted_dst.get_values());
-
- Kokkos::parallel_for(
- "dealii::MatrixFree::distributed_cell_loop_" +
- std::to_string(i),
- team_policy,
- apply_kernel);
- }
-
- // Add the ghosted values
- ghosted_dst.compress(VectorOperation::add);
- dst = ghosted_dst;
- }
- }
-
-#ifdef DEAL_II_WITH_CUDA
- template <int dim, typename Number>
- template <typename Functor>
- void
- MatrixFree<dim, Number>::distributed_cell_loop(
- const Functor &,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &,
- LinearAlgebra::CUDAWrappers::Vector<Number> &) const
- {
- DEAL_II_ASSERT_UNREACHABLE();
- }
-#endif
+ using namespace Portable;
} // namespace CUDAWrappers
DEAL_II_NAMESPACE_CLOSE
#ifndef dealii_cuda_tensor_product_kernels_h
#define dealii_cuda_tensor_product_kernels_h
-#include <deal.II/base/config.h>
-#include <deal.II/base/utilities.h>
-
-#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
+#include <deal.II/matrix_free/portable_tensor_product_kernels.h>
DEAL_II_NAMESPACE_OPEN
namespace CUDAWrappers
{
- namespace internal
- {
- /**
- * In this namespace, the evaluator routines that evaluate the tensor
- * products are implemented.
- *
- * @ingroup CUDAWrappers
- */
- // TODO: for now only the general variant is implemented
- enum EvaluatorVariant
- {
- evaluate_general,
- evaluate_symmetric,
- evaluate_evenodd
- };
-
-
-
-#if KOKKOS_VERSION >= 40000
- /**
- * Helper function for values() and gradients() in 1D
- */
- template <int n_q_points_1d,
- typename Number,
- int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- apply_1d(const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_data,
- const ViewTypeIn in,
- ViewTypeOut out)
- {
- Number t[n_q_points_1d];
- Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points_1d),
- [&](const int &q) {
- t[q] = 0;
- // This loop simply multiplies the shape function
-                             // at the quadrature point by the value of the
-                             // finite element coefficient.
- // FIXME check why using parallel_reduce
- // ThreadVector is slower
- for (int k = 0; k < n_q_points_1d; ++k)
- {
- const unsigned int shape_idx =
- dof_to_quad ? (q + k * n_q_points_1d) :
- (k + q * n_q_points_1d);
- const unsigned int source_idx = k;
- t[q] += shape_data[shape_idx] * in(source_idx);
- }
- });
-
- if constexpr (in_place)
- team_member.team_barrier();
-
- Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points_1d),
- [&](const int &q) {
- const unsigned int destination_idx = q;
- if constexpr (add)
- Kokkos::atomic_add(&out(destination_idx), t[q]);
- else
- out(destination_idx) = t[q];
- });
- }
-
-
-
- /**
- * Helper function for values() and gradients() in 2D
- */
- template <int n_q_points_1d,
- typename Number,
- int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- apply_2d(const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_data,
- const ViewTypeIn in,
- ViewTypeOut out)
- {
- using TeamType = Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type;
- constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2);
-
- Number t[n_q_points];
- auto thread_policy =
- Kokkos::TeamThreadMDRange<Kokkos::Rank<2>, TeamType>(team_member,
- n_q_points_1d,
- n_q_points_1d);
- Kokkos::parallel_for(thread_policy, [&](const int i, const int j) {
- int q_point = i + j * n_q_points_1d;
- t[q_point] = 0;
-
- // This loop simply multiplies the shape function at the quadrature
-      // point by the value of the finite element coefficient.
- // FIXME check why using parallel_reduce ThreadVector is slower
- for (int k = 0; k < n_q_points_1d; ++k)
- {
- const unsigned int shape_idx =
- dof_to_quad ? (j + k * n_q_points_1d) : (k + j * n_q_points_1d);
- const unsigned int source_idx = (direction == 0) ?
- (k + n_q_points_1d * i) :
- (i + n_q_points_1d * k);
- t[q_point] += shape_data[shape_idx] * in(source_idx);
- }
- });
-
- if (in_place)
- team_member.team_barrier();
-
- Kokkos::parallel_for(thread_policy, [&](const int i, const int j) {
- const int q_point = i + j * n_q_points_1d;
- const unsigned int destination_idx =
- (direction == 0) ? (j + n_q_points_1d * i) : (i + n_q_points_1d * j);
-
- if (add)
- Kokkos::atomic_add(&out(destination_idx), t[q_point]);
- else
- out(destination_idx) = t[q_point];
- });
- }
-
-
-
- /**
- * Helper function for values() and gradients() in 3D
- */
- template <int n_q_points_1d,
- typename Number,
- int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- apply_3d(const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_data,
- const ViewTypeIn in,
- ViewTypeOut out)
- {
- using TeamType = Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type;
- constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3);
-
- Number t[n_q_points];
- auto thread_policy = Kokkos::TeamThreadMDRange<Kokkos::Rank<3>, TeamType>(
- team_member, n_q_points_1d, n_q_points_1d, n_q_points_1d);
- Kokkos::parallel_for(
- thread_policy, [&](const int i, const int j, const int q) {
- const int q_point =
- i + j * n_q_points_1d + q * n_q_points_1d * n_q_points_1d;
- t[q_point] = 0;
-
- // This loop simply multiplies the shape function at the quadrature
-          // point by the value of the finite element coefficient.
- // FIXME check why using parallel_reduce ThreadVector is slower
- for (int k = 0; k < n_q_points_1d; ++k)
- {
- const unsigned int shape_idx =
- dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
- const unsigned int source_idx =
- (direction == 0) ?
- (k + n_q_points_1d * (i + n_q_points_1d * j)) :
- (direction == 1) ?
- (i + n_q_points_1d * (k + n_q_points_1d * j)) :
- (i + n_q_points_1d * (j + n_q_points_1d * k));
- t[q_point] += shape_data[shape_idx] * in(source_idx);
- }
- });
-
- if (in_place)
- team_member.team_barrier();
-
- Kokkos::parallel_for(
- thread_policy, [&](const int i, const int j, const int q) {
- const int q_point =
- i + j * n_q_points_1d + q * n_q_points_1d * n_q_points_1d;
- const unsigned int destination_idx =
- (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
- (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
- (i + n_q_points_1d * (j + n_q_points_1d * q));
-
- if (add)
- Kokkos::atomic_add(&out(destination_idx), t[q_point]);
- else
- out(destination_idx) = t[q_point];
- });
- }
-#endif
-
-
-
- /**
- * Helper function for values() and gradients().
- */
- template <int dim,
- int n_q_points_1d,
- typename Number,
- int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- apply(const Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type
- &team_member,
- const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_data,
- const ViewTypeIn in,
- ViewTypeOut out)
- {
-#if KOKKOS_VERSION >= 40000
- if constexpr (dim == 1)
- apply_1d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- team_member, shape_data, in, out);
- if constexpr (dim == 2)
- apply_2d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- team_member, shape_data, in, out);
- if constexpr (dim == 3)
- apply_3d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- team_member, shape_data, in, out);
-#else
- constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);
-
- Number t[n_q_points];
- Kokkos::parallel_for(
- Kokkos::TeamThreadRange(team_member, n_q_points),
- [&](const int &q_point) {
- const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d;
- const unsigned int j =
- (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0;
- const unsigned int q =
- (dim == 1) ? q_point :
- (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d :
- q_point / (n_q_points_1d * n_q_points_1d);
-
- // This loop simply multiplies the shape function at the quadrature
-          // point by the value of the finite element coefficient.
- t[q_point] = 0;
- for (int k = 0; k < n_q_points_1d; ++k)
- {
- const unsigned int shape_idx =
- dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
- const unsigned int source_idx =
- (direction == 0) ?
- (k + n_q_points_1d * (i + n_q_points_1d * j)) :
- (direction == 1) ?
- (i + n_q_points_1d * (k + n_q_points_1d * j)) :
- (i + n_q_points_1d * (j + n_q_points_1d * k));
- t[q_point] += shape_data[shape_idx] *
- (in_place ? out(source_idx) : in(source_idx));
- }
- });
-
- if (in_place)
- team_member.team_barrier();
-
- Kokkos::parallel_for(
- Kokkos::TeamThreadRange(team_member, n_q_points),
- [&](const int &q_point) {
- const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d;
- const unsigned int j =
- (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0;
- const unsigned int q =
- (dim == 1) ? q_point :
- (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d :
- q_point / (n_q_points_1d * n_q_points_1d);
-
- const unsigned int destination_idx =
- (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
- (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
- (i + n_q_points_1d * (j + n_q_points_1d * q));
-
- if (add)
- Kokkos::atomic_add(&out(destination_idx), t[q_point]);
- else
- out(destination_idx) = t[q_point];
- });
-#endif
- }
-
-
- /**
- * Generic evaluator framework.
- *
- * @ingroup CUDAWrappers
- */
- template <EvaluatorVariant variant,
- int dim,
- int fe_degree,
- int n_q_points_1d,
- typename Number>
- struct EvaluatorTensorProduct
- {};
-
-
-
- /**
- * Internal evaluator for 1d-3d shape function using the tensor product form
- * of the basis functions.
- *
- * @ingroup CUDAWrappers
- */
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- struct EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>
- {
- public:
- using TeamHandle = Kokkos::TeamPolicy<
- MemorySpace::Default::kokkos_space::execution_space>::member_type;
-
- DEAL_II_HOST_DEVICE
- EvaluatorTensorProduct(
- const TeamHandle &team_member,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_gradients,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- co_shape_gradients);
-
- /**
- * Evaluate the finite element function at the quadrature points.
- */
- template <typename ViewType>
- DEAL_II_HOST_DEVICE void
- evaluate_values(ViewType u);
-
- /**
- * Evaluate the gradients of the finite element function at the quadrature
- * points.
- */
- template <typename ViewTypeIn, typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- evaluate_gradients(const ViewTypeIn u, ViewTypeOut grad_u);
-
- /**
- * Evaluate the values and the gradients of the finite element function at
- * the quadrature points.
- */
- template <typename ViewType1, typename ViewType2>
- DEAL_II_HOST_DEVICE void
- evaluate_values_and_gradients(ViewType1 u, ViewType2 grad_u);
-
- /**
- * Helper function for integrate(). Integrate the finite element function.
- */
- template <typename ViewType>
- DEAL_II_HOST_DEVICE void
- integrate_values(ViewType u);
-
- /**
- * Helper function for integrate(). Integrate the gradients of the finite
- * element function.
- */
- template <bool add, typename ViewType1, typename ViewType2>
- DEAL_II_HOST_DEVICE void
- integrate_gradients(ViewType1 u, ViewType2 grad_u);
-
- /**
- * Helper function for integrate(). Integrate the values and the gradients
- * of the finite element function.
- */
- template <typename ViewType1, typename ViewType2>
- DEAL_II_HOST_DEVICE void
- integrate_values_and_gradients(ViewType1 u, ViewType2 grad_u);
-
- /**
- * Evaluate/integrate the values of a finite element function at the
- * quadrature points for a given @p direction.
- */
- template <int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- values(const ViewTypeIn in, ViewTypeOut out) const;
-
- /**
- * Evaluate/integrate the gradient of a finite element function at the
- * quadrature points for a given @p direction.
- */
- template <int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- gradients(const ViewTypeIn in, ViewTypeOut out) const;
-
- public:
- /**
- * Evaluate the gradient of a finite element function at the quadrature
- * points for a given @p direction for collocation methods.
- */
- template <int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- co_gradients(const ViewTypeIn in, ViewTypeOut out) const;
-
- /**
- * TeamPolicy handle.
- */
- const TeamHandle &team_member;
-
- /**
- * Values of the shape functions.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
-
- /**
- * Values of the shape function gradients.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_gradients;
-
- /**
- * Values of the shape function gradients for collocation methods.
- */
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- co_shape_gradients;
- };
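The evaluator declared above is typically exercised in the order evaluate,
quadrature-point operation, integrate. A hedged sketch of that sequence inside
a device cell kernel; `team_member`, the shape-data views, and the scratch
views `u` and `grad_u` are assumptions made for illustration:
@code
internal::EvaluatorTensorProduct<internal::evaluate_general,
                                 dim,
                                 fe_degree,
                                 n_q_points_1d,
                                 double>
  evaluator(team_member, shape_values, shape_gradients, co_shape_gradients);

// DoF values -> values and gradients at the quadrature points
evaluator.evaluate_values_and_gradients(u, grad_u);
team_member.team_barrier();

// ... operation at each quadrature point, e.g. scale by the coefficient
//     and JxW ...

// quadrature-point data -> contributions to the local integral
evaluator.integrate_values_and_gradients(u, grad_u);
@endcode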
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- DEAL_II_HOST_DEVICE
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::
- EvaluatorTensorProduct(
- const TeamHandle &team_member,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- shape_gradients,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- co_shape_gradients)
- : team_member(team_member)
- , shape_values(shape_values)
- , shape_gradients(shape_gradients)
- , co_shape_gradients(co_shape_gradients)
- {}
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::values(const ViewTypeIn in,
- ViewTypeOut out) const
- {
- apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- team_member, shape_values, in, out);
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::gradients(const ViewTypeIn in,
- ViewTypeOut out) const
- {
- apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- team_member, shape_gradients, in, out);
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <int direction,
- bool dof_to_quad,
- bool add,
- bool in_place,
- typename ViewTypeIn,
- typename ViewTypeOut>
- DEAL_II_HOST_DEVICE void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::co_gradients(const ViewTypeIn in,
- ViewTypeOut out) const
- {
- apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- team_member, co_shape_gradients, in, out);
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <typename ViewType>
- DEAL_II_HOST_DEVICE inline void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::evaluate_values(ViewType u)
- {
- if constexpr (dim == 1)
- values<0, true, false, true>(u, u);
- else if constexpr (dim == 2)
- {
- values<0, true, false, true>(u, u);
- team_member.team_barrier();
- values<1, true, false, true>(u, u);
- }
- else if constexpr (dim == 3)
- {
- values<0, true, false, true>(u, u);
- team_member.team_barrier();
- values<1, true, false, true>(u, u);
- team_member.team_barrier();
- values<2, true, false, true>(u, u);
- }
- else
- Kokkos::abort("dim must not exceed 3!");
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <typename ViewType>
- DEAL_II_HOST_DEVICE inline void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::integrate_values(ViewType u)
- {
- if constexpr (dim == 1)
- values<0, false, false, true>(u, u);
- else if constexpr (dim == 2)
- {
- values<0, false, false, true>(u, u);
- team_member.team_barrier();
- values<1, false, false, true>(u, u);
- }
- else if constexpr (dim == 3)
- {
- values<0, false, false, true>(u, u);
- team_member.team_barrier();
- values<1, false, false, true>(u, u);
- team_member.team_barrier();
- values<2, false, false, true>(u, u);
- }
- else
- Kokkos::abort("dim must not exceed 3!");
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <typename ViewTypeIn, typename ViewTypeOut>
- DEAL_II_HOST_DEVICE inline void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::evaluate_gradients(const ViewTypeIn u,
- ViewTypeOut grad_u)
- {
- if constexpr (dim == 1)
- {
- gradients<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
- }
- else if constexpr (dim == 2)
- {
- gradients<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
- values<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
-
- team_member.team_barrier();
-
- values<1, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
- Kokkos::subview(grad_u, Kokkos::ALL, 0));
- gradients<1, true, false, true>(
- Kokkos::subview(grad_u, Kokkos::ALL, 1),
- Kokkos::subview(grad_u, Kokkos::ALL, 1));
- }
- else if constexpr (dim == 3)
- {
- gradients<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
- values<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
- values<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 2));
-
- team_member.team_barrier();
-
- values<1, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
- Kokkos::subview(grad_u, Kokkos::ALL, 0));
- gradients<1, true, false, true>(
- Kokkos::subview(grad_u, Kokkos::ALL, 1),
- Kokkos::subview(grad_u, Kokkos::ALL, 1));
- values<1, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 2),
- Kokkos::subview(grad_u, Kokkos::ALL, 2));
-
- team_member.team_barrier();
-
- values<2, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
- Kokkos::subview(grad_u, Kokkos::ALL, 0));
- values<2, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
- Kokkos::subview(grad_u, Kokkos::ALL, 1));
- gradients<2, true, false, true>(
- Kokkos::subview(grad_u, Kokkos::ALL, 2),
- Kokkos::subview(grad_u, Kokkos::ALL, 2));
- }
- else
- Kokkos::abort("dim must not exceed 3!");
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <typename ViewType1, typename ViewType2>
- DEAL_II_HOST_DEVICE inline void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::evaluate_values_and_gradients(ViewType1 u,
- ViewType2
- grad_u)
- {
- if constexpr (dim == 1)
- {
- values<0, true, false, true>(u, u);
- team_member.team_barrier();
-
- co_gradients<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
- }
- else if constexpr (dim == 2)
- {
- values<0, true, false, true>(u, u);
- team_member.team_barrier();
- values<1, true, false, true>(u, u);
- team_member.team_barrier();
-
- co_gradients<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
- co_gradients<1, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
- }
- else if constexpr (dim == 3)
- {
- values<0, true, false, true>(u, u);
- team_member.team_barrier();
- values<1, true, false, true>(u, u);
- team_member.team_barrier();
- values<2, true, false, true>(u, u);
- team_member.team_barrier();
-
- co_gradients<0, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
- co_gradients<1, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
- co_gradients<2, true, false, false>(
- u, Kokkos::subview(grad_u, Kokkos::ALL, 2));
- }
- else
- Kokkos::abort("dim must not exceed 3!");
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <bool add, typename ViewType1, typename ViewType2>
- DEAL_II_HOST_DEVICE inline void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::integrate_gradients(ViewType1 u,
- ViewType2 grad_u)
- {
- if constexpr (dim == 1)
- {
- gradients<0, false, add, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, dim), u);
- }
- else if constexpr (dim == 2)
- {
- gradients<0, false, false, true>(
- Kokkos::subview(grad_u, Kokkos::ALL, 0),
- Kokkos::subview(grad_u, Kokkos::ALL, 0));
- values<0, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
- Kokkos::subview(grad_u,
- Kokkos::ALL,
- 1));
-
- team_member.team_barrier();
-
- values<1, false, add, false>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
- u);
- team_member.team_barrier();
- gradients<1, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
- }
- else if constexpr (dim == 3)
- {
- gradients<0, false, false, true>(
- Kokkos::subview(grad_u, Kokkos::ALL, 0),
- Kokkos::subview(grad_u, Kokkos::ALL, 0));
- values<0, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
- Kokkos::subview(grad_u,
- Kokkos::ALL,
- 1));
- values<0, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 2),
- Kokkos::subview(grad_u,
- Kokkos::ALL,
- 2));
-
- team_member.team_barrier();
-
- values<1, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
- Kokkos::subview(grad_u,
- Kokkos::ALL,
- 0));
- gradients<1, false, false, true>(
- Kokkos::subview(grad_u, Kokkos::ALL, 1),
- Kokkos::subview(grad_u, Kokkos::ALL, 1));
- values<1, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 2),
- Kokkos::subview(grad_u,
- Kokkos::ALL,
- 2));
-
- team_member.team_barrier();
-
- values<2, false, add, false>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
- u);
- team_member.team_barrier();
- values<2, false, true, false>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
- u);
- team_member.team_barrier();
- gradients<2, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
- }
- else
- Kokkos::abort("dim must not exceed 3!");
- }
-
-
-
- template <int dim, int fe_degree, int n_q_points_1d, typename Number>
- template <typename ViewType1, typename ViewType2>
- DEAL_II_HOST_DEVICE inline void
- EvaluatorTensorProduct<evaluate_general,
- dim,
- fe_degree,
- n_q_points_1d,
- Number>::integrate_values_and_gradients(ViewType1 u,
- ViewType2
- grad_u)
- {
- if constexpr (dim == 1)
- {
- co_gradients<0, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- team_member.team_barrier();
-
- values<0, false, false, true>(u, u);
- }
- else if constexpr (dim == 2)
- {
- co_gradients<1, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
- team_member.team_barrier();
- co_gradients<0, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- team_member.team_barrier();
-
- values<1, false, false, true>(u, u);
- team_member.team_barrier();
- values<0, false, false, true>(u, u);
- team_member.team_barrier();
- }
- else if constexpr (dim == 3)
- {
- co_gradients<2, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
- team_member.team_barrier();
- co_gradients<1, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
- team_member.team_barrier();
- co_gradients<0, false, true, false>(
- Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- team_member.team_barrier();
-
- values<2, false, false, true>(u, u);
- team_member.team_barrier();
- values<1, false, false, true>(u, u);
- team_member.team_barrier();
- values<0, false, false, true>(u, u);
- team_member.team_barrier();
- }
- else
- Kokkos::abort("dim must not exceed 3!");
- }
- } // namespace internal
+ using namespace Portable;
} // namespace CUDAWrappers
DEAL_II_NAMESPACE_CLOSE
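The `using namespace Portable;` directive above is all that remains of the old
implementation: the legacy CUDAWrappers namespace now simply re-exports the
Portable classes, so user code that still spells the old names keeps compiling.
A minimal compile-time sketch of this aliasing follows; it assumes that the
legacy header cuda_matrix_free.h forwards to the Portable implementation in the
same way as the hunk above:
@code
// Sketch only: the legacy header is assumed to contain little more than
// `using namespace Portable;` inside namespace CUDAWrappers.
#include <deal.II/matrix_free/cuda_matrix_free.h>
#include <deal.II/matrix_free/portable_matrix_free.h>

#include <type_traits>

using NewType = dealii::Portable::MatrixFree<2, double>;
using OldType = dealii::CUDAWrappers::MatrixFree<2, double>;

// Qualified lookup follows the using-directive, so both names denote the
// same class template specialization.
static_assert(std::is_same_v<NewType, OldType>,
              "CUDAWrappers::MatrixFree is now an alias for Portable::MatrixFree");
@endcode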
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_portable_fe_evaluation_h
+#define dealii_portable_fe_evaluation_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/memory_space.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/matrix_free/evaluation_flags.h>
+#include <deal.II/matrix_free/portable_hanging_nodes_internal.h>
+#include <deal.II/matrix_free/portable_matrix_free.h>
+#include <deal.II/matrix_free/portable_matrix_free.templates.h>
+#include <deal.II/matrix_free/portable_tensor_product_kernels.h>
+
+#include <Kokkos_Core.hpp>
+
+DEAL_II_NAMESPACE_OPEN
+
+/**
+ * Namespace for portable capabilities
+ */
+namespace Portable
+{
+ /**
+ * This class provides all the functions necessary to evaluate functions at
+ * quadrature points and cell integrations. In functionality, this class is
+ * similar to FEValues<dim>.
+ *
+ * This class has five template arguments:
+ *
+ * @tparam dim Dimension in which this class is to be used
+ *
+ * @tparam fe_degree Degree of the tensor product finite element with fe_degree+1
+ * degrees of freedom per coordinate direction
+ *
+ * @tparam n_q_points_1d Number of points in the quadrature formula in 1d,
+ * defaults to fe_degree+1
+ *
+ * @tparam n_components Number of vector components when solving a system of
+ * PDEs. If the same operation is applied to several components of a PDE (e.g.
+ * a vector Laplace equation), they can be applied simultaneously with one
+ * call (and often more efficiently). Defaults to 1
+ *
+ * @tparam Number Number format, @p double or @p float. Defaults to @p
+ * double.
+ *
+ * @ingroup CUDAWrappers
+ * @ingroup Portable
+ */
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d = fe_degree + 1,
+ int n_components_ = 1,
+ typename Number = double>
+ class FEEvaluation
+ {
+ public:
+ /**
+ * An alias for scalar quantities.
+ */
+ using value_type = Number;
+
+ /**
+ * An alias for vectorial quantities.
+ */
+ using gradient_type = Tensor<1, dim, Number>;
+
+ /**
+ * An alias to kernel specific information.
+ */
+ using data_type = typename MatrixFree<dim, Number>::Data;
+
+ /**
+ * Dimension.
+ */
+ static constexpr unsigned int dimension = dim;
+
+ /**
+ * Number of components.
+ */
+ static constexpr unsigned int n_components = n_components_;
+
+ /**
+ * Number of quadrature points per cell.
+ */
+ static constexpr unsigned int n_q_points =
+ Utilities::pow(n_q_points_1d, dim);
+
+ /**
+ * Number of tensor product degrees of freedom per cell.
+ */
+ static constexpr unsigned int tensor_dofs_per_cell =
+ Utilities::pow(fe_degree + 1, dim);
+
+ /**
+ * Constructor.
+ */
+ DEAL_II_HOST_DEVICE
+ FEEvaluation(const data_type *data, SharedData<dim, Number> *shdata);
+
+ /**
+ * For the vector @p src, read out the values on the degrees of freedom of
+ * the current cell, and store them internally. Similar functionality as
+ * the function DoFAccessor::get_interpolated_dof_values when no
+ * constraints are present, but it also includes constraints from hanging
+ * nodes, so one can see it as a similar function to
+ * AffineConstraints::read_dof_values() as well.
+ */
+ DEAL_II_HOST_DEVICE void
+ read_dof_values(const Number *src);
+
+ /**
+ * Take the value stored internally on dof values of the current cell and
+ * sum them into the vector @p dst. The function also applies constraints
+ * during the write operation. The functionality is hence similar to the
+ * function AffineConstraints::distribute_local_to_global.
+ */
+ DEAL_II_HOST_DEVICE void
+ distribute_local_to_global(Number *dst) const;
+
+ /**
+ * Evaluate the function values and the gradients of the FE function given
+ * at the DoF values in the input vector at the quadrature points on the
+ * unit cell. The function argument specifies which parts shall actually be
+ * computed. This function needs to be called before the functions
+ * @p get_value() or @p get_gradient() give useful information.
+ */
+ DEAL_II_HOST_DEVICE void
+ evaluate(const EvaluationFlags::EvaluationFlags evaluate_flag);
+
+ /**
+ * Evaluate the function values and the gradients of the FE function given
+ * at the DoF values in the input vector at the quadrature points on the
+ * unit cell. The function arguments specify which parts shall actually be
+ * computed. This function needs to be called before the functions
+ * @p get_value() or @p get_gradient() give useful information.
+ */
+ DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
+ "Use the version taking EvaluationFlags.")
+ DEAL_II_HOST_DEVICE
+ void
+ evaluate(const bool evaluate_val, const bool evaluate_grad);
+
+ /**
+ * This function takes the values and/or gradients that are stored on
+ * quadrature points, tests them by all the basis functions/gradients on
+ * the cell and performs the cell integration. The function argument
+ * @p integration_flag is used to enable/disable the contributions of
+ * values and gradients.
+ */
+ DEAL_II_HOST_DEVICE void
+ integrate(const EvaluationFlags::EvaluationFlags integration_flag);
+
+ /**
+ * This function takes the values and/or gradients that are stored on
+ * quadrature points, tests them by all the basis functions/gradients on
+ * the cell and performs the cell integration. The two function arguments
+ * @p integrate_val and @p integrate_grad are used to enable/disable some
+ * of the values or the gradients.
+ */
+ DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
+ "Use the version taking EvaluationFlags.")
+ DEAL_II_HOST_DEVICE
+ void
+ integrate(const bool integrate_val, const bool integrate_grad);
+
+ /**
+ * Return the value of a finite element function at quadrature point
+ * @p q_point after a call to evaluate() with EvaluationFlags::values set.
+ */
+ DEAL_II_HOST_DEVICE value_type
+ get_value(int q_point) const;
+
+ /**
+ * Return the value stored for the local degree of freedom with index
+ * @p q_point, i.e., without interpolation to quadrature points.
+ */
+ DEAL_II_HOST_DEVICE value_type
+ get_dof_value(int q_point) const;
+
+ /**
+ * Write the value @p val_in, multiplied by the JxW factor, to the field of
+ * quadrature point @p q_point, to be tested by the basis function values.
+ */
+ DEAL_II_HOST_DEVICE void
+ submit_value(const value_type &val_in, int q_point);
+
+ /**
+ * Write the value @p val_in directly to the local degree of freedom with
+ * index @p q_point, without applying a JxW factor.
+ */
+ DEAL_II_HOST_DEVICE void
+ submit_dof_value(const value_type &val_in, int q_point);
+
+ /**
+ * Return the gradient in real coordinates of a finite element function at
+ * quadrature point @p q_point after a call to evaluate() with
+ * EvaluationFlags::gradients set.
+ */
+ DEAL_II_HOST_DEVICE gradient_type
+ get_gradient(int q_point) const;
+
+ /**
+ * Write the gradient @p grad_in, transformed back to the reference cell and
+ * multiplied by the JxW factor, to the gradient field of quadrature point
+ * @p q_point, to be tested by the basis function gradients.
+ */
+ DEAL_II_HOST_DEVICE void
+ submit_gradient(const gradient_type &grad_in, int q_point);
+
+ // clang-format off
+ /**
+ * Apply the functor @p func to all quadrature points of the current cell.
+ * The functor receives a pointer to the current FEEvaluation object and the
+ * index of the quadrature point it is applied to.
+ *
+ * @p func needs to define
+ * \code
+ * DEAL_II_HOST_DEVICE void operator()(
+ * Portable::FEEvaluation<dim, fe_degree, n_q_points_1d, n_components, Number> *fe_eval, const int q_point) const;
+ * \endcode
+ */
+ // clang-format on
+ template <typename Functor>
+ DEAL_II_HOST_DEVICE void
+ apply_for_each_quad_point(const Functor &func);
+
+ private:
+ const data_type *data;
+ SharedData<dim, Number> *shared_data;
+ int cell_id;
+ };
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ FEEvaluation(const data_type *data, SharedData<dim, Number> *shdata)
+ : data(data)
+ , shared_data(shdata)
+ , cell_id(shared_data->team_member.league_rank())
+ {}
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ read_dof_values(const Number *src)
+ {
+ static_assert(n_components_ == 1, "This function only supports FE with one \
+ component");
+ // Populate the scratch memory
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_q_points),
+ [&](const int &i) {
+ shared_data->values(i) =
+ src[data->local_to_global(cell_id, i)];
+ });
+ shared_data->team_member.team_barrier();
+
+ internal::resolve_hanging_nodes<dim, fe_degree, false>(
+ shared_data->team_member,
+ data->constraint_weights,
+ data->constraint_mask(cell_id),
+ shared_data->values);
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ distribute_local_to_global(Number *dst) const
+ {
+ static_assert(n_components_ == 1, "This function only supports FE with one \
+ component");
+
+ internal::resolve_hanging_nodes<dim, fe_degree, true>(
+ shared_data->team_member,
+ data->constraint_weights,
+ data->constraint_mask(cell_id),
+ shared_data->values);
+
+ if (data->use_coloring)
+ {
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_q_points),
+ [&](const int &i) {
+ dst[data->local_to_global(cell_id, i)] +=
+ shared_data->values(i);
+ });
+ }
+ else
+ {
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(shared_data->team_member, n_q_points),
+ [&](const int &i) {
+ Kokkos::atomic_add(&dst[data->local_to_global(cell_id, i)],
+ shared_data->values(i));
+ });
+ }
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::evaluate(
+ const EvaluationFlags::EvaluationFlags evaluate_flag)
+ {
+ // Evaluate values and/or gradients as requested by the flags; the combined
+ // path below computes both quantities together
+ internal::EvaluatorTensorProduct<
+ internal::EvaluatorVariant::evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>
+ evaluator_tensor_product(shared_data->team_member,
+ data->shape_values,
+ data->shape_gradients,
+ data->co_shape_gradients);
+
+ if ((evaluate_flag & EvaluationFlags::values) &&
+ (evaluate_flag & EvaluationFlags::gradients))
+ {
+ evaluator_tensor_product.evaluate_values_and_gradients(
+ shared_data->values, shared_data->gradients);
+ shared_data->team_member.team_barrier();
+ }
+ else if (evaluate_flag & EvaluationFlags::gradients)
+ {
+ evaluator_tensor_product.evaluate_gradients(shared_data->values,
+ shared_data->gradients);
+ shared_data->team_member.team_barrier();
+ }
+ else if (evaluate_flag & EvaluationFlags::values)
+ {
+ evaluator_tensor_product.evaluate_values(shared_data->values);
+ shared_data->team_member.team_barrier();
+ }
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::evaluate(
+ const bool evaluate_val,
+ const bool evaluate_grad)
+ {
+ evaluate(
+ (evaluate_val ? EvaluationFlags::values : EvaluationFlags::nothing) |
+ (evaluate_grad ? EvaluationFlags::gradients : EvaluationFlags::nothing));
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::integrate(
+ const EvaluationFlags::EvaluationFlags integration_flag)
+ {
+ internal::EvaluatorTensorProduct<
+ internal::EvaluatorVariant::evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>
+ evaluator_tensor_product(shared_data->team_member,
+ data->shape_values,
+ data->shape_gradients,
+ data->co_shape_gradients);
+
+ if ((integration_flag & EvaluationFlags::values) &&
+ (integration_flag & EvaluationFlags::gradients))
+ {
+ evaluator_tensor_product.integrate_values_and_gradients(
+ shared_data->values, shared_data->gradients);
+ }
+ else if (integration_flag & EvaluationFlags::values)
+ {
+ evaluator_tensor_product.integrate_values(shared_data->values);
+ shared_data->team_member.team_barrier();
+ }
+ else if (integration_flag & EvaluationFlags::gradients)
+ {
+ evaluator_tensor_product.template integrate_gradients<false>(
+ shared_data->values, shared_data->gradients);
+ shared_data->team_member.team_barrier();
+ }
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::integrate(
+ const bool integrate_val,
+ const bool integrate_grad)
+ {
+ integrate(
+ (integrate_val ? EvaluationFlags::values : EvaluationFlags::nothing) |
+ (integrate_grad ? EvaluationFlags::gradients : EvaluationFlags::nothing));
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE typename FEEvaluation<dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components_,
+ Number>::value_type
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::get_value(
+ int q_point) const
+ {
+ return shared_data->values(q_point);
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE typename FEEvaluation<dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components_,
+ Number>::value_type
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ get_dof_value(int q_point) const
+ {
+ return shared_data->values(q_point);
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ submit_value(const value_type &val_in, int q_point)
+ {
+ shared_data->values(q_point) = val_in * data->JxW(cell_id, q_point);
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ submit_dof_value(const value_type &val_in, int q_point)
+ {
+ shared_data->values(q_point) = val_in;
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE typename FEEvaluation<dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components_,
+ Number>::gradient_type
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ get_gradient(int q_point) const
+ {
+ static_assert(n_components_ == 1, "This function only supports FE with one \
+ component");
+
+ gradient_type grad;
+ for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
+ {
+ Number tmp = 0.;
+ for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
+ tmp += data->inv_jacobian(cell_id, q_point, d_2, d_1) *
+ shared_data->gradients(q_point, d_2);
+ grad[d_1] = tmp;
+ }
+
+ return grad;
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ submit_gradient(const gradient_type &grad_in, int q_point)
+ {
+ for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
+ {
+ Number tmp = 0.;
+ for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
+ tmp += data->inv_jacobian(cell_id, q_point, d_1, d_2) * grad_in[d_2];
+ shared_data->gradients(q_point, d_1) =
+ tmp * data->JxW(cell_id, q_point);
+ }
+ }
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ template <typename Functor>
+ DEAL_II_HOST_DEVICE void
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ apply_for_each_quad_point(const Functor &func)
+ {
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_q_points),
+ [&](const int &i) { func(this, i); });
+ shared_data->team_member.team_barrier();
+ }
+
+
+
+#ifndef DOXYGEN
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components_,
+ typename Number>
+ constexpr unsigned int
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+ n_q_points;
+#endif
+} // namespace Portable
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
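To put the interface above in context, here is a minimal sketch (not part of
the library; the class names LaplaceQuad and LocalLaplaceOperator are made up
for illustration) of how FEEvaluation is typically used from the functor that
Portable::MatrixFree::cell_loop() invokes on every cell, in this case for a
plain Laplace operator:
@code
#include <deal.II/base/utilities.h>

#include <deal.II/matrix_free/portable_fe_evaluation.h>
#include <deal.II/matrix_free/portable_matrix_free.h>

// Per-quadrature-point work: test the gradient of the trial function by the
// gradients of the test functions.
template <int dim, int fe_degree>
class LaplaceQuad
{
public:
  DEAL_II_HOST_DEVICE void
  operator()(dealii::Portable::FEEvaluation<dim, fe_degree> *fe_eval,
             const int q_point) const
  {
    fe_eval->submit_gradient(fe_eval->get_gradient(q_point), q_point);
  }
};

// Cell-local operator with the interface that cell_loop() expects.
template <int dim, int fe_degree>
class LocalLaplaceOperator
{
public:
  static const unsigned int n_dofs_1d = fe_degree + 1;
  static const unsigned int n_local_dofs =
    dealii::Utilities::pow(fe_degree + 1, dim);
  static const unsigned int n_q_points =
    dealii::Utilities::pow(fe_degree + 1, dim);

  DEAL_II_HOST_DEVICE void
  operator()(
    const unsigned int /*cell*/,
    const typename dealii::Portable::MatrixFree<dim, double>::Data *gpu_data,
    dealii::Portable::SharedData<dim, double>                      *shared_data,
    const double                                                   *src,
    double                                                         *dst) const
  {
    dealii::Portable::FEEvaluation<dim, fe_degree> fe_eval(gpu_data,
                                                           shared_data);
    fe_eval.read_dof_values(src);
    fe_eval.evaluate(dealii::EvaluationFlags::gradients);
    fe_eval.apply_for_each_quad_point(LaplaceQuad<dim, fe_degree>());
    fe_eval.integrate(dealii::EvaluationFlags::gradients);
    fe_eval.distribute_local_to_global(dst);
  }
};
@endcode
The read-evaluate-quadrature-loop-integrate-distribute sequence mirrors the
host-side FEEvaluation class; the per-quadrature-point work is expressed as a
separate functor so that it can be distributed over the threads of a Kokkos
team.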
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_portable_hanging_nodes_internal_h
+#define dealii_portable_hanging_nodes_internal_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/cuda_size.h>
+
+#include <deal.II/matrix_free/hanging_nodes_internal.h>
+
+#include <Kokkos_Macros.hpp>
+
+DEAL_II_NAMESPACE_OPEN
+namespace Portable
+{
+ namespace internal
+ {
+ //------------------------------------------------------------------------//
+ // Functions for resolving the hanging node constraints on the GPU //
+ //------------------------------------------------------------------------//
+ template <unsigned int size>
+ DEAL_II_HOST_DEVICE inline unsigned int
+ index2(unsigned int i, unsigned int j)
+ {
+ return i + size * j;
+ }
+
+
+
+ template <unsigned int size>
+ DEAL_II_HOST_DEVICE inline unsigned int
+ index3(unsigned int i, unsigned int j, unsigned int k)
+ {
+ return i + size * j + size * size * k;
+ }
+
+
+
+ template <unsigned int fe_degree, unsigned int direction>
+ DEAL_II_HOST_DEVICE inline bool
+ is_constrained_dof_2d(
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ &constraint_mask,
+ const unsigned int x_idx,
+ const unsigned int y_idx)
+ {
+ return ((direction == 0) &&
+ (((constraint_mask & dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::subcell_y) !=
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ unconstrained) ?
+ (y_idx == 0) :
+ (y_idx == fe_degree))) ||
+ ((direction == 1) &&
+ (((constraint_mask & dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::subcell_x) !=
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ unconstrained) ?
+ (x_idx == 0) :
+ (x_idx == fe_degree)));
+ }
+
+ template <unsigned int fe_degree, unsigned int direction>
+ DEAL_II_HOST_DEVICE inline bool
+ is_constrained_dof_3d(
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ &constraint_mask,
+ const unsigned int x_idx,
+ const unsigned int y_idx,
+ const unsigned int z_idx,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1_type,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2_type,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds edge)
+ {
+ const unsigned int face1_idx = (direction == 0) ? y_idx :
+ (direction == 1) ? z_idx :
+ x_idx;
+ const unsigned int face2_idx = (direction == 0) ? z_idx :
+ (direction == 1) ? x_idx :
+ y_idx;
+
+ const bool on_face1 = ((constraint_mask & face1_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) ?
+ (face1_idx == 0) :
+ (face1_idx == fe_degree);
+ const bool on_face2 = ((constraint_mask & face2_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) ?
+ (face2_idx == 0) :
+ (face2_idx == fe_degree);
+ return (
+ (((constraint_mask & face1) != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ on_face1) ||
+ (((constraint_mask & face2) != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ on_face2) ||
+ (((constraint_mask & edge) != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ on_face1 && on_face2));
+ }
+
+
+
+ template <unsigned int fe_degree,
+ unsigned int direction,
+ bool transpose,
+ typename Number>
+ DEAL_II_HOST_DEVICE inline void
+ interpolate_boundary_2d(
+ const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ constraint_weights,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ &constraint_mask,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
+ {
+ constexpr unsigned int n_q_points_1d = fe_degree + 1;
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2);
+
+ // Flag is true if dof is constrained for the given direction and the
+ // given face.
+ const bool constrained_face =
+ (constraint_mask &
+ (((direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_y :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ unconstrained) |
+ ((direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_x :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ unconstrained))) !=
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::unconstrained;
+
+ Number tmp[n_q_points];
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = q_point / n_q_points_1d;
+
+ const auto this_type =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ subcell_x :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y;
+
+ const unsigned int interp_idx = (direction == 0) ? x_idx : y_idx;
+ tmp[q_point] = 0;
+
+ // Flag is true if for the given direction, the dof is constrained
+ // with the right type and is on the correct side (left (= 0) or right
+ // (= fe_degree))
+ const bool constrained_dof =
+ is_constrained_dof_2d<fe_degree, direction>(constraint_mask,
+ x_idx,
+ y_idx);
+
+ if (constrained_face && constrained_dof)
+ {
+ const bool type = (constraint_mask & this_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained;
+
+ if (type)
+ {
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ? index2<n_q_points_1d>(i, y_idx) :
+ index2<n_q_points_1d>(x_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[i * n_q_points_1d + interp_idx] :
+ constraint_weights[interp_idx * n_q_points_1d + i];
+ tmp[q_point] += w * values[real_idx];
+ }
+ }
+ else
+ {
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ? index2<n_q_points_1d>(i, y_idx) :
+ index2<n_q_points_1d>(x_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[(fe_degree - i) * n_q_points_1d +
+ fe_degree - interp_idx] :
+ constraint_weights[(fe_degree - interp_idx) *
+ n_q_points_1d +
+ fe_degree - i];
+ tmp[q_point] += w * values[real_idx];
+ }
+ }
+ }
+ });
+
+ // The synchronization is done for all the threads in one team with
+ // each team being assigned to one element.
+ team_member.team_barrier();
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = q_point / n_q_points_1d;
+ const bool constrained_dof =
+ is_constrained_dof_2d<fe_degree, direction>(
+ constraint_mask, x_idx, y_idx);
+ if (constrained_face && constrained_dof)
+ values[index2<fe_degree + 1>(x_idx, y_idx)] =
+ tmp[q_point];
+ });
+
+ team_member.team_barrier();
+ }
+
+
+
+ template <unsigned int fe_degree,
+ unsigned int direction,
+ bool transpose,
+ typename Number>
+ DEAL_II_HOST_DEVICE inline void
+ interpolate_boundary_3d(
+ const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ constraint_weights,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ constraint_mask,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
+ {
+ constexpr unsigned int n_q_points_1d = fe_degree + 1;
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3);
+
+ const auto this_type =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x :
+ (direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_z;
+ const auto face1_type =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y :
+ (direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_z :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x;
+ const auto face2_type =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_z :
+ (direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y;
+
+ // If computing in x-direction, need to match against face_y or
+ // face_z
+ const auto face1 =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_y :
+ (direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_z :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_x;
+ const auto face2 =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_z :
+ (direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_x :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::face_y;
+ const auto edge =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_x :
+ (direction == 1) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_y :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_z;
+ const auto constrained_face = constraint_mask & (face1 | face2 | edge);
+
+ Number tmp[n_q_points];
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d;
+ const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d);
+
+ const unsigned int interp_idx = (direction == 0) ? x_idx :
+ (direction == 1) ? y_idx :
+ z_idx;
+ const bool constrained_dof =
+ is_constrained_dof_3d<fe_degree, direction>(constraint_mask,
+ x_idx,
+ y_idx,
+ z_idx,
+ face1_type,
+ face2_type,
+ face1,
+ face2,
+ edge);
+ tmp[q_point] = 0;
+ if ((constrained_face != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ constrained_dof)
+ {
+ const bool type = (constraint_mask & this_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained;
+ if (type)
+ {
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ?
+ index3<fe_degree + 1>(i, y_idx, z_idx) :
+ (direction == 1) ?
+ index3<fe_degree + 1>(x_idx, i, z_idx) :
+ index3<fe_degree + 1>(x_idx, y_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[i * n_q_points_1d + interp_idx] :
+ constraint_weights[interp_idx * n_q_points_1d + i];
+ tmp[q_point] += w * values[real_idx];
+ }
+ }
+ else
+ {
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ?
+ index3<n_q_points_1d>(i, y_idx, z_idx) :
+ (direction == 1) ?
+ index3<n_q_points_1d>(x_idx, i, z_idx) :
+ index3<n_q_points_1d>(x_idx, y_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[(fe_degree - i) * n_q_points_1d +
+ fe_degree - interp_idx] :
+ constraint_weights[(fe_degree - interp_idx) *
+ n_q_points_1d +
+ fe_degree - i];
+ tmp[q_point] += w * values[real_idx];
+ }
+ }
+ }
+ });
+
+ // The synchronization is done for all the threads in one team with
+ // each team being assigned to one element.
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d;
+ const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d);
+ const bool constrained_dof =
+ is_constrained_dof_3d<fe_degree, direction>(constraint_mask,
+ x_idx,
+ y_idx,
+ z_idx,
+ face1_type,
+ face2_type,
+ face1,
+ face2,
+ edge);
+ if ((constrained_face != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ constrained_dof)
+ values[index3<fe_degree + 1>(x_idx, y_idx, z_idx)] = tmp[q_point];
+ });
+
+ team_member.team_barrier();
+ }
+
+
+
+ /**
+ * This function resolves the hanging node constraints using tensor products.
+ *
+ * The implementation of this function is explained in Section 3 of
+ * @cite ljungkvist2017matrix and in Section 3.4 of
+ * @cite kronbichler2019multigrid.
+ */
+ template <int dim, int fe_degree, bool transpose, typename Number>
+ DEAL_II_HOST_DEVICE void
+ resolve_hanging_nodes(
+ const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ constraint_weights,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ constraint_mask,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
+ {
+ if (dim == 2)
+ {
+ interpolate_boundary_2d<fe_degree, 0, transpose>(team_member,
+ constraint_weights,
+ constraint_mask,
+ values);
+
+ interpolate_boundary_2d<fe_degree, 1, transpose>(team_member,
+ constraint_weights,
+ constraint_mask,
+ values);
+ }
+ else if (dim == 3)
+ {
+ // Interpolate y and z faces (x-direction)
+ interpolate_boundary_3d<fe_degree, 0, transpose>(team_member,
+ constraint_weights,
+ constraint_mask,
+ values);
+ // Interpolate x and z faces (y-direction)
+ interpolate_boundary_3d<fe_degree, 1, transpose>(team_member,
+ constraint_weights,
+ constraint_mask,
+ values);
+ // Interpolate x and y faces (z-direction)
+ interpolate_boundary_3d<fe_degree, 2, transpose>(team_member,
+ constraint_weights,
+ constraint_mask,
+ values);
+ }
+ }
+ } // namespace internal
+} // namespace Portable
+
+DEAL_II_NAMESPACE_CLOSE
+#endif
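The interpolation routines above only ever apply the one-dimensional matrix of
constraint weights along a single coordinate direction at a time; the
tensor-product structure referenced in the papers cited for
resolve_hanging_nodes() comes from repeating this for every direction. As a
rough host-side illustration (assumed sizes, not library code), the elementary
operation on one line of DoF values is a small matrix-vector product:
@code
#include <array>

// W is the (fe_degree+1) x (fe_degree+1) matrix of 1d constraint weights,
// stored row-major like `constraint_weights` above; line_values holds the DoF
// values along one line of the cell in the interpolation direction.
template <int fe_degree>
std::array<double, fe_degree + 1>
interpolate_line(
  const std::array<double, (fe_degree + 1) * (fe_degree + 1)> &W,
  const std::array<double, fe_degree + 1>                     &line_values)
{
  constexpr unsigned int n = fe_degree + 1;

  std::array<double, n> result{};
  for (unsigned int interp_idx = 0; interp_idx < n; ++interp_idx)
    for (unsigned int i = 0; i < n; ++i)
      // Corresponds to constraint_weights[interp_idx * n_q_points_1d + i]
      // in interpolate_boundary_2d()/interpolate_boundary_3d().
      result[interp_idx] += W[interp_idx * n + i] * line_values[i];
  return result;
}
@endcode
On the device the same sum is computed independently for every constrained DoF
by the threads of a team, which is why the kernels above buffer the results in
`tmp` and only write them back after a team barrier.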
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_portable_matrix_free_h
+#define dealii_portable_matrix_free_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/cuda_size.h>
+#include <deal.II/base/memory_space.h>
+#include <deal.II/base/mpi_stub.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/base/quadrature.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/fe_update_flags.h>
+#include <deal.II/fe/mapping.h>
+
+#include <deal.II/grid/filtered_iterator.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/cuda_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include <Kokkos_Core.hpp>
+
+
+
+DEAL_II_NAMESPACE_OPEN
+
+// Forward declaration
+namespace internal
+{
+ namespace MatrixFreeFunctions
+ {
+ enum class ConstraintKinds : std::uint16_t;
+ }
+} // namespace internal
+
+namespace Portable
+{
+ // forward declaration
+#ifndef DOXYGEN
+ namespace internal
+ {
+ template <int dim, typename Number>
+ class ReinitHelper;
+ }
+#endif
+
+ /**
+ * This class collects all the data that is stored for the matrix free
+ * implementation. The storage scheme is tailored towards several loops
+ * performed with the same data, i.e., typically doing many matrix-vector
+ * products or residual computations on the same mesh.
+ *
+ * This class does not implement any operations involving finite element basis
+ * functions, i.e., regarding the operation performed on the cells. For these
+ * operations, the class FEEvaluation is designed to use the data collected in
+ * this class.
+ *
+ * This class implements a loop over all cells (cell_loop()). This loop is
+ * scheduled in such a way that cells that share degrees of freedom
+ * are not worked on simultaneously, which implies that it is possible to
+ * write to vectors in parallel without having to explicitly synchronize
+ * access to these vectors and matrices. This class does not implement any
+ * shape values; all it does is cache the respective data. To implement
+ * finite element operations, use the class Portable::FEEvaluation.
+ *
+ * This class traverses the cells in a different order than the usual
+ * Triangulation class in deal.II.
+ *
+ * @note Only float and double are supported.
+ *
+ * @ingroup CUDAWrappers
+ * @ingroup Portable
+ */
+ template <int dim, typename Number = double>
+ class MatrixFree : public Subscriptor
+ {
+ public:
+ using jacobian_type = Tensor<2, dim, Tensor<1, dim, Number>>;
+ using point_type = Point<dim, Number>;
+ using CellFilter =
+ FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
+
+ /**
+ * Standardized data struct to pipe additional data to MatrixFree.
+ */
+ struct AdditionalData
+ {
+ /**
+ * Constructor.
+ */
+ AdditionalData(const UpdateFlags mapping_update_flags =
+ update_gradients | update_JxW_values |
+ update_quadrature_points,
+ const bool use_coloring = false,
+ const bool overlap_communication_computation = false)
+ : mapping_update_flags(mapping_update_flags)
+ , use_coloring(use_coloring)
+ , overlap_communication_computation(overlap_communication_computation)
+ {
+#ifndef DEAL_II_MPI_WITH_DEVICE_SUPPORT
+ AssertThrow(
+ overlap_communication_computation == false,
+ ExcMessage(
+ "Overlapping communication and computation requires CUDA-aware MPI."));
+#endif
+ if (overlap_communication_computation == true)
+ AssertThrow(
+ use_coloring == false || overlap_communication_computation == false,
+ ExcMessage(
+ "Overlapping communication and coloring are incompatible options. Only one of them can be enabled."));
+ }
+ /**
+ * This flag is used to determine which quantities should be cached. This
+ * class can cache data needed for gradient computations (inverse
+ * Jacobians), Jacobian determinants (JxW), quadrature points as well as
+ * data for Hessians (derivative of Jacobians). By default, only data for
+ * gradients and Jacobian determinants times quadrature weights, JxW, are
+ * cached. If quadrature points or second derivatives are needed, they
+ * must be specified by this field.
+ */
+ UpdateFlags mapping_update_flags;
+
+ /**
+ * If true, use graph coloring. Otherwise, use atomic operations. Graph
+ * coloring ensures bitwise reproducibility but is slower on Pascal and
+ * newer architectures.
+ */
+ bool use_coloring;
+
+ /**
+ * Overlap MPI communications with computation. This requires CUDA-aware
+ * MPI and use_coloring must be false.
+ */
+ bool overlap_communication_computation;
+ };
+
+ /**
+ * Structure which is passed to the kernel. It is used to pass all the
+ * necessary information from the CPU to the GPU.
+ */
+ struct Data
+ {
+ /**
+ * Kokkos::View of the quadrature points.
+ */
+ Kokkos::View<point_type **, MemorySpace::Default::kokkos_space> q_points;
+
+ /**
+ * Map the position in the local vector to the position in the global
+ * vector.
+ */
+ Kokkos::View<types::global_dof_index **,
+ MemorySpace::Default::kokkos_space>
+ local_to_global;
+
+ /**
+ * Kokkos::View of the inverse Jacobian.
+ */
+ Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>
+ inv_jacobian;
+
+ /**
+ * Kokkos::View of the Jacobian times the weights.
+ */
+ Kokkos::View<Number **, MemorySpace::Default::kokkos_space> JxW;
+
+ /**
+ * Mask deciding where constraints are set on a given cell.
+ */
+ Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
+ MemorySpace::Default::kokkos_space>
+ constraint_mask;
+
+ /**
+ * Values of the shape functions.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
+
+ /**
+ * Gradients of the shape functions.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_gradients;
+
+ /**
+ * Gradients of the shape functions for collocation methods.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ co_shape_gradients;
+
+ /**
+ * Weights used when resolving hanging nodes.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ constraint_weights;
+
+ /**
+ * Number of cells.
+ */
+ unsigned int n_cells;
+
+ /**
+ * Length of the padding.
+ */
+ unsigned int padding_length;
+
+ /**
+ * Row start (including padding).
+ */
+ unsigned int row_start;
+
+ /**
+ * If true, graph coloring has been used and we can simply add into
+ * the destination vector. Otherwise, atomic operations are used.
+ */
+ bool use_coloring;
+
+ /**
+ * Return the local index of a quadrature point. The index is
+ * only unique for a given MPI process.
+ */
+ DEAL_II_HOST_DEVICE unsigned int
+ local_q_point_id(const unsigned int cell,
+ const unsigned int n_q_points,
+ const unsigned int q_point) const
+ {
+ return (row_start / padding_length + cell) * n_q_points + q_point;
+ }
+
+
+ /**
+ * Return the quadrature point.
+ */
+ DEAL_II_HOST_DEVICE
+ typename Portable::MatrixFree<dim, Number>::point_type &
+ get_quadrature_point(const unsigned int cell,
+ const unsigned int q_point) const
+ {
+ return q_points(cell, q_point);
+ }
+ };
+
+ /**
+ * Default constructor.
+ */
+ MatrixFree();
+
+ /**
+ * Return the length of the padding.
+ */
+ unsigned int
+ get_padding_length() const;
+
+ /**
+ * Extracts the information needed to perform loops over cells. The
+ * DoFHandler and AffineConstraints objects describe the layout of
+ * degrees of freedom, the DoFHandler and the mapping describe the
+ * transformation from unit to real cell, and the finite element
+ * underlying the DoFHandler together with the quadrature formula
+ * describe the local operations. This function takes an IteratorFilters
+ * object (predicate) to loop over a subset of the active cells. When using
+ * MPI, the predicate should filter out cells that are not locally owned.
+ */
+ template <typename IteratorFiltersType>
+ void
+ reinit(const Mapping<dim> &mapping,
+ const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const IteratorFiltersType &iterator_filter,
+ const AdditionalData &additional_data = AdditionalData());
+
+ /**
+ * Same as above using Iterators::LocallyOwnedCell() as predicate.
+ */
+ void
+ reinit(const Mapping<dim> &mapping,
+ const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const AdditionalData &additional_data = AdditionalData());
+
+ /**
+ * Initializes the data structures. Same as above but using a Q1 mapping.
+ */
+ void
+ reinit(const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const AdditionalData &additional_data = AdditionalData());
+
+ /**
+ * Return the Data structure associated with @p color.
+ */
+ Data
+ get_data(unsigned int color) const;
+
+ // clang-format off
+ /**
+ * This method runs the loop over all cells and applies the local operation on
+ * each element in parallel. @p func is a functor which is applied on each color.
+ *
+ * @p func needs to define
+ * \code
+ * DEAL_II_HOST_DEVICE void operator()(
+ * const unsigned int cell,
+ * const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ * Portable::SharedData<dim, Number> * shared_data,
+ * const Number * src,
+ * Number * dst) const;
+ * static const unsigned int n_dofs_1d;
+ * static const unsigned int n_local_dofs;
+ * static const unsigned int n_q_points;
+ * \endcode
+ */
+ // clang-format on
+ template <typename Functor, typename VectorType>
+ void
+ cell_loop(const Functor &func,
+ const VectorType &src,
+ VectorType &dst) const;
+
+ /**
+ * This method runs the loop over all cells and applies the local operation on
+ * each element in parallel. This function is very similar to cell_loop()
+ * but it uses a simpler functor.
+ *
+ * @p func needs to define
+ * \code
+ * DEAL_II_HOST_DEVICE void operator()(
+ * const unsigned int cell,
+ * const typename Portable::MatrixFree<dim, Number>::Data *gpu_data);
+ * static const unsigned int n_dofs_1d;
+ * static const unsigned int n_local_dofs;
+ * static const unsigned int n_q_points;
+ * \endcode
+ */
+ template <typename Functor>
+ void
+ evaluate_coefficients(Functor func) const;
+
+ /**
+ * Copy the values of the constrained entries from @p src to @p dst. This is
+ * used to impose zero Dirichlet boundary condition.
+ */
+ template <typename VectorType>
+ void
+ copy_constrained_values(const VectorType &src, VectorType &dst) const;
+
+ /**
+ * Set the entries in @p dst corresponding to constrained values to @p val.
+ * The main purpose of this function is to set the constrained entries of
+ * the source vector used in cell_loop() to zero.
+ */
+ template <typename VectorType>
+ void
+ set_constrained_values(const Number val, VectorType &dst) const;
+
+#ifdef DEAL_II_WITH_CUDA
+ /**
+ * Initialize a serial vector. The size corresponds to the number of degrees
+ * of freedom in the DoFHandler object.
+ */
+ void
+ initialize_dof_vector(
+ LinearAlgebra::CUDAWrappers::Vector<Number> &vec) const;
+#endif
+
+ /**
+ * Initialize a distributed vector. The local elements correspond to the
+ * locally owned degrees of freedom and the ghost elements correspond to the
+ * (additional) locally relevant dofs.
+ */
+ void
+ initialize_dof_vector(
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &vec)
+ const;
+
+ /**
+ * Return the colored graph of locally owned active cells.
+ */
+ const std::vector<std::vector<CellFilter>> &
+ get_colored_graph() const;
+
+ /**
+ * Return the partitioner that represents the locally owned data and the
+ * ghost indices where access is needed for the cell loop. The
+ * partitioner is constructed from the locally owned dofs and ghost dofs
+ * given by the respective fields. If you want to have specific information
+ * about these objects, you can query them with the respective access
+ * functions. If you just want to initialize a (parallel) vector, you should
+ * usually prefer this data structure as the data exchange information can
+ * be reused from one vector to another.
+ */
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &
+ get_vector_partitioner() const;
+
+ /**
+ * Return the DoFHandler.
+ */
+ const DoFHandler<dim> &
+ get_dof_handler() const;
+
+ /**
+ * Return an approximation of the memory consumption of this class in bytes.
+ */
+ std::size_t
+ memory_consumption() const;
+
+ private:
+ /**
+ * Initializes the data structures.
+ */
+ template <typename IteratorFiltersType>
+ void
+ internal_reinit(const Mapping<dim> &mapping,
+ const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const IteratorFiltersType &iterator_filter,
+ const std::shared_ptr<const MPI_Comm> &comm,
+ const AdditionalData additional_data);
+
+ /**
+ * Helper function. Loop over all the cells and apply the functor on each
+ * element in parallel. This function is used when MPI is not used.
+ */
+ template <typename Functor, typename VectorType>
+ void
+ serial_cell_loop(const Functor &func,
+ const VectorType &src,
+ VectorType &dst) const;
+
+ /**
+ * Helper function. Loop over all the cells and apply the functor on each
+ * element in parallel. This function is used when MPI is used.
+ */
+ template <typename Functor>
+ void
+ distributed_cell_loop(
+ const Functor &func,
+ const LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
+ &src,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &dst)
+ const;
+
+#ifdef DEAL_II_WITH_CUDA
+ /**
+ * This function should never be called. Calling it results in an internal
+ * error. This function exists only because cell_loop needs
+ * distributed_cell_loop() to exist for LinearAlgebra::CUDAWrappers::Vector.
+ */
+ template <typename Functor>
+ void
+ distributed_cell_loop(
+ const Functor &func,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+ LinearAlgebra::CUDAWrappers::Vector<Number> &dst) const;
+#endif
+
+ /**
+ * Unique ID associated with the object.
+ */
+ int my_id;
+
+ /**
+ * If true, use graph coloring. Otherwise, use atomic operations. Graph
+ * coloring ensures bitwise reproducibility but is slower on Pascal and
+ * newer architectures.
+ */
+ bool use_coloring;
+
+ /**
+ * Overlap MPI communications with computation. This requires CUDA-aware
+ * MPI and use_coloring must be false.
+ */
+ bool overlap_communication_computation;
+
+ /**
+ * Total number of degrees of freedom.
+ */
+ types::global_dof_index n_dofs;
+
+ /**
+ * Degree of the finite element used.
+ */
+ unsigned int fe_degree;
+
+ /**
+ * Number of degrees of freedom per cell.
+ */
+ unsigned int dofs_per_cell;
+
+ /**
+ * Number of constrained degrees of freedom.
+ */
+ unsigned int n_constrained_dofs;
+
+ /**
+ * Number of quadrature points per cell.
+ */
+ unsigned int q_points_per_cell;
+
+ /**
+ * Number of colors produced by the graph coloring algorithm.
+ */
+ unsigned int n_colors;
+
+ /**
+ * Number of cells in each color.
+ */
+ std::vector<unsigned int> n_cells;
+
+ /**
+ * Vector of Kokkos::View to the quadrature points associated to the cells
+ * of each color.
+ */
+ std::vector<Kokkos::View<point_type **, MemorySpace::Default::kokkos_space>>
+ q_points;
+
+ /**
+ * Map the position in the local vector to the position in the global
+ * vector.
+ */
+ std::vector<Kokkos::View<types::global_dof_index **,
+ MemorySpace::Default::kokkos_space>>
+ local_to_global;
+
+ /**
+ * Vector of Kokkos::View of the inverse Jacobian associated to the cells of
+ * each color.
+ */
+ std::vector<
+ Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>>
+ inv_jacobian;
+
+ /**
+ * Vector of Kokkos::View to the Jacobian times the weights associated to
+ * the cells of each color.
+ */
+ std::vector<Kokkos::View<Number **, MemorySpace::Default::kokkos_space>>
+ JxW;
+
+ /**
+ * Kokkos::View to the constrained degrees of freedom.
+ */
+ Kokkos::View<types::global_dof_index *, MemorySpace::Default::kokkos_space>
+ constrained_dofs;
+
+ /**
+ * Mask deciding where constraints are set on a given cell.
+ */
+ std::vector<
+ Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
+ MemorySpace::Default::kokkos_space>>
+ constraint_mask;
+
+ /**
+ * Values of the shape functions.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
+
+ /**
+ * Gradients of the shape functions.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_gradients;
+
+ /**
+ * Gradients of the shape functions for collocation methods.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ co_shape_gradients;
+
+ /**
+ * Weights used when resolving hanging nodes.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ constraint_weights;
+
+ /**
+ * Shared pointer to a Partitioner for distributed Vectors used in
+ * cell_loop. When MPI is not used the pointer is null.
+ */
+ std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+
+ /**
+ * Length of the padding (closest power of two larger than or equal to
+ * the number of threads).
+ */
+ unsigned int padding_length;
+
+ /**
+ * Row start of each color.
+ */
+ std::vector<unsigned int> row_start;
+
+ /**
+ * Pointer to the DoFHandler associated with the object.
+ */
+ const DoFHandler<dim> *dof_handler;
+
+ /**
+ * Colored graph of locally owned active cells.
+ */
+ std::vector<std::vector<CellFilter>> graph;
+
+ friend class internal::ReinitHelper<dim, Number>;
+ };
+
+
+
+ template <int dim, typename Number>
+ struct SharedData
+ {
+ using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
+ using SharedView1D = Kokkos::View<
+ Number *,
+ MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+ using SharedView2D = Kokkos::View<
+ Number *[dim],
+ MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+
+ DEAL_II_HOST_DEVICE
+ SharedData(const TeamHandle &team_member,
+ const SharedView1D &values,
+ const SharedView2D &gradients)
+ : team_member(team_member)
+ , values(values)
+ , gradients(gradients)
+ {}
+
+ /**
+ * TeamPolicy handle.
+ */
+ TeamHandle team_member;
+
+ /**
+ * Memory for dof and quad values.
+ */
+ SharedView1D values;
+
+ /**
+ * Memory for computed gradients in reference coordinate system.
+ */
+ SharedView2D gradients;
+ };
+
+
+
+ /**
+ * Structure which is passed to the kernel. It is used to pass all the
+ * necessary information from the CPU to the GPU.
+ */
+ template <int dim, typename Number>
+ struct DataHost
+ {
+ /**
+ * Kokkos::View of quadrature points on the host.
+ */
+ typename Kokkos::View<Point<dim, Number> **,
+ MemorySpace::Default::kokkos_space>::HostMirror
+ q_points;
+
+ /**
+ * Map the position in the local vector to the position in the global
+ * vector.
+ */
+ typename Kokkos::View<types::global_dof_index **,
+ MemorySpace::Default::kokkos_space>::HostMirror
+ local_to_global;
+
+ /**
+ * Kokkos::View of inverse Jacobians on the host.
+ */
+ typename Kokkos::View<Number **[dim][dim],
+ MemorySpace::Default::kokkos_space>::HostMirror
+ inv_jacobian;
+
+ /**
+ * Kokkos::View of Jacobian times the weights on the host.
+ */
+ typename Kokkos::View<Number **,
+ MemorySpace::Default::kokkos_space>::HostMirror JxW;
+
+ /**
+ * Number of cells.
+ */
+ unsigned int n_cells;
+
+ /**
+ * Length of the padding.
+ */
+ unsigned int padding_length;
+
+ /**
+ * Row start (including padding).
+ */
+ unsigned int row_start;
+
+ /**
+ * Mask deciding where constraints are set on a given cell.
+ */
+ typename Kokkos::View<
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
+ MemorySpace::Default::kokkos_space>::HostMirror constraint_mask;
+
+ /**
+ * If true, graph coloring has been used and we can simply add into
+ * the destination vector. Otherwise, atomic operations are used.
+ */
+ bool use_coloring;
+
+
+
+ /**
+ * This function is the host version of local_q_point_id().
+ */
+ unsigned int
+ local_q_point_id(const unsigned int cell,
+ const unsigned int n_q_points,
+ const unsigned int q_point) const
+ {
+ return (row_start / padding_length + cell) * n_q_points + q_point;
+ }
+
+
+
+ /**
+ * This function is the host version of get_quadrature_point().
+ */
+ Point<dim, Number>
+ get_quadrature_point(const unsigned int cell,
+ const unsigned int q_point) const
+ {
+ return q_points(cell, q_point);
+ }
+ };
+
+
+
+ /**
+ * Copy @p data from the @ref GlossDevice "device" to the host. @p update_flags should be
+ * identical to the flags used in MatrixFree::AdditionalData.
+ *
+ * @relates Portable::MatrixFree
+ */
+ template <int dim, typename Number>
+ DataHost<dim, Number>
+ copy_mf_data_to_host(
+ const typename dealii::Portable::MatrixFree<dim, Number>::Data &data,
+ const UpdateFlags &update_flags)
+ {
+ DataHost<dim, Number> data_host;
+
+ data_host.n_cells = data.n_cells;
+ data_host.padding_length = data.padding_length;
+ data_host.row_start = data.row_start;
+ data_host.use_coloring = data.use_coloring;
+
+ if (update_flags & update_quadrature_points)
+ {
+ data_host.q_points = Kokkos::create_mirror(data.q_points);
+ Kokkos::deep_copy(data_host.q_points, data.q_points);
+ }
+
+ data_host.local_to_global = Kokkos::create_mirror(data.local_to_global);
+ Kokkos::deep_copy(data_host.local_to_global, data.local_to_global);
+
+ if (update_flags & update_gradients)
+ {
+ data_host.inv_jacobian = Kokkos::create_mirror(data.inv_jacobian);
+ Kokkos::deep_copy(data_host.inv_jacobian, data.inv_jacobian);
+ }
+
+ if (update_flags & update_JxW_values)
+ {
+ data_host.JxW = Kokkos::create_mirror(data.JxW);
+ Kokkos::deep_copy(data_host.JxW, data.JxW);
+ }
+
+ data_host.constraint_mask = Kokkos::create_mirror(data.constraint_mask);
+ Kokkos::deep_copy(data_host.constraint_mask, data.constraint_mask);
+
+ return data_host;
+ }
+
+
+ /*----------------------- Inline functions ---------------------------------*/
+
+#ifndef DOXYGEN
+
+ template <int dim, typename Number>
+ inline const std::vector<std::vector<
+ FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>>> &
+ MatrixFree<dim, Number>::get_colored_graph() const
+ {
+ return graph;
+ }
+
+
+
+ template <int dim, typename Number>
+ inline const std::shared_ptr<const Utilities::MPI::Partitioner> &
+ MatrixFree<dim, Number>::get_vector_partitioner() const
+ {
+ return partitioner;
+ }
+
+
+
+ template <int dim, typename Number>
+ inline const DoFHandler<dim> &
+ MatrixFree<dim, Number>::get_dof_handler() const
+ {
+ Assert(dof_handler != nullptr, ExcNotInitialized());
+
+ return *dof_handler;
+ }
+
+#endif
+
+} // namespace Portable
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
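A short host-side sketch of the life cycle of this class may be helpful; it is
not library code, uses an arbitrary polynomial degree and arbitrary update
flags, and reuses the hypothetical LocalLaplaceOperator functor sketched after
portable_fe_evaluation.h above:
@code
#include <deal.II/base/quadrature_lib.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/fe/mapping.h>

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/la_parallel_vector.h>

#include <deal.II/matrix_free/portable_matrix_free.h>

template <int dim>
void
apply_laplace_once(const dealii::Mapping<dim>              &mapping,
                   const dealii::DoFHandler<dim>           &dof_handler,
                   const dealii::AffineConstraints<double> &constraints)
{
  // The degree must match the finite element used by dof_handler.
  constexpr int fe_degree = 2;

  // Gather the per-cell data needed by the device kernels.
  typename dealii::Portable::MatrixFree<dim, double>::AdditionalData
    additional_data;
  additional_data.mapping_update_flags =
    dealii::update_gradients | dealii::update_JxW_values;

  dealii::Portable::MatrixFree<dim, double> mf_data;
  mf_data.reinit(mapping,
                 dof_handler,
                 constraints,
                 dealii::QGauss<1>(fe_degree + 1),
                 additional_data);

  // Vectors living in the default (device) memory space.
  dealii::LinearAlgebra::distributed::Vector<double,
                                             dealii::MemorySpace::Default>
    src, dst;
  mf_data.initialize_dof_vector(src);
  mf_data.initialize_dof_vector(dst);
  // ... fill src with the desired nodal values ...

  dst = 0.;
  mf_data.cell_loop(LocalLaplaceOperator<dim, fe_degree>(), src, dst);
  // Let the operator act as the identity on constrained entries.
  mf_data.copy_constrained_values(src, dst);
}
@endcode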
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_portable_matrix_free_templates_h
+#define dealii_portable_matrix_free_templates_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/cuda_size.h>
+#include <deal.II/base/graph_coloring.h>
+#include <deal.II/base/memory_space.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/mapping_q1.h>
+
+#include <deal.II/matrix_free/portable_hanging_nodes_internal.h>
+#include <deal.II/matrix_free/portable_matrix_free.h>
+#include <deal.II/matrix_free/shape_info.h>
+
+#include <Kokkos_Core.hpp>
+
+#include <cmath>
+#include <functional>
+#include <string>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Portable
+{
+ namespace internal
+ {
+ /**
+ * Helper class to (re)initialize a MatrixFree object.
+ */
+ template <int dim, typename Number>
+ class ReinitHelper
+ {
+ public:
+ ReinitHelper(
+ MatrixFree<dim, Number> *data,
+ const Mapping<dim> &mapping,
+ const FiniteElement<dim, dim> &fe,
+ const Quadrature<1> &quad,
+ const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number>
+ &shape_info,
+ const DoFHandler<dim> &dof_handler,
+ const UpdateFlags &update_flags);
+
+ void
+ resize(const unsigned int n_colors);
+
+ template <typename CellFilter>
+ void
+ fill_data(
+ const unsigned int color,
+ const std::vector<CellFilter> &graph,
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+ private:
+ MatrixFree<dim, Number> *data;
+ // Local buffer
+ std::vector<types::global_dof_index> local_dof_indices;
+ FEValues<dim> fe_values;
+ // Convert the default dof numbering to a lexicographic one
+ const std::vector<unsigned int> &lexicographic_inv;
+ std::vector<types::global_dof_index> lexicographic_dof_indices;
+ const unsigned int fe_degree;
+ const unsigned int dofs_per_cell;
+ const unsigned int q_points_per_cell;
+ const UpdateFlags &update_flags;
+ const unsigned int padding_length;
+ dealii::internal::MatrixFreeFunctions::HangingNodes<dim> hanging_nodes;
+ };
+
+
+
+ template <int dim, typename Number>
+ ReinitHelper<dim, Number>::ReinitHelper(
+ MatrixFree<dim, Number> *data,
+ const Mapping<dim> &mapping,
+ const FiniteElement<dim> &fe,
+ const Quadrature<1> &quad,
+ const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number>
+ &shape_info,
+ const DoFHandler<dim> &dof_handler,
+ const UpdateFlags &update_flags)
+ : data(data)
+ , fe_values(mapping,
+ fe,
+ Quadrature<dim>(quad),
+ update_inverse_jacobians | update_quadrature_points |
+ update_values | update_gradients | update_JxW_values)
+ , lexicographic_inv(shape_info.lexicographic_numbering)
+ , fe_degree(data->fe_degree)
+ , dofs_per_cell(data->dofs_per_cell)
+ , q_points_per_cell(data->q_points_per_cell)
+ , update_flags(update_flags)
+ , padding_length(data->get_padding_length())
+ , hanging_nodes(dof_handler.get_triangulation())
+ {
+ local_dof_indices.resize(data->dofs_per_cell);
+ lexicographic_dof_indices.resize(dofs_per_cell);
+ fe_values.always_allow_check_for_cell_similarity(true);
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ ReinitHelper<dim, Number>::resize(const unsigned int n_colors)
+ {
+ // We need at least three colors when we are using CUDA-aware MPI and
+ // overlapping the communication
+ data->n_cells.resize(std::max(n_colors, 3U), 0);
+ data->local_to_global.resize(n_colors);
+ data->constraint_mask.resize(n_colors);
+
+ data->row_start.resize(n_colors);
+
+ if (update_flags & update_quadrature_points)
+ data->q_points.resize(n_colors);
+
+ if (update_flags & update_JxW_values)
+ data->JxW.resize(n_colors);
+
+ if (update_flags & update_gradients)
+ data->inv_jacobian.resize(n_colors);
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename CellFilter>
+ void
+ ReinitHelper<dim, Number>::fill_data(
+ const unsigned int color,
+ const std::vector<CellFilter> &graph,
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
+ {
+ const unsigned int n_cells = data->n_cells[color];
+
+ // Create the Views
+ data->local_to_global[color] =
+ Kokkos::View<types::global_dof_index **,
+ MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("local_to_global_" + std::to_string(color),
+ Kokkos::WithoutInitializing),
+ n_cells,
+ dofs_per_cell);
+
+ if (update_flags & update_quadrature_points)
+ data->q_points[color] =
+ Kokkos::View<Point<dim, Number> **,
+ MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("q_points_" + std::to_string(color),
+ Kokkos::WithoutInitializing),
+ n_cells,
+ q_points_per_cell);
+
+ if (update_flags & update_JxW_values)
+ data->JxW[color] =
+ Kokkos::View<Number **, MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("JxW_" + std::to_string(color),
+ Kokkos::WithoutInitializing),
+ n_cells,
+ dofs_per_cell);
+
+ if (update_flags & update_gradients)
+ data->inv_jacobian[color] =
+ Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("inv_jacobian_" + std::to_string(color),
+ Kokkos::WithoutInitializing),
+ n_cells,
+ dofs_per_cell);
+
+ // Initialize to zero, i.e., unconstrained cell
+ data->constraint_mask[color] =
+ Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *,
+ MemorySpace::Default::kokkos_space>(
+ "constraint_mask_" + std::to_string(color), n_cells);
+
+ // Create the host mirror Views and fill them
+ auto constraint_mask_host =
+ Kokkos::create_mirror_view(data->constraint_mask[color]);
+
+ typename std::remove_reference_t<
+ decltype(data->q_points[color])>::HostMirror q_points_host;
+ typename std::remove_reference_t<decltype(data->JxW[color])>::HostMirror
+ JxW_host;
+ typename std::remove_reference_t<
+ decltype(data->inv_jacobian[color])>::HostMirror inv_jacobian_host;
+#if KOKKOS_VERSION >= 30600
+ auto local_to_global_host =
+ Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
+ data->local_to_global[color]);
+ if (update_flags & update_quadrature_points)
+ q_points_host = Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
+ data->q_points[color]);
+ if (update_flags & update_JxW_values)
+ JxW_host = Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
+ data->JxW[color]);
+ if (update_flags & update_gradients)
+ inv_jacobian_host =
+ Kokkos::create_mirror_view(Kokkos::WithoutInitializing,
+ data->inv_jacobian[color]);
+#else
+ auto local_to_global_host =
+ Kokkos::create_mirror_view(data->local_to_global[color]);
+ if (update_flags & update_quadrature_points)
+ q_points_host = Kokkos::create_mirror_view(data->q_points[color]);
+ if (update_flags & update_JxW_values)
+ JxW_host = Kokkos::create_mirror_view(data->JxW[color]);
+ if (update_flags & update_gradients)
+ inv_jacobian_host =
+ Kokkos::create_mirror_view(data->inv_jacobian[color]);
+#endif
+
+ auto cell = graph.cbegin(), end_cell = graph.cend();
+ for (unsigned int cell_id = 0; cell != end_cell; ++cell, ++cell_id)
+ {
+ (*cell)->get_dof_indices(local_dof_indices);
+ // When using MPI, we need to transform the local_dof_indices, which
+ // contain global numbers of dof indices in the MPI universe, to get
+ // local (to the current MPI process) dof indices.
+ if (partitioner)
+ for (auto &index : local_dof_indices)
+ index = partitioner->global_to_local(index);
+
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
+ lexicographic_dof_indices[i] =
+ local_dof_indices[lexicographic_inv[i]];
+
+ const ArrayView<
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds>
+ cell_id_view(constraint_mask_host[cell_id]);
+
+ hanging_nodes.setup_constraints(*cell,
+ partitioner,
+ {lexicographic_inv},
+ lexicographic_dof_indices,
+ cell_id_view);
+
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
+ local_to_global_host(cell_id, i) = lexicographic_dof_indices[i];
+
+ fe_values.reinit(*cell);
+
+ // Quadrature points
+ if (update_flags & update_quadrature_points)
+ {
+ for (unsigned int i = 0; i < q_points_per_cell; ++i)
+ q_points_host(cell_id, i) = fe_values.quadrature_point(i);
+ }
+
+ if (update_flags & update_JxW_values)
+ {
+ for (unsigned int i = 0; i < q_points_per_cell; ++i)
+ JxW_host(cell_id, i) = fe_values.JxW(i);
+ }
+
+ if (update_flags & update_gradients)
+ {
+ for (unsigned int i = 0; i < q_points_per_cell; ++i)
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ inv_jacobian_host(cell_id, i, d, e) =
+ fe_values.inverse_jacobian(i)[d][e];
+ }
+ }
+
+ // Copy the data to the device
+ Kokkos::deep_copy(data->constraint_mask[color], constraint_mask_host);
+ Kokkos::deep_copy(data->local_to_global[color], local_to_global_host);
+ if (update_flags & update_quadrature_points)
+ Kokkos::deep_copy(data->q_points[color], q_points_host);
+ if (update_flags & update_JxW_values)
+ Kokkos::deep_copy(data->JxW[color], JxW_host);
+ if (update_flags & update_gradients)
+ Kokkos::deep_copy(data->inv_jacobian[color], inv_jacobian_host);
+ }
+
+
+
+ template <int dim, typename number>
+ std::vector<types::global_dof_index>
+ get_conflict_indices(
+ const FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
+ &cell,
+ const AffineConstraints<number> &constraints)
+ {
+ std::vector<types::global_dof_index> local_dof_indices(
+ cell->get_fe().n_dofs_per_cell());
+ cell->get_dof_indices(local_dof_indices);
+ constraints.resolve_indices(local_dof_indices);
+
+ return local_dof_indices;
+ }
+
+
+
+ template <typename VectorType>
+ struct VectorLocalSize
+ {
+ static unsigned int
+ get(const VectorType &vec)
+ {
+ return vec.locally_owned_size();
+ }
+ };
+
+#ifdef DEAL_II_WITH_CUDA
+ template <>
+ struct VectorLocalSize<LinearAlgebra::CUDAWrappers::Vector<double>>
+ {
+ static unsigned int
+ get(const LinearAlgebra::CUDAWrappers::Vector<double> &vec)
+ {
+ return vec.size();
+ }
+ };
+
+ template <>
+ struct VectorLocalSize<LinearAlgebra::CUDAWrappers::Vector<float>>
+ {
+ static unsigned int
+ get(const LinearAlgebra::CUDAWrappers::Vector<float> &vec)
+ {
+ return vec.size();
+ }
+ };
+#endif
+
+
+
+ template <int dim, typename Number, typename Functor>
+ struct ApplyKernel
+ {
+ using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+ using SharedView1D =
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+ using SharedView2D =
+ Kokkos::View<Number *[dim],
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+
+ ApplyKernel(Functor func,
+ const typename MatrixFree<dim, Number>::Data gpu_data,
+ Number *const src,
+ Number *dst)
+ : func(func)
+ , gpu_data(gpu_data)
+ , src(src)
+ , dst(dst)
+ {}
+
+ Functor func;
+ const typename MatrixFree<dim, Number>::Data gpu_data;
+ Number *const src;
+ Number *dst;
+
+
+ // Provide the shared memory capacity. This function takes the team_size
+ // as an argument, which allows team_size dependent allocations.
+ size_t
+ team_shmem_size(int /*team_size*/) const
+ {
+ return SharedView1D::shmem_size(Functor::n_local_dofs) +
+ SharedView2D::shmem_size(Functor::n_local_dofs);
+ }
+
+
+ DEAL_II_HOST_DEVICE
+ void
+ operator()(const TeamHandle &team_member) const
+ {
+ // Get the scratch memory
+ SharedView1D values(team_member.team_shmem(), Functor::n_local_dofs);
+ SharedView2D gradients(team_member.team_shmem(), Functor::n_local_dofs);
+
+ SharedData<dim, Number> shared_data(team_member, values, gradients);
+ func(team_member.league_rank(), &gpu_data, &shared_data, src, dst);
+ }
+ };
+ } // namespace internal
+
+
+
+ template <int dim, typename Number>
+ MatrixFree<dim, Number>::MatrixFree()
+ : my_id(-1)
+ , n_dofs(0)
+ , padding_length(0)
+ , dof_handler(nullptr)
+ {}
+
+
+
+ template <int dim, typename Number>
+ template <typename IteratorFiltersType>
+ void
+ MatrixFree<dim, Number>::reinit(const Mapping<dim> &mapping,
+ const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const IteratorFiltersType &iterator_filter,
+ const AdditionalData &additional_data)
+ {
+ const auto &triangulation = dof_handler.get_triangulation();
+ if (const auto parallel_triangulation =
+ dynamic_cast<const parallel::TriangulationBase<dim> *>(
+ &triangulation))
+ internal_reinit(mapping,
+ dof_handler,
+ constraints,
+ quad,
+ iterator_filter,
+ std::make_shared<const MPI_Comm>(
+ parallel_triangulation->get_communicator()),
+ additional_data);
+ else
+ internal_reinit(mapping,
+ dof_handler,
+ constraints,
+ quad,
+ iterator_filter,
+ nullptr,
+ additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::reinit(const Mapping<dim> &mapping,
+ const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const AdditionalData &additional_data)
+ {
+ IteratorFilters::LocallyOwnedCell locally_owned_cell_filter;
+ reinit(mapping,
+ dof_handler,
+ constraints,
+ quad,
+ locally_owned_cell_filter,
+ additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::reinit(const DoFHandler<dim> &dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const AdditionalData &additional_data)
+ {
+ reinit(StaticMappingQ1<dim>::mapping,
+ dof_handler,
+ constraints,
+ quad,
+ additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ typename MatrixFree<dim, Number>::Data
+ MatrixFree<dim, Number>::get_data(unsigned int color) const
+ {
+ Data data_copy;
+ if (q_points.size() > 0)
+ data_copy.q_points = q_points[color];
+ if (inv_jacobian.size() > 0)
+ data_copy.inv_jacobian = inv_jacobian[color];
+ if (JxW.size() > 0)
+ data_copy.JxW = JxW[color];
+ data_copy.local_to_global = local_to_global[color];
+ data_copy.constraint_mask = constraint_mask[color];
+ data_copy.shape_values = shape_values;
+ data_copy.shape_gradients = shape_gradients;
+ data_copy.co_shape_gradients = co_shape_gradients;
+ data_copy.constraint_weights = constraint_weights;
+ data_copy.n_cells = n_cells[color];
+ data_copy.padding_length = padding_length;
+ data_copy.row_start = row_start[color];
+ data_copy.use_coloring = use_coloring;
+
+ return data_copy;
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename VectorType>
+ void
+ MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
+ VectorType &dst) const
+ {
+ static_assert(
+ std::is_same_v<Number, typename VectorType::value_type>,
+ "VectorType::value_type and Number should be of the same type.");
+ Assert(src.size() == dst.size(),
+ ExcMessage("src and dst vectors have different size."));
+ // FIXME When using C++17, we can use KOKKOS_CLASS_LAMBDA and this
+ // work-around can be removed.
+ auto constr_dofs = constrained_dofs;
+ const unsigned int size = internal::VectorLocalSize<VectorType>::get(dst);
+ const Number *src_ptr = src.get_values();
+ Number *dst_ptr = dst.get_values();
+ Kokkos::parallel_for(
+ "dealii::copy_constrained_values",
+ Kokkos::RangePolicy<MemorySpace::Default::kokkos_space::execution_space>(
+ 0, n_constrained_dofs),
+ KOKKOS_LAMBDA(int dof) {
+ // When working with distributed vectors, the constrained dofs are
+ // computed for ghosted vectors but we want to copy the values of the
+ // constrained dofs of non-ghosted vectors.
+ const auto constrained_dof = constr_dofs[dof];
+ if (constrained_dof < size)
+ dst_ptr[constrained_dof] = src_ptr[constrained_dof];
+ });
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename VectorType>
+ void
+ MatrixFree<dim, Number>::set_constrained_values(Number val,
+ VectorType &dst) const
+ {
+ static_assert(
+ std::is_same_v<Number, typename VectorType::value_type>,
+ "VectorType::value_type and Number should be of the same type.");
+ Number *dst_ptr = dst.get_values();
+ // FIXME When using C++17, we can use KOKKOS_CLASS_LAMBDA and this
+ // work-around can be removed.
+ auto constr_dofs = constrained_dofs;
+ // When working with distributed vectors, the constrained dofs are
+ // computed for ghosted vectors but we want to set the values of the
+ // constrained dofs of non-ghosted vectors.
+ const unsigned int size =
+ partitioner ? dst.locally_owned_size() : dst.size();
+ Kokkos::parallel_for(
+ "dealii::set_constrained_values",
+ Kokkos::RangePolicy<MemorySpace::Default::kokkos_space::execution_space>(
+ 0, n_constrained_dofs),
+ KOKKOS_LAMBDA(int dof) {
+ if (constr_dofs[dof] < size)
+ dst_ptr[constr_dofs[dof]] = val;
+ });
+ }
+
+
+#ifdef DEAL_II_WITH_CUDA
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::initialize_dof_vector(
+ LinearAlgebra::CUDAWrappers::Vector<Number> &vec) const
+ {
+ vec.reinit(n_dofs);
+ }
+#endif
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::initialize_dof_vector(
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &vec) const
+ {
+ if (partitioner)
+ vec.reinit(partitioner);
+ else
+ vec.reinit(n_dofs);
+ }
+
+
+
+ template <int dim, typename Number>
+ unsigned int
+ MatrixFree<dim, Number>::get_padding_length() const
+ {
+ return padding_length;
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename Functor, typename VectorType>
+ void
+ MatrixFree<dim, Number>::cell_loop(const Functor &func,
+ const VectorType &src,
+ VectorType &dst) const
+ {
+ if (partitioner)
+ distributed_cell_loop(func, src, dst);
+ else
+ serial_cell_loop(func, src, dst);
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename Functor>
+ void
+ MatrixFree<dim, Number>::evaluate_coefficients(Functor func) const
+ {
+ for (unsigned int i = 0; i < n_colors; ++i)
+ if (n_cells[i] > 0)
+ {
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ auto color_data = get_data(i);
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::evaluate_coeff",
+ Kokkos::MDRangePolicy<
+ MemorySpace::Default::kokkos_space::execution_space,
+ Kokkos::Rank<2>>(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ {0, 0},
+ {n_cells[i], Functor::n_q_points}),
+ KOKKOS_LAMBDA(const int cell, const int q) {
+ func(&color_data, cell, q);
+ });
+ }
+ }
+
+
+
+ template <int dim, typename Number>
+ std::size_t
+ MatrixFree<dim, Number>::memory_consumption() const
+ {
+ // First compute the size of n_cells, row_start,
+ // and constrained_dofs
+ std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
+ n_constrained_dofs * sizeof(unsigned int);
+
+ // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
+ // FIXME
+ for (unsigned int i = 0; i < n_colors; ++i)
+ {
+ bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
+ n_cells[i] * padding_length * dim * dim * sizeof(Number) +
+ n_cells[i] * padding_length * sizeof(Number) +
+ n_cells[i] * padding_length * sizeof(point_type) +
+ n_cells[i] * sizeof(unsigned int);
+ }
+
+ return bytes;
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename IteratorFiltersType>
+ void
+ MatrixFree<dim, Number>::internal_reinit(
+ const Mapping<dim> &mapping,
+ const DoFHandler<dim> &dof_handler_,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> &quad,
+ const IteratorFiltersType &iterator_filter,
+ const std::shared_ptr<const MPI_Comm> &comm,
+ const AdditionalData additional_data)
+ {
+ dof_handler = &dof_handler_;
+
+ UpdateFlags update_flags = additional_data.mapping_update_flags;
+ if (update_flags & update_gradients)
+ update_flags |= update_JxW_values;
+
+ this->use_coloring = additional_data.use_coloring;
+ this->overlap_communication_computation =
+ additional_data.overlap_communication_computation;
+
+ n_dofs = dof_handler->n_dofs();
+
+ const FiniteElement<dim> &fe = dof_handler->get_fe();
+
+ fe_degree = fe.degree;
+ // TODO this should be a templated parameter
+ const unsigned int n_dofs_1d = fe_degree + 1;
+ const unsigned int n_q_points_1d = quad.size();
+
+ Assert(n_dofs_1d == n_q_points_1d,
+ ExcMessage("n_q_points_1d must be equal to fe_degree+1."));
+
+ // Set the padding length to the closest power of two larger than or equal
+ // to the number of degrees of freedom per cell.
+ padding_length = 1 << static_cast<unsigned int>(
+ std::ceil(dim * std::log2(fe_degree + 1.)));
+
+ dofs_per_cell = fe.n_dofs_per_cell();
+ q_points_per_cell = Utilities::fixed_power<dim>(n_q_points_1d);
+
+ ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(quad,
+ fe);
+
+ unsigned int size_shape_values = n_dofs_1d * n_q_points_1d;
+
+ shape_values = Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing),
+ size_shape_values);
+ Kokkos::deep_copy(shape_values,
+ Kokkos::View<Number *, Kokkos::HostSpace>(
+ shape_info.data.front().shape_values.data(),
+ size_shape_values));
+
+ if (update_flags & update_gradients)
+ {
+ shape_gradients =
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("shape_gradients", Kokkos::WithoutInitializing),
+ size_shape_values);
+ Kokkos::deep_copy(shape_gradients,
+ Kokkos::View<Number *, Kokkos::HostSpace>(
+ shape_info.data.front().shape_gradients.data(),
+ size_shape_values));
+
+
+ co_shape_gradients =
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("co_shape_gradients",
+ Kokkos::WithoutInitializing),
+ n_q_points_1d * n_q_points_1d);
+ Kokkos::deep_copy(
+ co_shape_gradients,
+ Kokkos::View<Number *, Kokkos::HostSpace>(
+ shape_info.data.front().shape_gradients_collocation.data(),
+ n_q_points_1d * n_q_points_1d));
+ }
+
+ internal::ReinitHelper<dim, Number> helper(
+ this, mapping, fe, quad, shape_info, *dof_handler, update_flags);
+
+ const unsigned int constraint_weights_size =
+ shape_info.data.front().subface_interpolation_matrices[0].size();
+ constraint_weights =
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("constraint_weights", Kokkos::WithoutInitializing),
+ constraint_weights_size);
+ auto constraint_weights_host =
+ Kokkos::create_mirror_view(constraint_weights);
+ for (unsigned int i = 0; i < constraint_weights_size; ++i)
+ {
+ constraint_weights_host[i] =
+ shape_info.data.front().subface_interpolation_matrices[0][i];
+ }
+ Kokkos::deep_copy(constraint_weights, constraint_weights_host);
+
+ // Create a graph coloring
+ CellFilter begin(iterator_filter, dof_handler->begin_active());
+ CellFilter end(iterator_filter, dof_handler->end());
+
+ if (begin != end)
+ {
+ if (additional_data.use_coloring)
+ {
+ const auto fun = [&](const CellFilter &filter) {
+ return internal::get_conflict_indices<dim, Number>(filter,
+ constraints);
+ };
+ graph = GraphColoring::make_graph_coloring(begin, end, fun);
+ }
+ else
+ {
+ graph.clear();
+ if (additional_data.overlap_communication_computation)
+ {
+ // We create one color (1) with the cells on the boundary of the
+ // local domain and two colors (0 and 2) with the interior
+ // cells.
+ graph.resize(3, std::vector<CellFilter>());
+
+ std::vector<bool> ghost_vertices(
+ dof_handler->get_triangulation().n_vertices(), false);
+
+ for (const auto &cell :
+ dof_handler->get_triangulation().active_cell_iterators())
+ if (cell->is_ghost())
+ for (unsigned int i = 0;
+ i < GeometryInfo<dim>::vertices_per_cell;
+ i++)
+ ghost_vertices[cell->vertex_index(i)] = true;
+
+ std::vector<dealii::FilteredIterator<dealii::TriaActiveIterator<
+ dealii::DoFCellAccessor<dim, dim, false>>>>
+ inner_cells;
+
+ for (auto cell = begin; cell != end; ++cell)
+ {
+ bool ghost_vertex = false;
+
+ for (unsigned int i = 0;
+ i < GeometryInfo<dim>::vertices_per_cell;
+ i++)
+ if (ghost_vertices[cell->vertex_index(i)])
+ {
+ ghost_vertex = true;
+ break;
+ }
+
+ if (ghost_vertex)
+ graph[1].emplace_back(cell);
+ else
+ inner_cells.emplace_back(cell);
+ }
+ for (unsigned i = 0; i < inner_cells.size(); ++i)
+ if (i < inner_cells.size() / 2)
+ graph[0].emplace_back(inner_cells[i]);
+ else
+ graph[2].emplace_back(inner_cells[i]);
+ }
+ else
+ {
+ // If we are not using coloring, all the cells belong to the
+ // same color.
+ graph.resize(1, std::vector<CellFilter>());
+ for (auto cell = begin; cell != end; ++cell)
+ graph[0].emplace_back(cell);
+ }
+ }
+ }
+ n_colors = graph.size();
+
+ helper.resize(n_colors);
+
+ IndexSet locally_relevant_dofs;
+ if (comm)
+ {
+ locally_relevant_dofs =
+ DoFTools::extract_locally_relevant_dofs(*dof_handler);
+ partitioner = std::make_shared<Utilities::MPI::Partitioner>(
+ dof_handler->locally_owned_dofs(), locally_relevant_dofs, *comm);
+ }
+ for (unsigned int i = 0; i < n_colors; ++i)
+ {
+ n_cells[i] = graph[i].size();
+ helper.fill_data(i, graph[i], partitioner);
+ }
+
+ // Setup row starts
+ if (n_colors > 0)
+ row_start[0] = 0;
+ for (unsigned int i = 1; i < n_colors; ++i)
+ row_start[i] = row_start[i - 1] + n_cells[i - 1] * get_padding_length();
+
+ // Constrained indices
+ n_constrained_dofs = constraints.n_constraints();
+
+ if (n_constrained_dofs != 0)
+ {
+ std::vector<dealii::types::global_dof_index> constrained_dofs_host(
+ n_constrained_dofs);
+
+ if (partitioner)
+ {
+ const unsigned int n_local_dofs =
+ locally_relevant_dofs.n_elements();
+ unsigned int i_constraint = 0;
+ for (unsigned int i = 0; i < n_local_dofs; ++i)
+ {
+ // is_constrained uses a global dof id but
+ // constrained_dofs_host works on the local id
+ if (constraints.is_constrained(partitioner->local_to_global(i)))
+ {
+ constrained_dofs_host[i_constraint] = i;
+ ++i_constraint;
+ }
+ }
+ }
+ else
+ {
+ const unsigned int n_local_dofs = dof_handler->n_dofs();
+ unsigned int i_constraint = 0;
+ for (unsigned int i = 0; i < n_local_dofs; ++i)
+ {
+ if (constraints.is_constrained(i))
+ {
+ constrained_dofs_host[i_constraint] = i;
+ ++i_constraint;
+ }
+ }
+ }
+
+ constrained_dofs = Kokkos::View<types::global_dof_index *,
+ MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("constrained_dofs", Kokkos::WithoutInitializing),
+ n_constrained_dofs);
+
+ Kokkos::View<types::global_dof_index *,
+ MemorySpace::Default::kokkos_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>
+ constrained_dofs_host_view(constrained_dofs_host.data(),
+ constrained_dofs_host.size());
+ Kokkos::deep_copy(constrained_dofs, constrained_dofs_host_view);
+ }
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename Functor, typename VectorType>
+ void
+ MatrixFree<dim, Number>::serial_cell_loop(const Functor &func,
+ const VectorType &src,
+ VectorType &dst) const
+ {
+ // Execute the loop on the cells
+ for (unsigned int color = 0; color < n_colors; ++color)
+ if (n_cells[color] > 0)
+ {
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ n_cells[color],
+ Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(color), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for("dealii::MatrixFree::serial_cell_loop",
+ team_policy,
+ apply_kernel);
+ }
+ Kokkos::fence();
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename Functor>
+ void
+ MatrixFree<dim, Number>::distributed_cell_loop(
+ const Functor &func,
+ const LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &src,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &dst) const
+ {
+ MemorySpace::Default::kokkos_space::execution_space exec;
+
+ // in case we have compatible partitioners, we can simply use the provided
+ // vectors
+ if (src.get_partitioner().get() == partitioner.get() &&
+ dst.get_partitioner().get() == partitioner.get())
+ {
+ // This code is inspired by the code in TaskInfo::loop.
+ if (overlap_communication_computation)
+ {
+ src.update_ghost_values_start(0);
+
+ // In parallel, it's possible that some processors do not own any
+ // cells.
+ if (n_cells[0] > 0)
+ {
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ n_cells[0],
+ Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(0), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_0",
+ team_policy,
+ apply_kernel);
+ }
+ src.update_ghost_values_finish();
+
+ // In serial this color does not exist because there are no ghost
+ // cells
+ if (n_cells[1] > 0)
+ {
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ n_cells[1],
+ Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(1), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_1",
+ team_policy,
+ apply_kernel);
+
+ // We need a synchronization point because we don't want
+ // CUDA-aware MPI to start the MPI communication until the
+ // kernel is done.
+ Kokkos::fence();
+ }
+
+ dst.compress_start(0, VectorOperation::add);
+ // When the mesh is coarse it is possible that some processors do
+ // not own any cells
+ if (n_cells[2] > 0)
+ {
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ n_cells[2],
+ Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(2), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_2",
+ team_policy,
+ apply_kernel);
+ }
+ dst.compress_finish(VectorOperation::add);
+ }
+ else
+ {
+ src.update_ghost_values();
+ std::vector<
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>>
+ values_colors(n_colors);
+ std::vector<
+ Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>>
+ gradients_colors(n_colors);
+
+ // Execute the loop on the cells
+ for (unsigned int i = 0; i < n_colors; ++i)
+ if (n_cells[i] > 0)
+ {
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ n_cells[i],
+ Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(i), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_" +
+ std::to_string(i),
+ team_policy,
+ apply_kernel);
+ }
+ dst.compress(VectorOperation::add);
+ }
+ src.zero_out_ghost_values();
+ }
+ else
+ {
+ // Create the ghosted source and the ghosted destination
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
+ ghosted_src(partitioner);
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
+ ghosted_dst(ghosted_src);
+ ghosted_src = src;
+ ghosted_dst = dst;
+ ghosted_dst.zero_out_ghost_values();
+
+ // Execute the loop on the cells
+ for (unsigned int i = 0; i < n_colors; ++i)
+ if (n_cells[i] > 0)
+ {
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(
+#if KOKKOS_VERSION >= 20900
+ exec,
+#endif
+ n_cells[i],
+ Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func,
+ get_data(i),
+ ghosted_src.get_values(),
+ ghosted_dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_" +
+ std::to_string(i),
+ team_policy,
+ apply_kernel);
+ }
+
+ // Add the ghosted values
+ ghosted_dst.compress(VectorOperation::add);
+ dst = ghosted_dst;
+ }
+ }
+
+#ifdef DEAL_II_WITH_CUDA
+ template <int dim, typename Number>
+ template <typename Functor>
+ void
+ MatrixFree<dim, Number>::distributed_cell_loop(
+ const Functor &,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+ LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+ {
+ DEAL_II_ASSERT_UNREACHABLE();
+ }
+#endif
+} // namespace Portable
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
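To connect the pieces above, the following self-contained sketch shows how reinit(),
initialize_dof_vector(), and cell_loop() are typically driven from user code, and which
interface a cell functor has to provide so that internal::ApplyKernel can call it. It is
hypothetical user code, not part of the library: the names MyCellOperator and
run_cell_loop are made up, the operator body is intentionally empty, and the structure
mirrors the DummyOperator test shown further below. A real operator would act on the
quadrature points through Portable::FEEvaluation.
@code
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/utilities.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/fe/fe_update_flags.h>

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/la_parallel_vector.h>

#include <deal.II/matrix_free/portable_matrix_free.h>

using namespace dealii;

// Skeleton of a cell functor: cell_loop() calls operator() once per cell
// with the device-side data of the current color, the team-shared scratch
// data, and raw pointers into src and dst. The static n_local_dofs constant
// is what internal::ApplyKernel uses to size the scratch memory.
template <int dim, int fe_degree>
class MyCellOperator
{
public:
  static const unsigned int n_dofs_1d    = fe_degree + 1;
  static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);

  DEAL_II_HOST_DEVICE void
  operator()(const unsigned int                                      cell,
             const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
             Portable::SharedData<dim, double>                      *shared_data,
             const double                                           *src,
             double                                                 *dst) const
  {
    // A real operator would evaluate src, act on the quadrature points, and
    // integrate into dst. This skeleton deliberately does nothing.
    (void)cell;
    (void)gpu_data;
    (void)shared_data;
    (void)src;
    (void)dst;
  }
};

template <int dim, int fe_degree>
void
run_cell_loop(const DoFHandler<dim>           &dof_handler,
              const AffineConstraints<double> &constraints)
{
  Portable::MatrixFree<dim, double>                          mf_data;
  typename Portable::MatrixFree<dim, double>::AdditionalData additional_data;
  additional_data.mapping_update_flags =
    update_values | update_gradients | update_JxW_values;

  // This overload uses the default MappingQ1 mapping internally.
  mf_data.reinit(dof_handler,
                 constraints,
                 QGauss<1>(fe_degree + 1),
                 additional_data);

  // Vectors sized (or partitioned, in the MPI case) by the MatrixFree object.
  LinearAlgebra::distributed::Vector<double, MemorySpace::Default> src, dst;
  mf_data.initialize_dof_vector(src);
  mf_data.initialize_dof_vector(dst);

  // One kernel launch per color, plus the MPI exchange in the
  // distributed case.
  mf_data.cell_loop(MyCellOperator<dim, fe_degree>(), src, dst);
}
@endcode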
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_portable_tensor_product_kernels_h
+#define dealii_portable_tensor_product_kernels_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/matrix_free/portable_matrix_free.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Portable
+{
+ namespace internal
+ {
+ /**
+ * In this namespace, the evaluator routines that evaluate the tensor
+ * products are implemented.
+ *
+ * @ingroup CUDAWrappers
+ * @ingroup Portable
+ */
+ // TODO: for now only the general variant is implemented
+ enum EvaluatorVariant
+ {
+ evaluate_general,
+ evaluate_symmetric,
+ evaluate_evenodd
+ };
+
+
+
+#if KOKKOS_VERSION >= 40000
+ /**
+ * Helper function for values() and gradients() in 1D
+ */
+ template <int n_q_points_1d,
+ typename Number,
+ int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ apply_1d(const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_data,
+ const ViewTypeIn in,
+ ViewTypeOut out)
+ {
+ Number t[n_q_points_1d];
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points_1d),
+ [&](const int &q) {
+ t[q] = 0;
+ // This loop simply multiplies the shape function
+ // at the quadrature point by the value of the
+ // finite element coefficient.
+ // FIXME check why using parallel_reduce
+ // ThreadVector is slower
+ for (int k = 0; k < n_q_points_1d; ++k)
+ {
+ const unsigned int shape_idx =
+ dof_to_quad ? (q + k * n_q_points_1d) :
+ (k + q * n_q_points_1d);
+ const unsigned int source_idx = k;
+ t[q] += shape_data[shape_idx] * in(source_idx);
+ }
+ });
+
+ if constexpr (in_place)
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points_1d),
+ [&](const int &q) {
+ const unsigned int destination_idx = q;
+ if constexpr (add)
+ Kokkos::atomic_add(&out(destination_idx), t[q]);
+ else
+ out(destination_idx) = t[q];
+ });
+ }
+
+
+
+ /**
+ * Helper function for values() and gradients() in 2D
+ */
+ template <int n_q_points_1d,
+ typename Number,
+ int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ apply_2d(const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_data,
+ const ViewTypeIn in,
+ ViewTypeOut out)
+ {
+ using TeamType = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2);
+
+ Number t[n_q_points];
+ auto thread_policy =
+ Kokkos::TeamThreadMDRange<Kokkos::Rank<2>, TeamType>(team_member,
+ n_q_points_1d,
+ n_q_points_1d);
+ Kokkos::parallel_for(thread_policy, [&](const int i, const int j) {
+ int q_point = i + j * n_q_points_1d;
+ t[q_point] = 0;
+
+ // This loop simply multiplies the shape function at the quadrature
+ // point by the value of the finite element coefficient.
+ // FIXME check why using parallel_reduce ThreadVector is slower
+ for (int k = 0; k < n_q_points_1d; ++k)
+ {
+ const unsigned int shape_idx =
+ dof_to_quad ? (j + k * n_q_points_1d) : (k + j * n_q_points_1d);
+ const unsigned int source_idx = (direction == 0) ?
+ (k + n_q_points_1d * i) :
+ (i + n_q_points_1d * k);
+ t[q_point] += shape_data[shape_idx] * in(source_idx);
+ }
+ });
+
+ if (in_place)
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(thread_policy, [&](const int i, const int j) {
+ const int q_point = i + j * n_q_points_1d;
+ const unsigned int destination_idx =
+ (direction == 0) ? (j + n_q_points_1d * i) : (i + n_q_points_1d * j);
+
+ if (add)
+ Kokkos::atomic_add(&out(destination_idx), t[q_point]);
+ else
+ out(destination_idx) = t[q_point];
+ });
+ }
+
+
+
+ /**
+ * Helper function for values() and gradients() in 3D
+ */
+ template <int n_q_points_1d,
+ typename Number,
+ int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ apply_3d(const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_data,
+ const ViewTypeIn in,
+ ViewTypeOut out)
+ {
+ using TeamType = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3);
+
+ Number t[n_q_points];
+ auto thread_policy = Kokkos::TeamThreadMDRange<Kokkos::Rank<3>, TeamType>(
+ team_member, n_q_points_1d, n_q_points_1d, n_q_points_1d);
+ Kokkos::parallel_for(
+ thread_policy, [&](const int i, const int j, const int q) {
+ const int q_point =
+ i + j * n_q_points_1d + q * n_q_points_1d * n_q_points_1d;
+ t[q_point] = 0;
+
+ // This loop simply multiplies the shape function at the quadrature
+ // point by the value of the finite element coefficient.
+ // FIXME check why using parallel_reduce ThreadVector is slower
+ for (int k = 0; k < n_q_points_1d; ++k)
+ {
+ const unsigned int shape_idx =
+ dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
+ const unsigned int source_idx =
+ (direction == 0) ?
+ (k + n_q_points_1d * (i + n_q_points_1d * j)) :
+ (direction == 1) ?
+ (i + n_q_points_1d * (k + n_q_points_1d * j)) :
+ (i + n_q_points_1d * (j + n_q_points_1d * k));
+ t[q_point] += shape_data[shape_idx] * in(source_idx);
+ }
+ });
+
+ if (in_place)
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(
+ thread_policy, [&](const int i, const int j, const int q) {
+ const int q_point =
+ i + j * n_q_points_1d + q * n_q_points_1d * n_q_points_1d;
+ const unsigned int destination_idx =
+ (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
+ (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
+ (i + n_q_points_1d * (j + n_q_points_1d * q));
+
+ if (add)
+ Kokkos::atomic_add(&out(destination_idx), t[q_point]);
+ else
+ out(destination_idx) = t[q_point];
+ });
+ }
+#endif
+
+
+
+ /**
+ * Helper function for values() and gradients().
+ */
+ template <int dim,
+ int n_q_points_1d,
+ typename Number,
+ int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ apply(const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_data,
+ const ViewTypeIn in,
+ ViewTypeOut out)
+ {
+#if KOKKOS_VERSION >= 40000
+ if constexpr (dim == 1)
+ apply_1d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+ team_member, shape_data, in, out);
+ if constexpr (dim == 2)
+ apply_2d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+ team_member, shape_data, in, out);
+ if constexpr (dim == 3)
+ apply_3d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+ team_member, shape_data, in, out);
+#else
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);
+
+ Number t[n_q_points];
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d;
+ const unsigned int j =
+ (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0;
+ const unsigned int q =
+ (dim == 1) ? q_point :
+ (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d :
+ q_point / (n_q_points_1d * n_q_points_1d);
+
+ // This loop simply multiplies the shape function at the quadrature
+ // point by the value of the finite element coefficient.
+ t[q_point] = 0;
+ for (int k = 0; k < n_q_points_1d; ++k)
+ {
+ const unsigned int shape_idx =
+ dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
+ const unsigned int source_idx =
+ (direction == 0) ?
+ (k + n_q_points_1d * (i + n_q_points_1d * j)) :
+ (direction == 1) ?
+ (i + n_q_points_1d * (k + n_q_points_1d * j)) :
+ (i + n_q_points_1d * (j + n_q_points_1d * k));
+ t[q_point] += shape_data[shape_idx] *
+ (in_place ? out(source_idx) : in(source_idx));
+ }
+ });
+
+ if (in_place)
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d;
+ const unsigned int j =
+ (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0;
+ const unsigned int q =
+ (dim == 1) ? q_point :
+ (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d :
+ q_point / (n_q_points_1d * n_q_points_1d);
+
+ const unsigned int destination_idx =
+ (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
+ (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
+ (i + n_q_points_1d * (j + n_q_points_1d * q));
+
+ if (add)
+ Kokkos::atomic_add(&out(destination_idx), t[q_point]);
+ else
+ out(destination_idx) = t[q_point];
+ });
+#endif
+ }
+
+
+ /**
+ * Generic evaluator framework.
+ *
+ * @ingroup CUDAWrappers
+ * @ingroup Portable
+ */
+ template <EvaluatorVariant variant,
+ int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ typename Number>
+ struct EvaluatorTensorProduct
+ {};
+
+
+
+ /**
+ * Internal evaluator for 1d-3d shape functions using the tensor product form
+ * of the basis functions.
+ *
+ * @ingroup CUDAWrappers
+ * @ingroup Portable
+ */
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ struct EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>
+ {
+ public:
+ using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
+ DEAL_II_HOST_DEVICE
+ EvaluatorTensorProduct(
+ const TeamHandle &team_member,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_gradients,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ co_shape_gradients);
+
+ /**
+ * Evaluate the finite element function at the quadrature points.
+ */
+ template <typename ViewType>
+ DEAL_II_HOST_DEVICE void
+ evaluate_values(ViewType u);
+
+ /**
+ * Evaluate the gradients of the finite element function at the quadrature
+ * points.
+ */
+ template <typename ViewTypeIn, typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ evaluate_gradients(const ViewTypeIn u, ViewTypeOut grad_u);
+
+ /**
+ * Evaluate the values and the gradients of the finite element function at
+ * the quadrature points.
+ */
+ template <typename ViewType1, typename ViewType2>
+ DEAL_II_HOST_DEVICE void
+ evaluate_values_and_gradients(ViewType1 u, ViewType2 grad_u);
+
+ /**
+ * Helper function for integrate(). Integrate the finite element function.
+ */
+ template <typename ViewType>
+ DEAL_II_HOST_DEVICE void
+ integrate_values(ViewType u);
+
+ /**
+ * Helper function for integrate(). Integrate the gradients of the finite
+ * element function.
+ */
+ template <bool add, typename ViewType1, typename ViewType2>
+ DEAL_II_HOST_DEVICE void
+ integrate_gradients(ViewType1 u, ViewType2 grad_u);
+
+ /**
+ * Helper function for integrate(). Integrate the values and the gradients
+ * of the finite element function.
+ */
+ template <typename ViewType1, typename ViewType2>
+ DEAL_II_HOST_DEVICE void
+ integrate_values_and_gradients(ViewType1 u, ViewType2 grad_u);
+
+ /**
+ * Evaluate/integrate the values of a finite element function at the
+ * quadrature points for a given @p direction.
+ */
+ template <int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ values(const ViewTypeIn in, ViewTypeOut out) const;
+
+ /**
+ * Evaluate/integrate the gradient of a finite element function at the
+ * quadrature points for a given @p direction.
+ */
+ template <int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ gradients(const ViewTypeIn in, ViewTypeOut out) const;
+
+ public:
+ /**
+ * Evaluate the gradient of a finite element function at the quadrature
+ * points for a given @p direction for collocation methods.
+ */
+ template <int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ co_gradients(const ViewTypeIn in, ViewTypeOut out) const;
+
+ /**
+ * TeamPolicy handle.
+ */
+ const TeamHandle &team_member;
+
+ /**
+ * Values of the shape functions.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
+
+ /**
+ * Values of the shape function gradients.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_gradients;
+
+ /**
+ * Values of the shape function gradients for collocation methods.
+ */
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ co_shape_gradients;
+ };
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ DEAL_II_HOST_DEVICE
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::
+ EvaluatorTensorProduct(
+ const TeamHandle &team_member,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ shape_gradients,
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ co_shape_gradients)
+ : team_member(team_member)
+ , shape_values(shape_values)
+ , shape_gradients(shape_gradients)
+ , co_shape_gradients(co_shape_gradients)
+ {}
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::values(const ViewTypeIn in,
+ ViewTypeOut out) const
+ {
+ apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+ team_member, shape_values, in, out);
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::gradients(const ViewTypeIn in,
+ ViewTypeOut out) const
+ {
+ apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+ team_member, shape_gradients, in, out);
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <int direction,
+ bool dof_to_quad,
+ bool add,
+ bool in_place,
+ typename ViewTypeIn,
+ typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::co_gradients(const ViewTypeIn in,
+ ViewTypeOut out) const
+ {
+ apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+ team_member, co_shape_gradients, in, out);
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <typename ViewType>
+ DEAL_II_HOST_DEVICE inline void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::evaluate_values(ViewType u)
+ {
+ if constexpr (dim == 1)
+ values<0, true, false, true>(u, u);
+ else if constexpr (dim == 2)
+ {
+ values<0, true, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, true, false, true>(u, u);
+ }
+ else if constexpr (dim == 3)
+ {
+ values<0, true, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, true, false, true>(u, u);
+ team_member.team_barrier();
+ values<2, true, false, true>(u, u);
+ }
+ else
+ Kokkos::abort("dim must not exceed 3!");
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <typename ViewType>
+ DEAL_II_HOST_DEVICE inline void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::integrate_values(ViewType u)
+ {
+ if constexpr (dim == 1)
+ values<0, false, false, true>(u, u);
+ else if constexpr (dim == 2)
+ {
+ values<0, false, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, false, false, true>(u, u);
+ }
+ else if constexpr (dim == 3)
+ {
+ values<0, false, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, false, false, true>(u, u);
+ team_member.team_barrier();
+ values<2, false, false, true>(u, u);
+ }
+ else
+ Kokkos::abort("dim must not exceed 3!");
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <typename ViewTypeIn, typename ViewTypeOut>
+ DEAL_II_HOST_DEVICE inline void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::evaluate_gradients(const ViewTypeIn u,
+ ViewTypeOut grad_u)
+ {
+ if constexpr (dim == 1)
+ {
+ gradients<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ }
+ else if constexpr (dim == 2)
+ {
+ gradients<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ values<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
+
+ team_member.team_barrier();
+
+ values<1, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ gradients<1, true, false, true>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ }
+ else if constexpr (dim == 3)
+ {
+ gradients<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ values<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ values<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 2));
+
+ team_member.team_barrier();
+
+ values<1, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ gradients<1, true, false, true>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ values<1, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 2),
+ Kokkos::subview(grad_u, Kokkos::ALL, 2));
+
+ team_member.team_barrier();
+
+ values<2, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ values<2, true, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ gradients<2, true, false, true>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 2),
+ Kokkos::subview(grad_u, Kokkos::ALL, 2));
+ }
+ else
+ Kokkos::abort("dim must not exceed 3!");
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <typename ViewType1, typename ViewType2>
+ DEAL_II_HOST_DEVICE inline void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::evaluate_values_and_gradients(ViewType1 u,
+ ViewType2
+ grad_u)
+ {
+ if constexpr (dim == 1)
+ {
+ values<0, true, false, true>(u, u);
+ team_member.team_barrier();
+
+ co_gradients<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ }
+ else if constexpr (dim == 2)
+ {
+ values<0, true, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, true, false, true>(u, u);
+ team_member.team_barrier();
+
+ co_gradients<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ co_gradients<1, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ }
+ else if constexpr (dim == 3)
+ {
+ values<0, true, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, true, false, true>(u, u);
+ team_member.team_barrier();
+ values<2, true, false, true>(u, u);
+ team_member.team_barrier();
+
+ co_gradients<0, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ co_gradients<1, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ co_gradients<2, true, false, false>(
+ u, Kokkos::subview(grad_u, Kokkos::ALL, 2));
+ }
+ else
+ Kokkos::abort("dim must not exceed 3!");
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <bool add, typename ViewType1, typename ViewType2>
+ DEAL_II_HOST_DEVICE inline void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::integrate_gradients(ViewType1 u,
+ ViewType2 grad_u)
+ {
+ if constexpr (dim == 1)
+ {
+ gradients<0, false, add, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, dim), u);
+ }
+ else if constexpr (dim == 2)
+ {
+ gradients<0, false, false, true>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ values<0, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ Kokkos::subview(grad_u,
+ Kokkos::ALL,
+ 1));
+
+ team_member.team_barrier();
+
+ values<1, false, add, false>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ u);
+ team_member.team_barrier();
+ gradients<1, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
+ }
+ else if constexpr (dim == 3)
+ {
+ gradients<0, false, false, true>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ Kokkos::subview(grad_u, Kokkos::ALL, 0));
+ values<0, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ Kokkos::subview(grad_u,
+ Kokkos::ALL,
+ 1));
+ values<0, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 2),
+ Kokkos::subview(grad_u,
+ Kokkos::ALL,
+ 2));
+
+ team_member.team_barrier();
+
+ values<1, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ Kokkos::subview(grad_u,
+ Kokkos::ALL,
+ 0));
+ gradients<1, false, false, true>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ Kokkos::subview(grad_u, Kokkos::ALL, 1));
+ values<1, false, false, true>(Kokkos::subview(grad_u, Kokkos::ALL, 2),
+ Kokkos::subview(grad_u,
+ Kokkos::ALL,
+ 2));
+
+ team_member.team_barrier();
+
+ values<2, false, add, false>(Kokkos::subview(grad_u, Kokkos::ALL, 0),
+ u);
+ team_member.team_barrier();
+ values<2, false, true, false>(Kokkos::subview(grad_u, Kokkos::ALL, 1),
+ u);
+ team_member.team_barrier();
+ gradients<2, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
+ }
+ else
+ Kokkos::abort("dim must not exceed 3!");
+ }
+
+
+
+ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+ template <typename ViewType1, typename ViewType2>
+ DEAL_II_HOST_DEVICE inline void
+ EvaluatorTensorProduct<evaluate_general,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ Number>::integrate_values_and_gradients(ViewType1 u,
+ ViewType2
+ grad_u)
+ {
+ if constexpr (dim == 1)
+ {
+ co_gradients<0, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
+ team_member.team_barrier();
+
+ values<0, false, false, true>(u, u);
+ }
+ else if constexpr (dim == 2)
+ {
+ co_gradients<1, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
+ team_member.team_barrier();
+ co_gradients<0, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
+ team_member.team_barrier();
+
+ values<1, false, false, true>(u, u);
+ team_member.team_barrier();
+ values<0, false, false, true>(u, u);
+ team_member.team_barrier();
+ }
+ else if constexpr (dim == 3)
+ {
+ co_gradients<2, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
+ team_member.team_barrier();
+ co_gradients<1, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
+ team_member.team_barrier();
+ co_gradients<0, false, true, false>(
+ Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
+ team_member.team_barrier();
+
+ values<2, false, false, true>(u, u);
+ team_member.team_barrier();
+ values<1, false, false, true>(u, u);
+ team_member.team_barrier();
+ values<0, false, false, true>(u, u);
+ team_member.team_barrier();
+ }
+ else
+ Kokkos::abort("dim must not exceed 3!");
+ }
+ } // namespace internal
+} // namespace Portable
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
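The apply_1d/2d/3d kernels above implement standard sum factorization: a dim-dimensional
interpolation is carried out as dim successive one-dimensional passes, one per direction,
instead of a single dense matrix-vector product. As a reference point, here is a
hypothetical, purely host-side C++ sketch of the 2d case (the function name
evaluate_values_2d is made up) that uses the same shape_idx / source_idx /
destination_idx conventions as apply_2d(), with n standing for n_q_points_1d:
@code
#include <array>

// shape is stored as shape[q + k * n] = value of 1d basis function k at the
// 1d quadrature point q; u holds the cell coefficients in lexicographic order.
template <int n>
std::array<double, n * n>
evaluate_values_2d(const std::array<double, n * n> &shape,
                   std::array<double, n * n>        u)
{
  // Apply the 1d shape matrix first along direction 0, then along direction 1.
  for (int direction = 0; direction < 2; ++direction)
    {
      std::array<double, n * n> tmp{};
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
          {
            double sum = 0.;
            for (int k = 0; k < n; ++k)
              {
                const int shape_idx  = j + k * n; // dof-to-quad interpolation
                const int source_idx = (direction == 0) ? (k + n * i) :
                                                          (i + n * k);
                sum += shape[shape_idx] * u[source_idx];
              }
            const int destination_idx =
              (direction == 0) ? (j + n * i) : (i + n * j);
            tmp[destination_idx] = sum;
          }
      u = tmp; // the second pass reads the result of the first one
    }
  return u;
}
@endcode
Each pass performs n such sums for each of the n^2 entries, so the two passes cost on the
order of n^3 operations per cell in 2d instead of the n^4 of applying the full
interpolation matrix at once; the device kernels above parallelize the (i, j) loops over
the Kokkos team.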
## ---------------------------------------------------------------------
set(_src
- cuda_matrix_free.cc
dof_info.cc
evaluation_template_factory.cc
evaluation_template_factory_inst2.cc
mapping_info_inst2.cc
mapping_info_inst3.cc
matrix_free.cc
+ portable_matrix_free.cc
shape_info.cc
task_info.cc
vector_data_exchange.cc
//
// ---------------------------------------------------------------------
-#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
+#include <deal.II/matrix_free/portable_matrix_free.templates.h>
DEAL_II_NAMESPACE_OPEN
-namespace CUDAWrappers
+namespace Portable
{
// Do not instantiate for dim = 1
template class MatrixFree<2, float>;
template class MatrixFree<2, double>;
template class MatrixFree<3, float>;
template class MatrixFree<3, double>;
-} // namespace CUDAWrappers
+} // namespace Portable
DEAL_II_NAMESPACE_CLOSE
#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
+#include <deal.II/matrix_free/portable_matrix_free.templates.h>
#include "../tests.h"
DummyOperator() = default;
DEAL_II_HOST_DEVICE void
- operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, double> *shared_data,
- const double *src,
- double *dst) const;
+ operator()(const unsigned int cell,
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ Portable::SharedData<dim, double> *shared_data,
+ const double *src,
+ double *dst) const;
static const unsigned int n_dofs_1d = fe_degree + 1;
static const unsigned int n_local_dofs =
template <int dim, int fe_degree>
DEAL_II_HOST_DEVICE void
DummyOperator<dim, fe_degree>::operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, double> *shared_data,
+ const unsigned int cell,
+ const typename Portable::MatrixFree<dim, double>::Data *gpu_data,
+ Portable::SharedData<dim, double> *shared_data,
const double *,
double *dst) const
{
class DummyMatrixFree : public Subscriptor
{
public:
- DummyMatrixFree(const CUDAWrappers::MatrixFree<dim, double> &data_in,
- const unsigned int size);
+ DummyMatrixFree(const Portable::MatrixFree<dim, double> &data_in,
+ const unsigned int size);
void
eval(LinearAlgebra::distributed::Vector<double, MemorySpace::Default> &dst)
const;
private:
- const CUDAWrappers::MatrixFree<dim, double> &data;
+ const Portable::MatrixFree<dim, double> &data;
};
template <int dim, int fe_degree>
DummyMatrixFree<dim, fe_degree>::DummyMatrixFree(
- const CUDAWrappers::MatrixFree<dim, double> &data_in,
- const unsigned int size)
+ const Portable::MatrixFree<dim, double> &data_in,
+ const unsigned int size)
: data(data_in)
{}
constraints.close();
// Computation on the device
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, double> mf_data;
- typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, double> mf_data;
+ typename Portable::MatrixFree<dim, double>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
const unsigned int n_colors = graph.size();
for (unsigned int color = 0; color < n_colors; ++color)
{
- typename CUDAWrappers::MatrixFree<dim, double>::Data gpu_data =
+ typename Portable::MatrixFree<dim, double>::Data gpu_data =
mf_data.get_data(color);
const unsigned int n_cells = gpu_data.n_cells;
- auto gpu_data_host = CUDAWrappers::copy_mf_data_to_host<dim, double>(
+ auto gpu_data_host = Portable::copy_mf_data_to_host<dim, double>(
gpu_data, additional_data.mapping_update_flags);
for (unsigned int cell_id = 0; cell_id < n_cells; ++cell_id)
{
-// Test CUDAWrappers::MatrixFree::initialize_dof_vector.
+// Test Portable::MatrixFree::initialize_dof_vector.
#include <deal.II/distributed/tria.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/cuda_vector.h>
-#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/portable_matrix_free.h>
#include <deal.II/numerics/vector_tools.h>
AffineConstraints<double> constraints(relevant_set);
constraints.close();
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
mf_data.reinit(mapping, dof, constraints, quad, additional_data);
VectorType vector;
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
FE_Q<2> fe_1(fe_degree_1);
DoFHandler<2> dof_1(tria);
dof_1.distribute_dofs(fe_1);
- MappingQ<2> mapping_1(fe_degree_1);
- CUDAWrappers::MatrixFree<2, double> mf_data_1;
- CUDAWrappers::MatrixFree<2, double>::AdditionalData additional_data_1;
+ MappingQ<2> mapping_1(fe_degree_1);
+ Portable::MatrixFree<2, double> mf_data_1;
+ Portable::MatrixFree<2, double>::AdditionalData additional_data_1;
additional_data_1.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
FE_Q<2> fe_2(fe_degree_2);
DoFHandler<2> dof_2(tria);
dof_2.distribute_dofs(fe_2);
- MappingQ<2> mapping_2(fe_degree_2);
- CUDAWrappers::MatrixFree<2, double> mf_data_2;
- CUDAWrappers::MatrixFree<2, double>::AdditionalData additional_data_2;
+ MappingQ<2> mapping_2(fe_degree_2);
+ Portable::MatrixFree<2, double> mf_data_2;
+ Portable::MatrixFree<2, double>::AdditionalData additional_data_2;
additional_data_2.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
// ---------------------------------------------------------------------
-// check that CUDAWrappers::FEEvaluation::submit_dof_value/get_dof_value
+// check that Portable::FEEvaluation::submit_dof_value/get_dof_value
// works correctly.
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/manifold_lib.h>
-#include <deal.II/matrix_free/cuda_fe_evaluation.h>
+#include <deal.II/matrix_free/portable_fe_evaluation.h>
#include "../tests.h"
static const unsigned int n_local_dofs = Utilities::pow(n_dofs_1d, dim);
static const unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);
- MatrixFreeTest(const CUDAWrappers::MatrixFree<dim, Number> &data_in)
+ MatrixFreeTest(const Portable::MatrixFree<dim, Number> &data_in)
: data(data_in){};
DEAL_II_HOST_DEVICE void
- operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, Number> *shared_data,
- const Number *src,
- Number *dst) const
+ operator()(const unsigned int cell,
+ const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ Portable::SharedData<dim, Number> *shared_data,
+ const Number *src,
+ Number *dst) const
{
- CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number>
- fe_eval(gpu_data, shared_data);
+ Portable::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> fe_eval(
+ gpu_data, shared_data);
// set to unit vector
auto fe_eval_ptr = &fe_eval;
};
protected:
- const CUDAWrappers::MatrixFree<dim, Number> &data;
+ const Portable::MatrixFree<dim, Number> &data;
};
template <int dim, int fe_degree, int n_q_points_1d, typename Number>
do_test(const DoFHandler<dim> &dof,
const AffineConstraints<double> &constraints)
{
- CUDAWrappers::MatrixFree<dim, number> mf_data;
+ Portable::MatrixFree<dim, number> mf_data;
{
const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, number>::AdditionalData data;
+ typename Portable::MatrixFree<dim, number>::AdditionalData data;
data.mapping_update_flags = update_values | update_gradients |
update_JxW_values | update_quadrature_points;
mf_data.reinit(dof, constraints, quad, data);
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> uad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> uad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
-// Test that initializing with CUDAWrappers::MatrixFree with empty ranges work.
+// Test that initializing Portable::MatrixFree with empty ranges works.
#include <deal.II/distributed/tria.h>
#include <deal.II/lac/affine_constraints.h>
-#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/portable_matrix_free.h>
#include <deal.II/numerics/vector_tools.h>
constraints);
constraints.close();
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- const QGauss<1> quad(fe_degree + 1);
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ const QGauss<1> quad(fe_degree + 1);
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
{
deallog << "Testing " << dof.get_fe().get_name() << std::endl;
- MappingQ<dim> mapping(fe_degree);
- CUDAWrappers::MatrixFree<dim, Number> mf_data;
- typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
- additional_data;
+ MappingQ<dim> mapping(fe_degree);
+ Portable::MatrixFree<dim, Number> mf_data;
+ typename Portable::MatrixFree<dim, Number>::AdditionalData additional_data;
additional_data.mapping_update_flags = update_values | update_gradients |
update_JxW_values |
update_quadrature_points;
#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
-#include <deal.II/matrix_free/cuda_fe_evaluation.h>
-#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/portable_fe_evaluation.h>
+#include <deal.II/matrix_free/portable_matrix_free.h>
#include "../tests.h"
public:
DEAL_II_HOST_DEVICE
HelmholtzOperatorQuad(
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- Number *coef,
- int cell)
+ const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ Number *coef,
+ int cell)
: gpu_data(gpu_data)
, coef(coef)
, cell(cell)
DEAL_II_HOST_DEVICE void
operator()(
- CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number>
- *fe_eval,
+ Portable::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval,
int q_point) const;
static const unsigned int n_q_points =
dealii::Utilities::pow(n_q_points_1d, dim);
private:
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data;
- Number *coef;
- int cell;
+ const typename Portable::MatrixFree<dim, Number>::Data *gpu_data;
+ Number *coef;
+ int cell;
};
template <int dim, int fe_degree, typename Number, int n_q_points_1d>
DEAL_II_HOST_DEVICE void
HelmholtzOperatorQuad<dim, fe_degree, Number, n_q_points_1d>::operator()(
- CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval,
+ Portable::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval,
int q_point) const
{
unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q_point);
{}
DEAL_II_HOST_DEVICE void
- operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, Number> *shared_data,
- const Number *src,
- Number *dst) const;
+ operator()(const unsigned int cell,
+ const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ Portable::SharedData<dim, Number> *shared_data,
+ const Number *src,
+ Number *dst) const;
Number *coef;
};
template <int dim, int fe_degree, typename Number, int n_q_points_1d>
DEAL_II_HOST_DEVICE void
HelmholtzOperator<dim, fe_degree, Number, n_q_points_1d>::operator()(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, Number> *shared_data,
- const Number *src,
- Number *dst) const
+ const unsigned int cell,
+ const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ Portable::SharedData<dim, Number> *shared_data,
+ const Number *src,
+ Number *dst) const
{
- CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> fe_eval(
+ Portable::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> fe_eval(
gpu_data, shared_data);
fe_eval.read_dof_values(src);
fe_eval.evaluate(EvaluationFlags::values | EvaluationFlags::gradients);
{}
DEAL_II_HOST_DEVICE void
- operator()(
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- const unsigned int cell,
- const unsigned int q) const;
+ operator()(const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ const unsigned int cell,
+ const unsigned int q) const;
static const unsigned int n_dofs_1d = fe_degree + 1;
static const unsigned int n_local_dofs =
template <int dim, int fe_degree, typename Number, int n_q_points_1d>
DEAL_II_HOST_DEVICE void
VaryingCoefficientFunctor<dim, fe_degree, Number, n_q_points_1d>::operator()(
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
- const unsigned int cell,
- const unsigned int q) const
+ const typename Portable::MatrixFree<dim, Number>::Data *gpu_data,
+ const unsigned int cell,
+ const unsigned int q) const
{
const unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q);
const auto q_point = gpu_data->get_quadrature_point(cell, q);
class MatrixFreeTest : public Subscriptor
{
public:
- MatrixFreeTest(const CUDAWrappers::MatrixFree<dim, Number> &data_in,
- const unsigned int size,
+ MatrixFreeTest(const Portable::MatrixFree<dim, Number> &data_in,
+ const unsigned int size,
const bool constant_coeff = true);
void
types::global_dof_index internal_m;
private:
- const CUDAWrappers::MatrixFree<dim, Number> &data;
+ const Portable::MatrixFree<dim, Number> &data;
LinearAlgebra::distributed::Vector<double, MemorySpace::Default> coef;
};
typename VectorType,
int n_q_points_1d>
MatrixFreeTest<dim, fe_degree, Number, VectorType, n_q_points_1d>::
- MatrixFreeTest(const CUDAWrappers::MatrixFree<dim, Number> &data_in,
- const unsigned int size,
- const bool constant_coeff)
+ MatrixFreeTest(const Portable::MatrixFree<dim, Number> &data_in,
+ const unsigned int size,
+ const bool constant_coeff)
: data(data_in)
{
coef.reinit(size);