// an object of this type to a CUDAWrappers::MatrixFree
// object that expects the class to have an `operator()` that fills the
- // values provided in the constructor for a given cell. This operator
- // needs to run on the device, so it needs to be marked as `__device__`
- // for the compiler.
+ // values provided in the constructor for a given cell and quadrature
+ // point. This operator needs to run on the device, so it needs to be
+ // marked as `DEAL_II_HOST_DEVICE` for the compiler.
template <int dim, int fe_degree>
class VaryingCoefficientFunctor
{
: coef(coefficient)
{}
- __device__ void operator()(
+ DEAL_II_HOST_DEVICE void operator()(
+ const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data);
+ const unsigned int q) const;
// Since CUDAWrappers::MatrixFree::Data doesn't know about the size of its
- // arrays, we need to store the number of quadrature points and the numbers
- // of degrees of freedom in this class to do necessary index conversions.
+ // arrays, we need to store the number of quadrature points and the
+ // number of degrees of freedom in this class to do necessary index
+ // conversions.
static const unsigned int n_dofs_1d = fe_degree + 1;
static const unsigned int n_local_dofs = Utilities::pow(n_dofs_1d, dim);
static const unsigned int n_q_points = Utilities::pow(n_dofs_1d, dim);
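    // Since the CUDAWrappers::MatrixFree object drives the evaluation, using
    // this functor is then a one-liner. A minimal sketch, assuming the
    // mf_data and coef objects used elsewhere in this program:
    //
    //   VaryingCoefficientFunctor<dim, fe_degree> functor(coef.get_values());
    //   mf_data.evaluate_coefficients(functor);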
  // the introduction that we have defined it as
  // $a(\mathbf x)=\frac{10}{0.05 + 2\|\mathbf x\|^2}$
template <int dim, int fe_degree>
- __device__ void VaryingCoefficientFunctor<dim, fe_degree>::operator()(
+ DEAL_II_HOST_DEVICE void
+ VaryingCoefficientFunctor<dim, fe_degree>::operator()(
+ const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data)
+ const unsigned int q) const
{
- const unsigned int pos = CUDAWrappers::local_q_point_id<dim, double>(
- cell, gpu_data, n_dofs_1d, n_q_points);
- const Point<dim> q_point =
- CUDAWrappers::get_quadrature_point<dim, double>(cell,
- gpu_data,
- n_dofs_1d);
+ const unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q);
+ const Point<dim> q_point = gpu_data->get_quadrature_point(cell, q);
double p_square = 0.;
for (unsigned int i = 0; i < dim; ++i)
- // step-37. In contrast to there, the actual quadrature point
- // index is treated implicitly by converting the current thread
- // index. As before, the functions of this class need to run on
- // the device, so need to be marked as `__device__` for the
+ // step-37. In contrast to there, the quadrature point index is
+ // now passed explicitly to the call operator. As before, the
+ // functions of this class need to run on the device, so need to
+ // be marked as `DEAL_II_HOST_DEVICE` for the
// compiler.
template <int dim, int fe_degree>
class HelmholtzOperatorQuad
{
public:
- __device__ HelmholtzOperatorQuad(double coef)
+ DEAL_II_HOST_DEVICE HelmholtzOperatorQuad(
+ const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
+ double * coef,
+ int cell)
-     : coef(coef)
+     : gpu_data(gpu_data)
+     , coef(coef)
+     , cell(cell)
{}
- __device__ void operator()(
+ DEAL_II_HOST_DEVICE void operator()(
CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
- *fe_eval) const;
+ * fe_eval,
+ const int q_point) const;
+
+ static const unsigned int n_q_points =
+ dealii::Utilities::pow(fe_degree + 1, dim);
private:
- double coef;
+ const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data;
+ double * coef;
+ int cell;
};
// the two terms on the left-hand side correspond to the two function calls
// here:
template <int dim, int fe_degree>
- __device__ void HelmholtzOperatorQuad<dim, fe_degree>::operator()(
+ DEAL_II_HOST_DEVICE void HelmholtzOperatorQuad<dim, fe_degree>::operator()(
CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
- *fe_eval) const
+ * fe_eval,
+ const int q_point) const
{
- fe_eval->submit_value(coef * fe_eval->get_value());
- fe_eval->submit_gradient(fe_eval->get_gradient());
+ const unsigned int pos =
+ gpu_data->local_q_point_id(cell, n_q_points, q_point);
+
+ fe_eval->submit_value(coef[pos] * fe_eval->get_value(q_point), q_point);
+ fe_eval->submit_gradient(fe_eval->get_gradient(q_point), q_point);
}
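  // To connect this to the introduction: the left-hand side of the weak form
  // consists of the two terms $(\nabla v, \nabla u)$ and
  // $(v, a(\mathbf x)\,u)$, so submit_gradient() supplies the first and
  // submit_value() the second, each implicitly weighted by the JxW value of
  // the quadrature point.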
class LocalHelmholtzOperator
{
public:
+ // Again, the CUDAWrappers::MatrixFree object doesn't know about the number
+ // of degrees of freedom and the number of quadrature points, so we need
+ // to store these for index calculations in the call operator.
+ static constexpr unsigned int n_dofs_1d = fe_degree + 1;
+ static constexpr unsigned int n_local_dofs =
+ Utilities::pow(fe_degree + 1, dim);
+ static constexpr unsigned int n_q_points =
+ Utilities::pow(fe_degree + 1, dim);
+
LocalHelmholtzOperator(double *coefficient)
: coef(coefficient)
{}
- __device__ void operator()(
+ DEAL_II_HOST_DEVICE void operator()(
const unsigned int cell,
const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
CUDAWrappers::SharedData<dim, double> * shared_data,
const double * src,
double * dst) const;
- // Again, the CUDAWrappers::MatrixFree object doesn't know about the number
- // of degrees of freedom and the number of quadrature points so we need
- // to store these for index calculations in the call operator.
- static const unsigned int n_dofs_1d = fe_degree + 1;
- static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);
- static const unsigned int n_q_points = Utilities::pow(fe_degree + 1, dim);
-
private:
double *coef;
};
// vector and we write value and gradient information to the destination
// vector.
template <int dim, int fe_degree>
- __device__ void LocalHelmholtzOperator<dim, fe_degree>::operator()(
+ DEAL_II_HOST_DEVICE void LocalHelmholtzOperator<dim, fe_degree>::operator()(
const unsigned int cell,
const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
CUDAWrappers::SharedData<dim, double> * shared_data,
const double * src,
double * dst) const
{
- const unsigned int pos = CUDAWrappers::local_q_point_id<dim, double>(
- cell, gpu_data, n_dofs_1d, n_q_points);
-
CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
- fe_eval(cell, gpu_data, shared_data);
+ fe_eval(gpu_data, shared_data);
fe_eval.read_dof_values(src);
fe_eval.evaluate(true, true);
fe_eval.apply_for_each_quad_point(
- HelmholtzOperatorQuad<dim, fe_degree>(coef[pos]));
+ HelmholtzOperatorQuad<dim, fe_degree>(gpu_data, coef, cell));
fe_eval.integrate(true, true);
fe_eval.distribute_local_to_global(dst);
}
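  // For illustration, applying this cell-local operator to a vector then
  // reduces to a single cell_loop() call. A minimal sketch, assuming the
  // mf_data and coef members of the enclosing operator class:
  //
  //   dst = 0.;
  //   LocalHelmholtzOperator<dim, fe_degree> helmholtz_operator(
  //     coef.get_values());
  //   mf_data.cell_loop(helmholtz_operator, src, dst);
  //   mf_data.copy_constrained_values(src, dst);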
*/
namespace CUDAWrappers
{
- namespace internal
- {
- /**
- * Compute the dof/quad index for a given thread id, dimension, and
- * number of points in each space dimensions.
- */
- template <int dim, int n_points_1d>
- DEAL_II_HOST_DEVICE inline unsigned int
- compute_index()
- {
- KOKKOS_IF_ON_DEVICE(
- return (dim == 1 ?
- threadIdx.x % n_points_1d :
- dim == 2 ?
- threadIdx.x % n_points_1d + n_points_1d * threadIdx.y :
- threadIdx.x % n_points_1d +
- n_points_1d * (threadIdx.y + n_points_1d * threadIdx.z));)
- KOKKOS_IF_ON_HOST(return 0;)
- }
- } // namespace internal
-
/**
* This class provides all the functions necessary to evaluate functions at
   * quadrature points and to perform cell integrations. In functionality, this class is
* Constructor.
*/
DEAL_II_HOST_DEVICE
- FEEvaluation(const unsigned int cell_id,
- const data_type * data,
- SharedData<dim, Number> *shdata);
+ FEEvaluation(const data_type *data, SharedData<dim, Number> *shdata);
/**
* For the vector @p src, read out the values on the degrees of freedom of
* id.
*/
DEAL_II_HOST_DEVICE value_type
- get_value() const;
+ get_value(int q_point) const;
/**
-    * Same as above, except that the local dof index is computed from the
-    * thread id.
+    * Same as above, except that the local dof index @p q_point is given
+    * explicitly instead of being computed from the thread id.
*/
DEAL_II_HOST_DEVICE value_type
- get_dof_value() const;
+ get_dof_value(int q_point) const;
/**
-    * Same as above, except that the quadrature point is computed from the
-    * thread id.
+    * Same as above, except that the quadrature point @p q_point is given
+    * explicitly instead of being computed from the thread id.
*/
DEAL_II_HOST_DEVICE void
- submit_value(const value_type &val_in);
+ submit_value(const value_type &val_in, int q_point);
/**
-    * Same as above, except that the local dof index is computed from the
-    * thread id.
+    * Same as above, except that the local dof index @p q_point is given
+    * explicitly instead of being computed from the thread id.
*/
DEAL_II_HOST_DEVICE void
- submit_dof_value(const value_type &val_in);
+ submit_dof_value(const value_type &val_in, int q_point);
/**
-    * Same as above, except that the quadrature point is computed from the
-    * thread id.
+    * Same as above, except that the quadrature point @p q_point is given
+    * explicitly instead of being computed from the thread id.
*/
DEAL_II_HOST_DEVICE gradient_type
- get_gradient() const;
+ get_gradient(int q_point) const;
/**
-    * Same as above, except that the quadrature point is computed from the
-    * thread id.
+    * Same as above, except that the quadrature point @p q_point is given
+    * explicitly instead of being computed from the thread id.
*/
DEAL_II_HOST_DEVICE void
- submit_gradient(const gradient_type &grad_in);
+ submit_gradient(const gradient_type &grad_in, int q_point);
// clang-format off
/**
apply_for_each_quad_point(const Functor &func);
private:
- // FIXME We would like to use
- // Kokkos::Subview<Kokkos::View<types::global_dof_index **,
- // MemorySpace::Default::kokkos_space>, int, decltype(Kokkos::ALL)>
- // but we get error: incomplete type is not allowed. I cannot reproduce
- // outside of deal.II. Need to investigate more.
- Kokkos::Subview<Kokkos::View<types::global_dof_index **,
- MemorySpace::Default::kokkos_space>,
- int,
- Kokkos::pair<int, int>>
- local_to_global;
- unsigned int n_cells;
- unsigned int padding_length;
-
- const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- constraint_mask;
-
- const bool use_coloring;
-
- // FIXME We would like to use
- // Kokkos::Subview<Kokkos::View<Number **[dim][dim],
- // MemorySpace::Default::kokkos_space>, int, decltype(Kokkos::ALL),
- // decltype(Kokkos::ALL), decltype(Kokkos::ALL)> but we get error:
- // incomplete type is not allowed. I cannot reproduce outside of deal.II.
- // Need to investigate more.
- Kokkos::Subview<
- Kokkos::View<Number **[dim][dim], MemorySpace::Default::kokkos_space>,
- int,
- Kokkos::pair<int, int>,
- Kokkos::pair<int, int>,
- Kokkos::pair<int, int>>
- inv_jac;
- // FIXME We would like to use
- // Kokkos::Subview<Kokkos::View<Number **,
- // MemorySpace::Default::kokkos_space>, int, decltype(Kokkos::ALL)>
- // but we get error: incomplete type is not allowed. I cannot reproduce
- // outside of deal.II. Need to investigate more.
- Kokkos::Subview<Kokkos::View<Number **, MemorySpace::Default::kokkos_space>,
- int,
- Kokkos::pair<int, int>>
- JxW;
-
- // Data shared by multiple cells
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_gradients;
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- co_shape_gradients;
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
- constraint_weights;
-
- // Internal buffer
- Kokkos::Subview<Kokkos::View<Number *, MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>>
- values;
- Kokkos::Subview<
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>,
- Kokkos::pair<int, int>>
- gradients;
+ const data_type * data;
+ SharedData<dim, Number> *shared_data;
+ int cell_id;
};
typename Number>
DEAL_II_HOST_DEVICE
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- FEEvaluation(const unsigned int cell_id,
- const data_type * data,
- SharedData<dim, Number> *shdata)
- : local_to_global(Kokkos::subview(
- data->local_to_global,
- cell_id,
- Kokkos::pair<int, int>(0, Utilities::pow(n_q_points_1d, dim))))
- , n_cells(data->n_cells)
- , padding_length(data->padding_length)
- , constraint_mask(data->constraint_mask[cell_id])
- , use_coloring(data->use_coloring)
- , inv_jac(Kokkos::subview(
- data->inv_jacobian,
- cell_id,
- Kokkos::pair<int, int>(0, Utilities::pow(n_q_points_1d, dim)),
- Kokkos::pair<int, int>(0, dim),
- Kokkos::pair<int, int>(0, dim)))
- , JxW(Kokkos::subview(
- data->JxW,
- cell_id,
- Kokkos::pair<int, int>(0, Utilities::pow(n_q_points_1d, dim))))
- , shape_values(data->shape_values)
- , shape_gradients(data->shape_gradients)
- , co_shape_gradients(data->co_shape_gradients)
- , constraint_weights(data->constraint_weights)
- , values(shdata->values)
- , gradients(shdata->gradients)
+ FEEvaluation(const data_type *data, SharedData<dim, Number> *shdata)
+ : data(data)
+ , shared_data(shdata)
+ , cell_id(shared_data->team_member.league_rank())
{}
{
    static_assert(n_components_ == 1, "This function only supports FE with one \
component");
- const unsigned int idx = internal::compute_index<dim, n_q_points_1d>();
-
- const types::global_dof_index src_idx = local_to_global[idx];
- values[idx] = src[src_idx];
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
-
- internal::resolve_hanging_nodes<dim, fe_degree, false>(constraint_weights,
- constraint_mask,
- values);
+ // Populate the scratch memory
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_q_points),
+ [&](const int &i) {
+ shared_data->values(i) =
+ src[data->local_to_global(cell_id, i)];
+ });
+ shared_data->team_member.team_barrier();
+
+ internal::resolve_hanging_nodes<dim, fe_degree, false>(
+ shared_data->team_member,
+ data->constraint_weights,
+ data->constraint_mask(cell_id),
+ shared_data->values);
}
{
    static_assert(n_components_ == 1, "This function only supports FE with one \
component");
- internal::resolve_hanging_nodes<dim, fe_degree, true>(constraint_weights,
- constraint_mask,
- values);
-
- const unsigned int idx = internal::compute_index<dim, n_q_points_1d>();
- const types::global_dof_index destination_idx = local_to_global[idx];
+ internal::resolve_hanging_nodes<dim, fe_degree, true>(
+ shared_data->team_member,
+ data->constraint_weights,
+ data->constraint_mask(cell_id),
+ shared_data->values);
- if (use_coloring)
- dst[destination_idx] += values[idx];
+ if (data->use_coloring)
+ {
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_q_points),
+ [&](const int &i) {
+ dst[data->local_to_global(cell_id, i)] +=
+ shared_data->values(i);
+ });
+ }
else
- atomicAdd(&dst[destination_idx], values[idx]);
+ {
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(shared_data->team_member, n_q_points),
+ [&](const int &i) {
+ Kokkos::atomic_add(&dst[data->local_to_global(cell_id, i)],
+ shared_data->values(i));
+ });
+ }
}
fe_degree,
n_q_points_1d,
Number>
- evaluator_tensor_product(shape_values,
- shape_gradients,
- co_shape_gradients);
+ evaluator_tensor_product(shared_data->team_member,
+ data->shape_values,
+ data->shape_gradients,
+ data->co_shape_gradients);
if (evaluate_val == true && evaluate_grad == true)
{
- evaluator_tensor_product.value_and_gradient_at_quad_pts(values,
- gradients);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ evaluator_tensor_product.value_and_gradient_at_quad_pts(
+ shared_data->values, shared_data->gradients);
+ shared_data->team_member.team_barrier();
}
else if (evaluate_grad == true)
{
- evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ evaluator_tensor_product.gradient_at_quad_pts(shared_data->values,
+ shared_data->gradients);
+ shared_data->team_member.team_barrier();
}
else if (evaluate_val == true)
{
- evaluator_tensor_product.value_at_quad_pts(values);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ evaluator_tensor_product.value_at_quad_pts(shared_data->values);
+ shared_data->team_member.team_barrier();
}
}
fe_degree,
n_q_points_1d,
Number>
- evaluator_tensor_product(shape_values,
- shape_gradients,
- co_shape_gradients);
+ evaluator_tensor_product(shared_data->team_member,
+ data->shape_values,
+ data->shape_gradients,
+ data->co_shape_gradients);
if (integrate_val == true && integrate_grad == true)
{
- evaluator_tensor_product.integrate_value_and_gradient(values,
- gradients);
+ evaluator_tensor_product.integrate_value_and_gradient(
+ shared_data->values, shared_data->gradients);
}
else if (integrate_val == true)
{
- evaluator_tensor_product.integrate_value(values);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ evaluator_tensor_product.integrate_value(shared_data->values);
+ shared_data->team_member.team_barrier();
}
else if (integrate_grad == true)
{
- evaluator_tensor_product.template integrate_gradient<false>(values,
- gradients);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ evaluator_tensor_product.template integrate_gradient<false>(
+ shared_data->values, shared_data->gradients);
+ shared_data->team_member.team_barrier();
}
}
n_q_points_1d,
n_components_,
Number>::value_type
- FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- get_value() const
+ FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::get_value(
+ int q_point) const
{
- const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
- return values[q_point];
+ return shared_data->values(q_point);
}
n_components_,
Number>::value_type
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- get_dof_value() const
+ get_dof_value(int q_point) const
{
- const unsigned int dof = internal::compute_index<dim, fe_degree + 1>();
- return values[dof];
+ return shared_data->values(q_point);
}
typename Number>
DEAL_II_HOST_DEVICE void
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- submit_value(const value_type &val_in)
+ submit_value(const value_type &val_in, int q_point)
{
- const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
- values[q_point] = val_in * JxW[q_point];
+ shared_data->values(q_point) = val_in * data->JxW(cell_id, q_point);
}
typename Number>
DEAL_II_HOST_DEVICE void
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- submit_dof_value(const value_type &val_in)
+ submit_dof_value(const value_type &val_in, int q_point)
{
- const unsigned int dof = internal::compute_index<dim, fe_degree + 1>();
- values[dof] = val_in;
+ shared_data->values(q_point) = val_in;
}
n_components_,
Number>::gradient_type
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- get_gradient() const
+ get_gradient(int q_point) const
{
    static_assert(n_components_ == 1, "This function only supports FE with one \
component");
- // TODO optimize if the mesh is uniform
- const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
- gradient_type grad;
+ gradient_type grad;
for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
{
Number tmp = 0.;
for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
- tmp += inv_jac(q_point, d_2, d_1) * gradients(q_point, d_2);
+ tmp += data->inv_jacobian(cell_id, q_point, d_2, d_1) *
+ shared_data->gradients(q_point, d_2);
grad[d_1] = tmp;
}
typename Number>
DEAL_II_HOST_DEVICE void
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
- submit_gradient(const gradient_type &grad_in)
+ submit_gradient(const gradient_type &grad_in, int q_point)
{
- // TODO optimize if the mesh is uniform
- const unsigned int q_point = internal::compute_index<dim, n_q_points_1d>();
for (unsigned int d_1 = 0; d_1 < dim; ++d_1)
{
Number tmp = 0.;
for (unsigned int d_2 = 0; d_2 < dim; ++d_2)
- tmp += inv_jac(q_point, d_1, d_2) * grad_in[d_2];
- gradients(q_point, d_1) = tmp * JxW[q_point];
+ tmp += data->inv_jacobian(cell_id, q_point, d_1, d_2) * grad_in[d_2];
+ shared_data->gradients(q_point, d_1) =
+ tmp * data->JxW(cell_id, q_point);
}
}
FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
apply_for_each_quad_point(const Functor &func)
{
- func(this);
-
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_q_points),
+ [&](const int &i) { func(this, i); });
+ shared_data->team_member.team_barrier();
}
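  // With the explicit quadrature point index, a functor passed to
  // apply_for_each_quad_point() has the signature shown in the following
  // minimal sketch (the functor name and body are made up for illustration):
  //
  //   struct ScaleByTwo
  //   {
  //     DEAL_II_HOST_DEVICE void
  //     operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
  //                const int q_point) const
  //     {
  //       fe_eval->submit_value(2. * fe_eval->get_value(q_point), q_point);
  //     }
  //   };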
} // namespace CUDAWrappers
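+   /**
+    * Return whether the dof at position (x_idx, y_idx) lies on the row or
+    * column that carries the hanging-node constraint for the given
+    * interpolation @p direction (2d version).
+    */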
+ template <unsigned int fe_degree, unsigned int direction>
+ DEAL_II_HOST_DEVICE inline bool
+ is_constrained_dof(
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ & constraint_mask,
+ const unsigned int x_idx,
+ const unsigned int y_idx)
+ {
+ return ((direction == 0) &&
+ (((constraint_mask & dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::subcell_y) !=
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ unconstrained) ?
+ (y_idx == 0) :
+ (y_idx == fe_degree))) ||
+ ((direction == 1) &&
+ (((constraint_mask & dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::subcell_x) !=
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ unconstrained) ?
+ (x_idx == 0) :
+ (x_idx == fe_degree)));
+ }
+
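+   /**
+    * Same as above for the 3d version: return whether the dof at
+    * (x_idx, y_idx, z_idx) lies on one of the constrained faces or on the
+    * constrained edge described by @p constraint_mask.
+    */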
+ template <unsigned int fe_degree, unsigned int direction>
+ DEAL_II_HOST_DEVICE inline bool
+ is_constrained_dof(
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds
+ & constraint_mask,
+ const unsigned int x_idx,
+ const unsigned int y_idx,
+ const unsigned int z_idx,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1_type,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2_type,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2,
+ const dealii::internal::MatrixFreeFunctions::ConstraintKinds edge)
+ {
+ const unsigned int face1_idx = (direction == 0) ? y_idx :
+ (direction == 1) ? z_idx :
+ x_idx;
+ const unsigned int face2_idx = (direction == 0) ? z_idx :
+ (direction == 1) ? x_idx :
+ y_idx;
+
+ const bool on_face1 = ((constraint_mask & face1_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) ?
+ (face1_idx == 0) :
+ (face1_idx == fe_degree);
+ const bool on_face2 = ((constraint_mask & face2_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) ?
+ (face2_idx == 0) :
+ (face2_idx == fe_degree);
+ return (
+ (((constraint_mask & face1) != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ on_face1) ||
+ (((constraint_mask & face2) != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ on_face2) ||
+ (((constraint_mask & edge) != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ on_face1 && on_face2));
+ }
+
+
+
template <unsigned int fe_degree,
unsigned int direction,
bool transpose,
typename Number>
DEAL_II_HOST_DEVICE inline void
interpolate_boundary_2d(
+ const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
constraint_weights,
const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- constraint_mask,
- Kokkos::Subview<
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>> values)
+ & constraint_mask,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
{
- const unsigned int x_idx = threadIdx.x % (fe_degree + 1);
- const unsigned int y_idx = threadIdx.y;
-
- const auto this_type =
- (direction == 0) ?
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x :
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y;
-
- const unsigned int interp_idx = (direction == 0) ? x_idx : y_idx;
+ constexpr unsigned int n_q_points_1d = fe_degree + 1;
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2);
- Number t = 0;
// Flag is true if dof is constrained for the given direction and the
// given face.
const bool constrained_face =
unconstrained))) !=
dealii::internal::MatrixFreeFunctions::ConstraintKinds::unconstrained;
- // Flag is true if for the given direction, the dof is constrained with
- // the right type and is on the correct side (left (= 0) or right (=
- // fe_degree))
- const bool constrained_dof =
- ((direction == 0) &&
- (((constraint_mask & dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::subcell_y) !=
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- unconstrained) ?
- (y_idx == 0) :
- (y_idx == fe_degree))) ||
- ((direction == 1) &&
- (((constraint_mask & dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::subcell_x) !=
- dealii::internal::MatrixFreeFunctions::ConstraintKinds::
- unconstrained) ?
- (x_idx == 0) :
- (x_idx == fe_degree)));
-
- if (constrained_face && constrained_dof)
- {
- const bool type = (constraint_mask & this_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained;
-
- if (type)
+ Number t[n_q_points];
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = q_point / n_q_points_1d;
+
+ const auto this_type =
+ (direction == 0) ?
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::
+ subcell_x :
+ dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y;
+
+ const unsigned int interp_idx = (direction == 0) ? x_idx : y_idx;
+ t[q_point] = 0;
+
+ // Flag is true if for the given direction, the dof is constrained
+ // with the right type and is on the correct side (left (= 0) or right
+ // (= fe_degree))
+ const bool constrained_dof =
+ is_constrained_dof<fe_degree, direction>(constraint_mask,
+ x_idx,
+ y_idx);
+
+ if (constrained_face && constrained_dof)
{
- for (unsigned int i = 0; i <= fe_degree; ++i)
+ const bool type = (constraint_mask & this_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained;
+
+ if (type)
{
- const unsigned int real_idx =
- (direction == 0) ? index2<fe_degree + 1>(i, y_idx) :
- index2<fe_degree + 1>(x_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[i * (fe_degree + 1) + interp_idx] :
- constraint_weights[interp_idx * (fe_degree + 1) + i];
- t += w * values[real_idx];
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ? index2<n_q_points_1d>(i, y_idx) :
+ index2<n_q_points_1d>(x_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[i * n_q_points_1d + interp_idx] :
+ constraint_weights[interp_idx * n_q_points_1d + i];
+ t[q_point] += w * values[real_idx];
+ }
}
- }
- else
- {
- for (unsigned int i = 0; i <= fe_degree; ++i)
+ else
{
- const unsigned int real_idx =
- (direction == 0) ? index2<fe_degree + 1>(i, y_idx) :
- index2<fe_degree + 1>(x_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[(fe_degree - i) * (fe_degree + 1) +
- fe_degree - interp_idx] :
- constraint_weights[(fe_degree - interp_idx) *
- (fe_degree + 1) +
- fe_degree - i];
- t += w * values[real_idx];
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ? index2<n_q_points_1d>(i, y_idx) :
+ index2<n_q_points_1d>(x_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[(fe_degree - i) * n_q_points_1d +
+ fe_degree - interp_idx] :
+ constraint_weights[(fe_degree - interp_idx) *
+ n_q_points_1d +
+ fe_degree - i];
+ t[q_point] += w * values[real_idx];
+ }
}
}
- }
+ });
// The synchronization is done for all the threads in one block with
// each block being assigned to one element.
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
- if (constrained_face && constrained_dof)
- values[index2<fe_degree + 1>(x_idx, y_idx)] = t;
-
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = q_point / n_q_points_1d;
+ const bool constrained_dof =
+ is_constrained_dof<fe_degree, direction>(
+ constraint_mask, x_idx, y_idx);
+ if (constrained_face && constrained_dof)
+ values[index2<fe_degree + 1>(x_idx, y_idx)] =
+ t[q_point];
+ });
+
+ team_member.team_barrier();
}
typename Number>
DEAL_II_HOST_DEVICE inline void
interpolate_boundary_3d(
+ const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
constraint_weights,
const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- constraint_mask,
- Kokkos::Subview<
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>> values)
+ constraint_mask,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
{
- const unsigned int x_idx = threadIdx.x % (fe_degree + 1);
- const unsigned int y_idx = threadIdx.y;
- const unsigned int z_idx = threadIdx.z;
+ constexpr unsigned int n_q_points_1d = fe_degree + 1;
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3);
const auto this_type =
(direction == 0) ?
dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_z;
const auto constrained_face = constraint_mask & (face1 | face2 | edge);
- const unsigned int interp_idx = (direction == 0) ? x_idx :
- (direction == 1) ? y_idx :
- z_idx;
- const unsigned int face1_idx = (direction == 0) ? y_idx :
- (direction == 1) ? z_idx :
- x_idx;
- const unsigned int face2_idx = (direction == 0) ? z_idx :
- (direction == 1) ? x_idx :
- y_idx;
-
- Number t = 0;
- const bool on_face1 = ((constraint_mask & face1_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) ?
- (face1_idx == 0) :
- (face1_idx == fe_degree);
- const bool on_face2 = ((constraint_mask & face2_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) ?
- (face2_idx == 0) :
- (face2_idx == fe_degree);
- const bool constrained_dof =
- ((((constraint_mask & face1) != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- on_face1) ||
- (((constraint_mask & face2) != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- on_face2) ||
- (((constraint_mask & edge) != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- on_face1 && on_face2));
-
- if ((constrained_face != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- constrained_dof)
- {
- const bool type = (constraint_mask & this_type) !=
- dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained;
- if (type)
+ Number t[n_q_points];
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d;
+ const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d);
+
+ const unsigned int interp_idx = (direction == 0) ? x_idx :
+ (direction == 1) ? y_idx :
+ z_idx;
+ const bool constrained_dof =
+ is_constrained_dof<fe_degree, direction>(constraint_mask,
+ x_idx,
+ y_idx,
+ z_idx,
+ face1_type,
+ face2_type,
+ face1,
+ face2,
+ edge);
+ t[q_point] = 0;
+ if ((constrained_face != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ constrained_dof)
{
- for (unsigned int i = 0; i <= fe_degree; ++i)
+ const bool type = (constraint_mask & this_type) !=
+ dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained;
+ if (type)
{
- const unsigned int real_idx =
- (direction == 0) ? index3<fe_degree + 1>(i, y_idx, z_idx) :
- (direction == 1) ? index3<fe_degree + 1>(x_idx, i, z_idx) :
- index3<fe_degree + 1>(x_idx, y_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[i * (fe_degree + 1) + interp_idx] :
- constraint_weights[interp_idx * (fe_degree + 1) + i];
- t += w * values[real_idx];
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ?
+ index3<fe_degree + 1>(i, y_idx, z_idx) :
+ (direction == 1) ?
+ index3<fe_degree + 1>(x_idx, i, z_idx) :
+ index3<fe_degree + 1>(x_idx, y_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[i * n_q_points_1d + interp_idx] :
+ constraint_weights[interp_idx * n_q_points_1d + i];
+ t[q_point] += w * values[real_idx];
+ }
}
- }
- else
- {
- for (unsigned int i = 0; i <= fe_degree; ++i)
+ else
{
- const unsigned int real_idx =
- (direction == 0) ? index3<fe_degree + 1>(i, y_idx, z_idx) :
- (direction == 1) ? index3<fe_degree + 1>(x_idx, i, z_idx) :
- index3<fe_degree + 1>(x_idx, y_idx, i);
-
- const Number w =
- transpose ?
- constraint_weights[(fe_degree - i) * (fe_degree + 1) +
- fe_degree - interp_idx] :
- constraint_weights[(fe_degree - interp_idx) *
- (fe_degree + 1) +
- fe_degree - i];
- t += w * values[real_idx];
+ for (unsigned int i = 0; i <= fe_degree; ++i)
+ {
+ const unsigned int real_idx =
+ (direction == 0) ?
+ index3<n_q_points_1d>(i, y_idx, z_idx) :
+ (direction == 1) ?
+ index3<n_q_points_1d>(x_idx, i, z_idx) :
+ index3<n_q_points_1d>(x_idx, y_idx, i);
+
+ const Number w =
+ transpose ?
+ constraint_weights[(fe_degree - i) * n_q_points_1d +
+ fe_degree - interp_idx] :
+ constraint_weights[(fe_degree - interp_idx) *
+ n_q_points_1d +
+ fe_degree - i];
+ t[q_point] += w * values[real_idx];
+ }
}
}
- }
+ });
// The synchronization is done for all the threads in one block with
// each block being assigned to one element.
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
-
- if ((constrained_face != dealii::internal::MatrixFreeFunctions::
- ConstraintKinds::unconstrained) &&
- constrained_dof)
- values[index3<fe_degree + 1>(x_idx, y_idx, z_idx)] = t;
-
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int x_idx = q_point % n_q_points_1d;
+ const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d;
+ const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d);
+ const bool constrained_dof =
+ is_constrained_dof<fe_degree, direction>(constraint_mask,
+ x_idx,
+ y_idx,
+ z_idx,
+ face1_type,
+ face2_type,
+ face1,
+ face2,
+ edge);
+ if ((constrained_face != dealii::internal::MatrixFreeFunctions::
+ ConstraintKinds::unconstrained) &&
+ constrained_dof)
+ values[index3<fe_degree + 1>(x_idx, y_idx, z_idx)] = t[q_point];
+ });
+
+ team_member.team_barrier();
}
template <int dim, int fe_degree, bool transpose, typename Number>
DEAL_II_HOST_DEVICE void
resolve_hanging_nodes(
+ const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
constraint_weights,
const dealii::internal::MatrixFreeFunctions::ConstraintKinds
- constraint_mask,
- Kokkos::Subview<
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>> values)
+ constraint_mask,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>> values)
{
if (dim == 2)
{
- interpolate_boundary_2d<fe_degree, 0, transpose>(constraint_weights,
+ interpolate_boundary_2d<fe_degree, 0, transpose>(team_member,
+ constraint_weights,
constraint_mask,
values);
- interpolate_boundary_2d<fe_degree, 1, transpose>(constraint_weights,
+ interpolate_boundary_2d<fe_degree, 1, transpose>(team_member,
+ constraint_weights,
constraint_mask,
values);
}
else if (dim == 3)
{
// Interpolate y and z faces (x-direction)
- interpolate_boundary_3d<fe_degree, 0, transpose>(constraint_weights,
+ interpolate_boundary_3d<fe_degree, 0, transpose>(team_member,
+ constraint_weights,
constraint_mask,
values);
// Interpolate x and z faces (y-direction)
- interpolate_boundary_3d<fe_degree, 1, transpose>(constraint_weights,
+ interpolate_boundary_3d<fe_degree, 1, transpose>(team_member,
+ constraint_weights,
constraint_mask,
values);
// Interpolate x and y faces (z-direction)
- interpolate_boundary_3d<fe_degree, 2, transpose>(constraint_weights,
+ interpolate_boundary_3d<fe_degree, 2, transpose>(team_member,
+ constraint_weights,
constraint_mask,
values);
}
 * the destination vector. Otherwise, use atomic operations.
*/
bool use_coloring;
+
+ /**
+    * Return the local quadrature point index. The index is only unique
+    * for a given MPI process.
+ */
+ DEAL_II_HOST_DEVICE unsigned int
+ local_q_point_id(const unsigned int cell,
+ const unsigned int n_q_points,
+ const unsigned int q_point) const
+ {
+ return (row_start / padding_length + cell) * n_q_points + q_point;
+ }
+
+
+ /**
+ * Return the quadrature point.
+ */
+ DEAL_II_HOST_DEVICE
+ typename CUDAWrappers::MatrixFree<dim, Number>::point_type &
+ get_quadrature_point(const unsigned int cell,
+ const unsigned int q_point) const
+ {
+ return q_points(cell, q_point);
+ }
};
/**
*/
std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
- /**
- * Cells per block (determined by the function cells_per_block_shmem() ).
- */
- unsigned int cells_per_block;
-
- /**
- * Grid dimensions used to launch the CUDA kernels
- * in *_constrained_values-operations.
- */
- dim3 constraint_grid_dim;
-
- /**
- * Block dimensions used to launch the CUDA kernels
- * in *_constrained_values-operations.
- */
- dim3 constraint_block_dim;
/**
* Length of the padding (closest power of two larger than or equal to
- // TODO We should rework this to use scratch memory
- /**
- * Structure to pass the shared memory into a general user function.
- */
template <int dim, typename Number>
struct SharedData
{
+ using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
+ using SharedView1D = Kokkos::View<
+ Number *,
+ MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+ using SharedView2D = Kokkos::View<
+ Number *[dim],
+ MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+
+ DEAL_II_HOST_DEVICE
+ SharedData(const TeamHandle &team_member,
+ SharedView1D values,
+ SharedView2D gradients)
+ : team_member(team_member)
+ , values(values)
+ , gradients(gradients)
+ {}
+
+ /**
+ * TeamPolicy handle.
+ */
+ TeamHandle team_member;
+
/**
* Memory for dof and quad values.
*/
- Kokkos::Subview<Kokkos::View<Number *, MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>>
- values;
+ SharedView1D values;
/**
* Memory for computed gradients in reference coordinate system.
*/
- Kokkos::Subview<
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>,
- Kokkos::pair<int, int>,
- Kokkos::pair<int, int>>
- gradients;
+ SharedView2D gradients;
};
- // This function determines the number of cells per block, possibly at compile
- // time (by virtue of being 'constexpr')
- // TODO this function should be rewritten using meta-programming
- DEAL_II_HOST_DEVICE constexpr unsigned int
- cells_per_block_shmem(int dim, int fe_degree)
- {
- /* clang-format off */
- // We are limiting the number of threads according to the
- // following formulas:
- // - in 2d: `threads = cells * (k+1)^d <= 4*CUDAWrappers::warp_size`
- // - in 3d: `threads = cells * (k+1)^d <= 2*CUDAWrappers::warp_size`
- return dim==2 ? (fe_degree==1 ? CUDAWrappers::warp_size : // 128
- fe_degree==2 ? CUDAWrappers::warp_size/4 : // 72
- fe_degree==3 ? CUDAWrappers::warp_size/8 : // 64
- fe_degree==4 ? CUDAWrappers::warp_size/8 : // 100
- 1) :
- dim==3 ? (fe_degree==1 ? CUDAWrappers::warp_size/4 : // 64
- fe_degree==2 ? CUDAWrappers::warp_size/16 : // 54
- 1) : 1;
- /* clang-format on */
- }
-
-
- /*----------------------- Helper functions ---------------------------------*/
- /**
- * Compute the quadrature point index in the local cell of a given thread.
- *
- * @relates CUDAWrappers::MatrixFree
- */
- template <int dim>
- DEAL_II_HOST_DEVICE inline unsigned int
- q_point_id_in_cell(const unsigned int n_q_points_1d)
- {
- KOKKOS_IF_ON_DEVICE(
- return (dim == 1 ?
- threadIdx.x % n_q_points_1d :
- dim == 2 ?
- threadIdx.x % n_q_points_1d + n_q_points_1d * threadIdx.y :
- threadIdx.x % n_q_points_1d +
- n_q_points_1d * (threadIdx.y + n_q_points_1d * threadIdx.z));)
-
- KOKKOS_IF_ON_HOST(AssertThrow(false, ExcInternalError()); return 0;)
- }
-
-
-
- /**
- * Return the quadrature point index local of a given thread. The index is
- * only unique for a given MPI process.
- *
- * @relates CUDAWrappers::MatrixFree
- */
- template <int dim, typename Number>
- DEAL_II_HOST_DEVICE inline unsigned int
- local_q_point_id(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *data,
- const unsigned int n_q_points_1d,
- const unsigned int n_q_points)
- {
- return (data->row_start / data->padding_length + cell) * n_q_points +
- q_point_id_in_cell<dim>(n_q_points_1d);
- }
-
-
-
- /**
- * Return the quadrature point associated with a given thread.
- *
- * @relates CUDAWrappers::MatrixFree
- */
- template <int dim, typename Number>
- DEAL_II_HOST_DEVICE inline
- typename CUDAWrappers::MatrixFree<dim, Number>::point_type &
- get_quadrature_point(
- const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *data,
- const unsigned int n_q_points_1d)
- {
- return data->q_points(cell, q_point_id_in_cell<dim>(n_q_points_1d));
- }
-
/**
* Structure which is passed to the kernel. It is used to pass all the
* necessary information from the CPU to the GPU.
 * the destination vector. Otherwise, use atomic operations.
*/
bool use_coloring;
+
+
+
+ /**
+ * This function is the host version of local_q_point_id().
+ */
+ unsigned int
+ local_q_point_id(const unsigned int cell,
+ const unsigned int n_q_points,
+ const unsigned int q_point) const
+ {
+ return (row_start / padding_length + cell) * n_q_points + q_point;
+ }
+
+
+
+ /**
+ * This function is the host version of get_quadrature_point().
+ */
+ Point<dim, Number>
+ get_quadrature_point(const unsigned int cell,
+ const unsigned int q_point) const
+ {
+ return q_points(cell, q_point);
+ }
};
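    // A minimal host-side usage sketch, assuming a DataHost object data_host
    // that has been mirrored from the device with copy_mf_data_to_host():
    //
    //   const unsigned int pos =
    //     data_host.local_q_point_id(cell, n_q_points, i);
    //   const Point<dim, Number> p = data_host.get_quadrature_point(cell, i);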
}
-
- /**
- * This function is the host version of local_q_point_id().
- *
- * @relates CUDAWrappers::MatrixFree
- */
- template <int dim, typename Number>
- inline unsigned int
- local_q_point_id_host(const unsigned int cell,
- const DataHost<dim, Number> &data,
- const unsigned int n_q_points,
- const unsigned int i)
- {
- return (data.row_start / data.padding_length + cell) * n_q_points + i;
- }
-
-
-
- /**
- * This function is the host version of get_quadrature_point(). It assumes
- * that the data in MatrixFree<dim, Number>::Data has been copied to the host
- * using copy_mf_data_to_host().
- *
- * @relates CUDAWrappers::MatrixFree
- */
- template <int dim, typename Number>
- inline Point<dim, Number>
- get_quadrature_point_host(const unsigned int cell,
- const DataHost<dim, Number> &data,
- const unsigned int i)
- {
- return data.q_points(cell, i);
- }
-
-
/*----------------------- Inline functions ---------------------------------*/
# ifndef DOXYGEN
void
ReinitHelper<dim, Number>::setup_cell_arrays(const unsigned int color)
{
- const unsigned int n_cells = data->n_cells[color];
- const unsigned int cells_per_block = data->cells_per_block;
-
- // Setup kernel parameters
- const double apply_n_blocks = std::ceil(
- static_cast<double>(n_cells) / static_cast<double>(cells_per_block));
- const auto apply_x_n_blocks =
- static_cast<unsigned int>(std::round(std::sqrt(apply_n_blocks)));
- const auto apply_y_n_blocks = static_cast<unsigned int>(
- std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks)));
-
- data->grid_dim[color] = dim3(apply_x_n_blocks, apply_y_n_blocks);
-
- // TODO this should be a templated parameter.
- const unsigned int n_dofs_1d = fe_degree + 1;
-
- if (dim == 1)
- data->block_dim[color] = dim3(n_dofs_1d * cells_per_block);
- else if (dim == 2)
- data->block_dim[color] = dim3(n_dofs_1d * cells_per_block, n_dofs_1d);
- else
- data->block_dim[color] =
- dim3(n_dofs_1d * cells_per_block, n_dofs_1d, n_dofs_1d);
-
+ const unsigned int n_cells = data->n_cells[color];
local_to_global = Kokkos::View<types::global_dof_index **,
MemorySpace::Default::kokkos_space>(
- template <int dim, typename Number, typename Functor>
- __global__ void
- apply_kernel_shmem(
- Functor func,
- const typename MatrixFree<dim, Number>::Data gpu_data,
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> values,
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space> gradients,
- Number *const src,
- Number * dst)
- {
- constexpr unsigned int cells_per_block =
- cells_per_block_shmem(dim, Functor::n_dofs_1d - 1);
-
- const unsigned int local_cell = threadIdx.x / Functor::n_dofs_1d;
- const unsigned int cell =
- local_cell + cells_per_block * (blockIdx.x + gridDim.x * blockIdx.y);
-
- if (cell < gpu_data.n_cells)
- {
- SharedData<dim, Number> shared_data(
- {Kokkos::subview(
- values,
- Kokkos::pair<int, int>(cell * Functor::n_local_dofs,
- (cell + 1) * Functor::n_local_dofs)),
- Kokkos::subview(gradients,
- Kokkos::pair<int, int>(cell * Functor::n_q_points,
- (cell + 1) *
- Functor::n_q_points),
- Kokkos::pair<int, int>(0, dim))});
-
- func(cell, &gpu_data, &shared_data, src, dst);
- }
- }
-
-
-
- template <int dim, typename Number, typename Functor>
- __global__ void
- evaluate_coeff(Functor func,
- const typename MatrixFree<dim, Number>::Data gpu_data)
- {
- constexpr unsigned int cells_per_block =
- cells_per_block_shmem(dim, Functor::n_dofs_1d - 1);
-
- const unsigned int local_cell = threadIdx.x / Functor::n_dofs_1d;
- const unsigned int cell =
- local_cell + cells_per_block * (blockIdx.x + gridDim.x * blockIdx.y);
-
- if (cell < gpu_data.n_cells)
- func(cell, &gpu_data);
- }
-
-
-
template <typename VectorType>
struct VectorLocalSize
{
return vec.size();
}
};
+
+
+
+ template <int dim, typename Number, typename Functor>
+ struct ApplyKernel
+ {
+ using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+ using SharedView1D =
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+ using SharedView2D =
+ Kokkos::View<Number *[dim],
+ MemorySpace::Default::kokkos_space::execution_space::
+ scratch_memory_space,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>;
+
+ ApplyKernel(Functor func,
+ const typename MatrixFree<dim, Number>::Data gpu_data,
+ Number *const src,
+ Number * dst)
+ : func(func)
+ , gpu_data(gpu_data)
+ , src(src)
+ , dst(dst)
+ {}
+
+ Functor func;
+ const typename MatrixFree<dim, Number>::Data gpu_data;
+ Number *const src;
+ Number * dst;
+
+
+ // Provide the shared memory capacity. This function takes the team_size
+ // as an argument, which allows team_size dependent allocations.
+ size_t
+ team_shmem_size(int /*team_size*/) const
+ {
+ return SharedView1D::shmem_size(Functor::n_local_dofs) +
+ SharedView2D::shmem_size(Functor::n_local_dofs);
+ }
+
+
+ DEAL_II_HOST_DEVICE
+ void
+ operator()(const TeamHandle &team_member) const
+ {
+ // Get the scratch memory
+ SharedView1D values(team_member.team_shmem(), Functor::n_local_dofs);
+ SharedView2D gradients(team_member.team_shmem(), Functor::n_local_dofs);
+
+ SharedData<dim, Number> shared_data(team_member, values, gradients);
+ func(team_member.league_rank(), &gpu_data, &shared_data, src, dst);
+ }
+ };
} // namespace internal
for (unsigned int i = 0; i < n_colors; ++i)
if (n_cells[i] > 0)
{
- internal::evaluate_coeff<dim, Number, Functor>
- <<<grid_dim[i], block_dim[i]>>>(func, get_data(i));
- AssertCudaKernel();
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ auto color_data = get_data(i);
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::evaluate_coeff",
+ Kokkos::MDRangePolicy<
+ MemorySpace::Default::kokkos_space::execution_space,
+ Kokkos::Rank<2>>(exec, {0, 0}, {n_cells[i], Functor::n_q_points}),
+ KOKKOS_LAMBDA(const int cell, const int q) {
+ func(&color_data, cell, q);
+ });
}
}
Kokkos::deep_copy(co_shape_gradients, co_shape_gradients_host);
}
- // Setup the number of cells per CUDA thread block
- cells_per_block = cells_per_block_shmem(dim, fe_degree);
-
internal::ReinitHelper<dim, Number> helper(
this, mapping, fe, quad, shape_info, *dof_handler, update_flags);
if (n_constrained_dofs != 0)
{
- const auto constraint_n_blocks = static_cast<unsigned int>(
- std::ceil(static_cast<double>(n_constrained_dofs) /
- static_cast<double>(block_size)));
- const auto constraint_x_n_blocks =
- static_cast<unsigned int>(std::round(std::sqrt(constraint_n_blocks)));
- const auto constraint_y_n_blocks = static_cast<unsigned int>(
- std::ceil(static_cast<double>(constraint_n_blocks) /
- static_cast<double>(constraint_x_n_blocks)));
-
- constraint_grid_dim =
- dim3(constraint_x_n_blocks, constraint_y_n_blocks);
- constraint_block_dim = dim3(block_size);
-
std::vector<dealii::types::global_dof_index> constrained_dofs_host(
n_constrained_dofs);
const VectorType &src,
VectorType & dst) const
{
- std::vector<Kokkos::View<Number *, MemorySpace::Default::kokkos_space>>
- values_colors(n_colors);
- std::vector<Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>>
- gradients_colors(n_colors);
// Execute the loop on the cells
- for (unsigned int i = 0; i < n_colors; ++i)
- if (n_cells[i] > 0)
+ for (unsigned int color = 0; color < n_colors; ++color)
+ if (n_cells[color] > 0)
{
- const unsigned int size =
- (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * cells_per_block *
- Functor::n_local_dofs;
- values_colors[i] =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("values_" + std::to_string(i),
- Kokkos::WithoutInitializing),
- size);
- gradients_colors[i] =
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("gradients_" + std::to_string(i),
- Kokkos::WithoutInitializing),
- size);
- internal::apply_kernel_shmem<dim, Number, Functor>
- <<<grid_dim[i], block_dim[i]>>>(func,
- get_data(i),
- values_colors[i],
- gradients_colors[i],
- src.get_values(),
- dst.get_values());
- AssertCudaKernel();
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, n_cells[color], Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(color), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for("dealii::MatrixFree::serial_cell_loop",
+ team_policy,
+ apply_kernel);
}
- cudaDeviceSynchronize();
+ Kokkos::fence();
}
const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
{
+ MemorySpace::Default::kokkos_space::execution_space exec;
+
// in case we have compatible partitioners, we can simply use the provided
// vectors
if (src.get_partitioner().get() == partitioner.get() &&
{
src.update_ghost_values_start(0);
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space> values;
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>
- gradients;
// In parallel, it's possible that some processors do not own any
// cells.
if (n_cells[0] > 0)
{
- const unsigned int size =
- (grid_dim[0].x * grid_dim[0].y * grid_dim[0].z) *
- cells_per_block * Functor::n_local_dofs;
- values =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("values", Kokkos::WithoutInitializing),
- size);
- gradients = Kokkos::View<Number *[dim],
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing),
- size);
- internal::apply_kernel_shmem<dim, Number, Functor>
- <<<grid_dim[0], block_dim[0]>>>(func,
- get_data(0),
- values,
- gradients,
- src.get_values(),
- dst.get_values());
- AssertCudaKernel();
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, n_cells[0], Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(0), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_0",
+ team_policy,
+ apply_kernel);
}
src.update_ghost_values_finish();
// cells
if (n_cells[1] > 0)
{
- const unsigned int size =
- (grid_dim[1].x * grid_dim[1].y * grid_dim[1].z) *
- cells_per_block * Functor::n_local_dofs;
- values =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("values", Kokkos::WithoutInitializing),
- size);
- gradients = Kokkos::View<Number *[dim],
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing),
- size);
- internal::apply_kernel_shmem<dim, Number, Functor>
- <<<grid_dim[1], block_dim[1]>>>(func,
- get_data(1),
- values,
- gradients,
- src.get_values(),
- dst.get_values());
- AssertCudaKernel();
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, n_cells[1], Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(1), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_1",
+ team_policy,
+ apply_kernel);
+
// We need a synchronization point because we don't want
// CUDA-aware MPI to start the MPI communication until the
// kernel is done.
- cudaDeviceSynchronize();
+ Kokkos::fence();
}
dst.compress_start(0, VectorOperation::add);
// not own any cells
if (n_cells[2] > 0)
{
- const unsigned int size =
- (grid_dim[2].x * grid_dim[2].y * grid_dim[2].z) *
- cells_per_block * Functor::n_local_dofs;
- values =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("values", Kokkos::WithoutInitializing),
- size);
- gradients = Kokkos::View<Number *[dim],
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing),
- size);
- internal::apply_kernel_shmem<dim, Number, Functor>
- <<<grid_dim[2], block_dim[2]>>>(func,
- get_data(2),
- values,
- gradients,
- src.get_values(),
- dst.get_values());
- AssertCudaKernel();
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, n_cells[2], Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(2), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_2",
+ team_policy,
+ apply_kernel);
}
dst.compress_finish(VectorOperation::add);
}
for (unsigned int i = 0; i < n_colors; ++i)
if (n_cells[i] > 0)
{
- const unsigned int size =
- (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) *
- cells_per_block * Functor::n_local_dofs;
- values_colors[i] =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("values_" + std::to_string(i),
- Kokkos::WithoutInitializing),
- size);
- gradients_colors[i] =
- Kokkos::View<Number *[dim],
- MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("gradients_" + std::to_string(i),
- Kokkos::WithoutInitializing),
- size);
- internal::apply_kernel_shmem<dim, Number, Functor>
- <<<grid_dim[i], block_dim[i]>>>(func,
- get_data(i),
- values_colors[i],
- gradients_colors[i],
- src.get_values(),
- dst.get_values());
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, n_cells[i], Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+ func, get_data(i), src.get_values(), dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_" +
+ std::to_string(i),
+ team_policy,
+ apply_kernel);
}
dst.compress(VectorOperation::add);
}
ghosted_src = src;
ghosted_dst = dst;
- std::vector<Kokkos::View<Number *, MemorySpace::Default::kokkos_space>>
- values_colors(n_colors);
- std::vector<
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>>
- gradients_colors(n_colors);
// Execute the loop on the cells
for (unsigned int i = 0; i < n_colors; ++i)
if (n_cells[i] > 0)
{
- const unsigned int size =
- (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) *
- cells_per_block * Functor::n_local_dofs;
- values_colors[i] =
- Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("values_" + std::to_string(i),
- Kokkos::WithoutInitializing),
- size);
- gradients_colors[i] =
- Kokkos::View<Number *[dim], MemorySpace::Default::kokkos_space>(
- Kokkos::view_alloc("gradients_" + std::to_string(i),
- Kokkos::WithoutInitializing),
- size);
- internal::apply_kernel_shmem<dim, Number, Functor>
- <<<grid_dim[i], block_dim[i]>>>(func,
- get_data(i),
- values_colors[i],
- gradients_colors[i],
- ghosted_src.get_values(),
- ghosted_dst.get_values());
- AssertCudaKernel();
+ Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, n_cells[i], Kokkos::AUTO);
+
+ internal::ApplyKernel<dim, Number, Functor> apply_kernel(
+             func,
+             get_data(i),
+             ghosted_src.get_values(),
+             ghosted_dst.get_values());
+
+ Kokkos::parallel_for(
+ "dealii::MatrixFree::distributed_cell_loop_" +
+ std::to_string(i),
+ team_policy,
+ apply_kernel);
}
// Add the ghosted values
typename ViewTypeIn,
typename ViewTypeOut>
DEAL_II_HOST_DEVICE void
- apply(const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ apply(const Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type
+ &team_member,
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
shape_data,
const ViewTypeIn in,
ViewTypeOut out)
{
- KOKKOS_IF_ON_DEVICE(
- const unsigned int i = (dim == 1) ? 0 : threadIdx.x % n_q_points_1d;
- const unsigned int j = (dim == 3) ? threadIdx.y : 0;
- const unsigned int q = (dim == 1) ? (threadIdx.x % n_q_points_1d) :
- (dim == 2) ? threadIdx.y :
- threadIdx.z;
-
- // This loop simply multiplies the shape function at the quadrature
- // point by the value finite element coefficient.
- Number t = 0;
- for (int k = 0; k < n_q_points_1d; ++k) {
- const unsigned int shape_idx =
- dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
- const unsigned int source_idx =
- (direction == 0) ? (k + n_q_points_1d * (i + n_q_points_1d * j)) :
- (direction == 1) ? (i + n_q_points_1d * (k + n_q_points_1d * j)) :
- (i + n_q_points_1d * (j + n_q_points_1d * k));
- t += shape_data[shape_idx] *
- (in_place ? out[source_idx] : in[source_idx]);
- }
-
- if (in_place) __syncthreads();
-
- const unsigned int destination_idx =
- (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
- (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
- (i + n_q_points_1d * (j + n_q_points_1d * q));
-
- if (add) Kokkos::atomic_add(&out[destination_idx], t);
- else out[destination_idx] = t;)
+ constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);
+
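+      // Per-thread scratch: each thread keeps the contraction results for
+      // the quadrature points assigned to it by the TeamThreadRange below.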
+ Number t[n_q_points];
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d;
+ const unsigned int j =
+ (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0;
+ const unsigned int q =
+ (dim == 1) ? q_point :
+ (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d :
+ q_point / (n_q_points_1d * n_q_points_1d);
+
+            // This loop multiplies the shape function at the quadrature
+            // point by the value of the finite element coefficient.
+ t[q_point] = 0;
+ for (int k = 0; k < n_q_points_1d; ++k)
+ {
+ const unsigned int shape_idx =
+ dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
+ const unsigned int source_idx =
+ (direction == 0) ?
+ (k + n_q_points_1d * (i + n_q_points_1d * j)) :
+ (direction == 1) ?
+ (i + n_q_points_1d * (k + n_q_points_1d * j)) :
+ (i + n_q_points_1d * (j + n_q_points_1d * k));
+ t[q_point] += shape_data[shape_idx] *
+ (in_place ? out(source_idx) : in(source_idx));
+ }
+ });
+
+ if (in_place)
+ team_member.team_barrier();
+
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(team_member, n_q_points),
+ [&](const int &q_point) {
+ const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d;
+ const unsigned int j =
+ (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0;
+ const unsigned int q =
+ (dim == 1) ? q_point :
+ (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d :
+ q_point / (n_q_points_1d * n_q_points_1d);
+
+ const unsigned int destination_idx =
+ (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
+ (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
+ (i + n_q_points_1d * (j + n_q_points_1d * q));
+
+ if (add)
+ Kokkos::atomic_add(&out(destination_idx), t[q_point]);
+ else
+ out(destination_idx) = t[q_point];
+ });
}
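The decomposition of the linear index `q_point` into the tensor coordinates `(i, j, q)` that both loops repeat can be sanity-checked on the host; a minimal sketch for the `dim == 3` layout:

#include <cassert>

int main()
{
  constexpr int n = 4; // stands in for n_q_points_1d
  for (int q_point = 0; q_point < n * n * n; ++q_point)
    {
      const int i = q_point % n;       // fastest-running coordinate
      const int j = (q_point / n) % n;
      const int q = q_point / (n * n); // slowest-running coordinate
      // Recomposing the coordinates must reproduce the linear index.
      assert(i + n * (j + n * q) == q_point);
    }
  return 0;
}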
n_q_points_1d,
Number>
{
+ using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
DEAL_II_HOST_DEVICE
EvaluatorTensorProduct(
+ const TeamHandle & team_member,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
shape_gradients,
DEAL_II_HOST_DEVICE void
integrate_value_and_gradient(ViewType1 u, ViewType2 grad_u);
+ /**
+ * TeamPolicy handle.
+ */
+ const TeamHandle &team_member;
+
/**
* Values of the shape functions.
*/
n_q_points_1d,
Number>::
EvaluatorTensorProduct(
+ const TeamHandle & team_member,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
shape_gradients,
Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
co_shape_gradients)
- : shape_values(shape_values)
+ : team_member(team_member)
+ , shape_values(shape_values)
, shape_gradients(shape_gradients)
, co_shape_gradients(co_shape_gradients)
{}
ViewTypeOut out) const
{
apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- shape_values, in, out);
+ team_member, shape_values, in, out);
}
ViewTypeOut out) const
{
apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- shape_gradients, in, out);
+ team_member, shape_gradients, in, out);
}
ViewTypeOut out) const
{
apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
- co_shape_gradients, in, out);
+ team_member, co_shape_gradients, in, out);
}
case 2:
{
values<0, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
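+      // team_barrier() replaces __syncthreads(): every thread of the team
+      // must finish the direction-0 sweep before u is read again below.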
+ team_member.team_barrier();
values<1, true, false, true>(u, u);
break;
case 3:
{
values<0, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, true, false, true>(u, u);
break;
case 2:
{
values<0, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, false, false, true>(u, u);
break;
case 3:
{
values<0, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, false, false, true>(u, u);
break;
values<0, true, false, false>(
u, Kokkos::subview(grad_u, Kokkos::ALL, 1));
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, true, false, true>(
Kokkos::subview(grad_u, Kokkos::ALL, 0),
values<0, true, false, false>(
u, Kokkos::subview(grad_u, Kokkos::ALL, 2));
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, true, false, true>(
Kokkos::subview(grad_u, Kokkos::ALL, 0),
Kokkos::subview(grad_u, Kokkos::ALL, 2),
Kokkos::subview(grad_u, Kokkos::ALL, 2));
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, true, false, true>(
Kokkos::subview(grad_u, Kokkos::ALL, 0),
case 1:
{
values<0, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
co_gradients<0, true, false, false>(
u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
case 2:
{
values<0, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
co_gradients<0, true, false, false>(
u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
case 3:
{
values<0, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, true, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
co_gradients<0, true, false, false>(
u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
Kokkos::subview(grad_u, Kokkos::ALL, 1),
Kokkos::subview(grad_u, Kokkos::ALL, 1));
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, false, add, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
gradients<1, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
Kokkos::subview(grad_u, Kokkos::ALL, 2),
Kokkos::subview(grad_u, Kokkos::ALL, 2));
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, false, false, true>(
Kokkos::subview(grad_u, Kokkos::ALL, 0),
Kokkos::subview(grad_u, Kokkos::ALL, 2),
Kokkos::subview(grad_u, Kokkos::ALL, 2));
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, false, add, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
gradients<2, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
{
co_gradients<0, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<0, false, false, true>(u, u);
{
co_gradients<1, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
co_gradients<0, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<0, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
break;
}
{
co_gradients<2, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
co_gradients<1, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
co_gradients<0, false, true, false>(
Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<2, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<1, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
values<0, false, false, true>(u, u);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ team_member.team_barrier();
break;
}
DummyOperator<dim, fe_degree>::operator()(
const unsigned int cell,
const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
- CUDAWrappers::SharedData<dim, double> *,
+ CUDAWrappers::SharedData<dim, double> * shared_data,
const double *,
double *dst) const
{
- const unsigned int pos = CUDAWrappers::local_q_point_id<dim, double>(
- cell, gpu_data, n_dofs_1d, n_q_points);
- auto point = CUDAWrappers::get_quadrature_point<dim, double>(cell,
- gpu_data,
- fe_degree + 1);
- dst[pos] = dim == 2 ? point(0) + point(1) : point(0) + point(1) + point(2);
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(shared_data->team_member, n_q_points),
+ [&](const int q_point) {
+ const unsigned int pos =
+ gpu_data->local_q_point_id(cell, n_q_points, q_point);
+
+ auto point = gpu_data->get_quadrature_point(cell, q_point);
+ dst[pos] =
+ dim == 2 ? point(0) + point(1) : point(0) + point(1) + point(2);
+ });
}
for (unsigned int i = 0; i < n_q_points_per_cell; ++i)
{
unsigned int const pos =
- CUDAWrappers::local_q_point_id_host<dim, double>(
- cell_id, gpu_data_host, n_q_points_per_cell, i);
- auto p = CUDAWrappers::get_quadrature_point_host<dim, double>(
- cell_id, gpu_data_host, i);
+ gpu_data_host.local_q_point_id(cell_id, n_q_points_per_cell, i);
+ auto p = gpu_data_host.get_quadrature_point(cell_id, i);
const double p_val = dim == 2 ? p(0) + p(1) : p(0) + p(1) + p(2);
AssertThrow(std::abs(coef[pos] - p_val) < 1e-12,
ExcInternalError());
// much easier to check their correctness directly rather than from the results
// in dependent functions
+#include <deal.II/base/memory_space.h>
+
#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/read_write_vector.h>
namespace CUDA = LinearAlgebra::CUDAWrappers;
+using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
template <int M, int N, int type, bool add, bool dof_to_quad>
-__global__ void
+DEAL_II_HOST_DEVICE void
evaluate_tensor_product(
+ const TeamHandle & team_member,
Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values,
Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_gradients,
Kokkos::View<double *, MemorySpace::Default::kokkos_space> co_shape_gradients,
- double * dst,
- double * src)
+ Kokkos::View<double *, MemorySpace::Default::kokkos_space> dst,
+ Kokkos::View<double *, MemorySpace::Default::kokkos_space> src)
{
CUDAWrappers::internal::EvaluatorTensorProduct<
CUDAWrappers::internal::evaluate_general,
M - 1,
N,
double>
- evaluator(shape_values, shape_gradients, co_shape_gradients);
+ evaluator(team_member, shape_values, shape_gradients, co_shape_gradients);
if (type == 0)
evaluator.template values<0, dof_to_quad, add, false>(src, dst);
if (type == 1 && M % 2 == 1 && N % 2 == 1)
shape_host[M / 2 * N + N / 2] = 0.;
- LinearAlgebra::ReadWriteVector<double> x_host(N), x_ref(N), y_host(M),
- y_ref(M);
+ LinearAlgebra::ReadWriteVector<double> x_ref(N), y_ref(M);
+ Kokkos::View<double[N], MemorySpace::Default::kokkos_space> x_dev(
+ Kokkos::view_alloc("x_dev", Kokkos::WithoutInitializing));
+ Kokkos::View<double[M], MemorySpace::Default::kokkos_space> y_dev(
+ Kokkos::view_alloc("y_dev", Kokkos::WithoutInitializing));
+ auto x_host = Kokkos::create_mirror_view(x_dev);
+ auto y_host = Kokkos::create_mirror_view(y_dev);
+
for (unsigned int i = 0; i < N; ++i)
- x_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+ x_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
// Compute reference
for (unsigned int i = 0; i < M; ++i)
{
- y_host[i] = 1.;
- y_ref[i] = add ? y_host[i] : 0.;
+ y_host(i) = 1.;
+ y_ref[i] = add ? y_host(i) : 0.;
for (unsigned int j = 0; j < N; ++j)
- y_ref[i] += shape_host[i * N + j] * x_host[j];
+ y_ref[i] += shape_host[i * N + j] * x_host(j);
}
// Copy data to the GPU.
- CUDA::Vector<double> x_dev(N), y_dev(M);
- x_dev.import(x_host, VectorOperation::insert);
- y_dev.import(y_host, VectorOperation::insert);
-
+ Kokkos::deep_copy(x_dev, x_host);
+ Kokkos::deep_copy(y_dev, y_host);
Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values(
Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing),
// Launch the kernel
- evaluate_tensor_product<M, N, type, add, false><<<1, M>>>(shape_values,
- shape_gradients,
- co_shape_gradients,
- y_dev.get_values(),
- x_dev.get_values());
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::TeamPolicy<MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, 1, Kokkos::AUTO);
+ Kokkos::parallel_for(
+ team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+ evaluate_tensor_product<M, N, type, add, false>(team_member,
+ shape_values,
+ shape_gradients,
+ co_shape_gradients,
+ y_dev,
+ x_dev);
+ });
// Check the results on the host
- y_host.import(y_dev, VectorOperation::insert);
+ Kokkos::deep_copy(y_host, y_dev);
deallog << "Errors no transpose: ";
for (unsigned int i = 0; i < M; ++i)
- deallog << y_host[i] - y_ref[i] << " ";
+ deallog << y_host(i) - y_ref[i] << " ";
deallog << std::endl;
for (unsigned int i = 0; i < M; ++i)
- y_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+ y_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
// Copy y_host to the device
- y_dev.import(y_host, VectorOperation::insert);
+ Kokkos::deep_copy(y_dev, y_host);
// Compute reference
for (unsigned int i = 0; i < N; ++i)
{
- x_host[i] = 2.;
- x_ref[i] = add ? x_host[i] : 0.;
+ x_host(i) = 2.;
+ x_ref[i] = add ? x_host(i) : 0.;
for (unsigned int j = 0; j < M; ++j)
- x_ref[i] += shape_host[j * N + i] * y_host[j];
+ x_ref[i] += shape_host[j * N + i] * y_host(j);
}
// Copy x_host to the device
- x_dev.import(x_host, VectorOperation::insert);
+ Kokkos::deep_copy(x_dev, x_host);
// Launch the kernel
- evaluate_tensor_product<M, N, type, add, true><<<1, M>>>(shape_values,
- shape_gradients,
- co_shape_gradients,
- x_dev.get_values(),
- y_dev.get_values());
+ Kokkos::parallel_for(
+ team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+ evaluate_tensor_product<M, N, type, add, true>(team_member,
+ shape_values,
+ shape_gradients,
+ co_shape_gradients,
+ x_dev,
+ y_dev);
+ });
// Check the results on the host
- x_host.import(x_dev, VectorOperation::insert);
+ Kokkos::deep_copy(x_host, x_dev);
deallog << "Errors transpose: ";
for (unsigned int i = 0; i < N; ++i)
- deallog << x_host[i] - x_ref[i] << " ";
+ deallog << x_host(i) - x_ref[i] << " ";
deallog << std::endl;
}
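The host/device transfers in this test follow the standard Kokkos mirror-view idiom that replaces the `import()` calls of `CUDA::Vector`; in isolation (sizes and names illustrative):

#include <Kokkos_Core.hpp>

int main(int argc, char *argv[])
{
  Kokkos::initialize(argc, argv);
  {
    Kokkos::View<double[8]> x_dev("x_dev");
    // The mirror lives in host-accessible memory; if the device memory is
    // already host-accessible, it aliases x_dev and deep_copy is a no-op.
    auto x_host = Kokkos::create_mirror_view(x_dev);
    for (int i = 0; i < 8; ++i)
      x_host(i) = i;
    Kokkos::deep_copy(x_dev, x_host); // host -> device
    // ... run device kernels on x_dev ...
    Kokkos::deep_copy(x_host, x_dev); // device -> host
  }
  Kokkos::finalize();
  return 0;
}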
namespace CUDA = LinearAlgebra::CUDAWrappers;
+using TeamHandle = Kokkos::TeamPolicy<
+ MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
template <int M, int N, int type, bool add, bool dof_to_quad>
-__global__ void
+DEAL_II_HOST_DEVICE void
evaluate_tensor_product(
+ const TeamHandle & team_member,
Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values,
Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_gradients,
Kokkos::View<double *, MemorySpace::Default::kokkos_space> co_shape_gradients,
- double * dst,
- double * src)
+ Kokkos::View<double *, MemorySpace::Default::kokkos_space> dst,
+ Kokkos::View<double *, MemorySpace::Default::kokkos_space> src)
{
CUDAWrappers::internal::EvaluatorTensorProduct<
CUDAWrappers::internal::evaluate_general,
M - 1,
N,
double>
- evaluator(shape_values, shape_gradients, co_shape_gradients);
+ evaluator(team_member, shape_values, shape_gradients, co_shape_gradients);
if (type == 0)
{
evaluator.template values<0, dof_to_quad, false, false>(src, src);
- __syncthreads();
+ team_member.team_barrier();
evaluator.template values<1, dof_to_quad, add, false>(src, dst);
}
if (type == 1)
{
evaluator.template gradients<0, dof_to_quad, false, false>(src, src);
- __syncthreads();
+ team_member.team_barrier();
evaluator.template gradients<1, dof_to_quad, add, false>(src, dst);
}
}
constexpr int M_2d = M * M;
constexpr int N_2d = N * N;
- LinearAlgebra::ReadWriteVector<double> x_host(N_2d), x_ref(N_2d),
- y_host(M_2d), y_ref(M_2d);
+ LinearAlgebra::ReadWriteVector<double> x_ref(N_2d), y_ref(M_2d);
+ Kokkos::View<double[N_2d], MemorySpace::Default::kokkos_space> x_dev(
+ Kokkos::view_alloc("x_dev", Kokkos::WithoutInitializing));
+ Kokkos::View<double[M_2d], MemorySpace::Default::kokkos_space> y_dev(
+ Kokkos::view_alloc("y_dev", Kokkos::WithoutInitializing));
+ auto x_host = Kokkos::create_mirror_view(x_dev);
+ auto y_host = Kokkos::create_mirror_view(y_dev);
+
for (unsigned int i = 0; i < N_2d; ++i)
- x_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+ x_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
FullMatrix<double> shape_2d(M_2d, N_2d);
for (unsigned int i = 0; i < M; ++i)
// Compute reference
for (unsigned int i = 0; i < M_2d; ++i)
{
- y_host[i] = 1.;
- y_ref[i] = add ? y_host[i] : 0.;
+ y_host(i) = 1.;
+ y_ref[i] = add ? y_host(i) : 0.;
for (unsigned int j = 0; j < N_2d; ++j)
- y_ref[i] += shape_2d(i, j) * x_host[j];
+ y_ref[i] += shape_2d(i, j) * x_host(j);
}
// Copy data to the GPU.
- CUDA::Vector<double> x_dev(N_2d), y_dev(M_2d);
- x_dev.import(x_host, VectorOperation::insert);
- y_dev.import(y_host, VectorOperation::insert);
+ Kokkos::deep_copy(x_dev, x_host);
+ Kokkos::deep_copy(y_dev, y_host);
Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values(
Kokkos::deep_copy(co_shape_gradients, shape_host_view);
// Launch the kernel
- dim3 block_dim(M, N);
- evaluate_tensor_product<M, N, type, add, false>
- <<<1, block_dim>>>(shape_values,
- shape_gradients,
- co_shape_gradients,
- y_dev.get_values(),
- x_dev.get_values());
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::TeamPolicy<MemorySpace::Default::kokkos_space::execution_space>
+ team_policy(exec, 1, Kokkos::AUTO);
+ Kokkos::parallel_for(
+ team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+ evaluate_tensor_product<M, N, type, add, false>(team_member,
+ shape_values,
+ shape_gradients,
+ co_shape_gradients,
+ y_dev,
+ x_dev);
+ });
// Check the results on the host
- y_host.import(y_dev, VectorOperation::insert);
+ Kokkos::deep_copy(y_host, y_dev);
deallog << "Errors no transpose: ";
for (unsigned int i = 0; i < M_2d; ++i)
- deallog << y_host[i] - y_ref[i] << " ";
+ deallog << y_host(i) - y_ref[i] << " ";
deallog << std::endl;
for (unsigned int i = 0; i < M_2d; ++i)
- y_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+ y_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
// Copy y_host to the device
- y_dev.import(y_host, VectorOperation::insert);
+ Kokkos::deep_copy(y_dev, y_host);
// Compute reference
for (unsigned int i = 0; i < N_2d; ++i)
{
- x_host[i] = 2.;
- x_ref[i] = add ? x_host[i] : 0.;
+ x_host(i) = 2.;
+ x_ref[i] = add ? x_host(i) : 0.;
for (unsigned int j = 0; j < M_2d; ++j)
- x_ref[i] += shape_2d(j, i) * y_host[j];
+ x_ref[i] += shape_2d(j, i) * y_host(j);
}
// Copy x_host to the device
- x_dev.import(x_host, VectorOperation::insert);
+ Kokkos::deep_copy(x_dev, x_host);
// Launch the kernel
- evaluate_tensor_product<M, N, type, add, true>
- <<<1, block_dim>>>(shape_values,
- shape_gradients,
- co_shape_gradients,
- x_dev.get_values(),
- y_dev.get_values());
+ Kokkos::parallel_for(
+ team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+ evaluate_tensor_product<M, N, type, add, true>(team_member,
+ shape_values,
+ shape_gradients,
+ co_shape_gradients,
+ x_dev,
+ y_dev);
+ });
// Check the results on the host
- x_host.import(x_dev, VectorOperation::insert);
+ Kokkos::deep_copy(x_host, x_dev);
deallog << "Errors transpose: ";
for (unsigned int i = 0; i < N_2d; ++i)
- deallog << x_host[i] - x_ref[i] << " ";
+ deallog << x_host(i) - x_ref[i] << " ";
deallog << std::endl;
}
tria.begin(tria.n_levels() - 1)->set_refine_flag();
tria.last()->set_refine_flag();
tria.execute_coarsening_and_refinement();
- cell = tria.begin_active();
for (unsigned int i = 0; i < 10 - 3 * dim; ++i)
{
cell = tria.begin_active();
Number * dst) const
{
CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number>
- fe_eval(cell, gpu_data, shared_data);
+ fe_eval(gpu_data, shared_data);
// set to unit vector
- fe_eval.submit_dof_value(1.);
- KOKKOS_IF_ON_DEVICE(__syncthreads();)
+ auto fe_eval_ptr = &fe_eval;
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_local_dofs),
+ [&](int i) { fe_eval_ptr->submit_dof_value(1., i); });
+ shared_data->team_member.team_barrier();
fe_eval.evaluate(/*evaluate_values =*/true, /*evaluate_gradients=*/true);
#ifndef __APPLE__
- // values should evaluate to one, derivatives to zero
- assert(fe_eval.get_value() == 1.);
- for (unsigned int e = 0; e < dim; ++e)
- assert(fe_eval.get_gradient()[e] == 0.);
+ Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+ n_local_dofs),
+ [&](int i) {
+ // values should evaluate to one, derivatives to zero
+ assert(fe_eval_ptr->get_value(i) == 1.);
+ for (unsigned int e = 0; e < dim; ++e)
+ assert(fe_eval_ptr->get_gradient(i)[e] == 0.);
+ });
fe_eval.integrate(/*integrate_values = */ true,
/*integrate_gradients=*/true);
- assert(fe_eval.get_dof_value() == 1.);
+
+ Kokkos::parallel_for(
+ Kokkos::TeamThreadRange(shared_data->team_member, n_local_dofs),
+    [&](int i) { assert(fe_eval_ptr->get_dof_value(i) == 1.); });
#endif
}
{
public:
DEAL_II_HOST_DEVICE
- HelmholtzOperatorQuad(Number coef)
- : coef(coef)
+ HelmholtzOperatorQuad(
+ const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
+ Number * coef,
+ int cell)
+ : gpu_data(gpu_data)
+ , coef(coef)
+ , cell(cell)
{}
DEAL_II_HOST_DEVICE void
operator()(
CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number>
- *fe_eval) const;
+ * fe_eval,
+ int q_point) const;
+
+ static const unsigned int n_q_points =
+ dealii::Utilities::pow(n_q_points_1d, dim);
private:
- Number coef;
+ const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data;
+ Number * coef;
+ int cell;
};
template <int dim, int fe_degree, typename Number, int n_q_points_1d>
DEAL_II_HOST_DEVICE void
HelmholtzOperatorQuad<dim, fe_degree, Number, n_q_points_1d>::operator()(
- CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval)
- const
+ CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval,
+ int q_point) const
{
- fe_eval->submit_value(coef * fe_eval->get_value());
- fe_eval->submit_gradient(fe_eval->get_gradient());
+ unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q_point);
+ fe_eval->submit_value(coef[pos] * fe_eval->get_value(q_point), q_point);
+ fe_eval->submit_gradient(fe_eval->get_gradient(q_point), q_point);
}
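For orientation, `apply_for_each_quad_point` can be thought of as distributing such a functor over the quadrature points of the cell with a `TeamThreadRange`. A hedged sketch of the assumed semantics — `for_each_quad_point_sketch` is not deal.II API:

#include <Kokkos_Core.hpp>

// Assumed semantics only, not the library implementation: call func(q) once
// for every quadrature point of the cell, then synchronize the team.
template <typename TeamMember, typename Functor>
KOKKOS_FUNCTION void
for_each_quad_point_sketch(const TeamMember &team_member,
                           const Functor &   func,
                           const int         n_q_points)
{
  Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points),
                       [&](const int q_point) { func(q_point); });
  team_member.team_barrier();
}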
class HelmholtzOperator
{
public:
+ static const unsigned int n_dofs_1d = fe_degree + 1;
+ static const unsigned int n_local_dofs =
+ dealii::Utilities::pow(fe_degree + 1, dim);
+ static const unsigned int n_q_points =
+ dealii::Utilities::pow(n_q_points_1d, dim);
+
HelmholtzOperator(Number *coefficient)
: coef(coefficient)
{}
const Number * src,
Number * dst) const;
- static const unsigned int n_dofs_1d = fe_degree + 1;
- static const unsigned int n_local_dofs =
- dealii::Utilities::pow(fe_degree + 1, dim);
- static const unsigned int n_q_points =
- dealii::Utilities::pow(n_q_points_1d, dim);
-
Number *coef;
};
const Number * src,
Number * dst) const
{
- const unsigned int pos = CUDAWrappers::local_q_point_id<dim, Number>(
- cell, gpu_data, n_dofs_1d, n_q_points);
-
CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> fe_eval(
- cell, gpu_data, shared_data);
+ gpu_data, shared_data);
fe_eval.read_dof_values(src);
fe_eval.evaluate(true, true);
fe_eval.apply_for_each_quad_point(
- HelmholtzOperatorQuad<dim, fe_degree, Number, n_q_points_1d>(coef[pos]));
+ HelmholtzOperatorQuad<dim, fe_degree, Number, n_q_points_1d>(gpu_data,
+ coef,
+ cell));
fe_eval.integrate(true, true);
fe_eval.distribute_local_to_global(dst);
}
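On the host side, this cell functor is then handed to the matrix-free cell loop. A hedged usage sketch — the wrapper name `apply_helmholtz` and the setup of `mf_data` and `coef` are assumptions, and the template parameters follow the test above:

// Assumed setup: mf_data is a reinit'ed CUDAWrappers::MatrixFree<dim, Number>
// and coef points to the device-side array filled by the coefficient functor.
template <int dim, int fe_degree, typename Number, int n_q_points_1d>
void
apply_helmholtz(
  const CUDAWrappers::MatrixFree<dim, Number> &mf_data,
  Number *                                     coef,
  const LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &src,
  LinearAlgebra::distributed::Vector<Number, MemorySpace::Default> &      dst)
{
  HelmholtzOperator<dim, fe_degree, Number, n_q_points_1d> helmholtz_operator(
    coef);
  // Ghost exchange and the final compress happen inside the loop, as the
  // distributed_cell_loop shown earlier makes explicit.
  mf_data.cell_loop(helmholtz_operator, src, dst);
}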
DEAL_II_HOST_DEVICE void
operator()(
+ const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data);
+ const unsigned int q) const;
static const unsigned int n_dofs_1d = fe_degree + 1;
static const unsigned int n_local_dofs =
template <int dim, int fe_degree, typename Number, int n_q_points_1d>
DEAL_II_HOST_DEVICE void
VaryingCoefficientFunctor<dim, fe_degree, Number, n_q_points_1d>::operator()(
+ const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
const unsigned int cell,
- const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data)
+ const unsigned int q) const
{
- const unsigned int pos = CUDAWrappers::local_q_point_id<dim, Number>(
- cell, gpu_data, n_dofs_1d, n_q_points);
- const auto q_point =
- CUDAWrappers::get_quadrature_point<dim, Number>(cell, gpu_data, n_dofs_1d);
+ const unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q);
+ const auto q_point = gpu_data->get_quadrature_point(cell, q);
+
Number p_square = 0.;
for (unsigned int i = 0; i < dim; ++i)