#include <deal.II/base/config.h>
+#include "deal.II/base/memory_space.h"
+
#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/tensor.h>
# include <deal.II/matrix_free/cuda_matrix_free.templates.h>
# include <deal.II/matrix_free/cuda_tensor_product_kernels.h>
+# include <Kokkos_Core.hpp>
+
DEAL_II_NAMESPACE_OPEN
/**
const bool use_coloring;
Number *inv_jac;
- Number *JxW;
+    // We would like to use
+    // Kokkos::Subview<Kokkos::View<Number **,
+    // MemorySpace::Default::kokkos_space>, int, decltype(Kokkos::ALL)>
+    // but that triggers "error: incomplete type is not allowed". The error
+    // cannot be reproduced outside of deal.II and needs further investigation.
+ Kokkos::Subview<Kokkos::View<Number **, MemorySpace::Default::kokkos_space>,
+ int,
+ Kokkos::pair<int, int>>
+ JxW;
// Internal buffer
Number *values;
, mf_object_id(data->id)
, constraint_mask(data->constraint_mask[cell_id])
, use_coloring(data->use_coloring)
+ , JxW(Kokkos::subview(
+ data->JxW,
+ cell_id,
+ Kokkos::pair<int, int>(0, Utilities::pow(n_q_points_1d, dim))))
, values(shdata->values)
{
local_to_global = data->local_to_global + padding_length * cell_id;
inv_jac = data->inv_jacobian + padding_length * cell_id;
- JxW = data->JxW + padding_length * cell_id;
for (unsigned int i = 0; i < dim; ++i)
gradients[i] = shdata->gradients[i];
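
// Standalone sketch (not part of the patch; all names are illustrative): a
// rank-1 Kokkos::subview of the rank-2 JxW View replaces the former pointer
// arithmetic "JxW = data->JxW + padding_length * cell_id". The remaining
// index is the quadrature point within the cell, and the deduced type of the
// subview is the Kokkos::Subview<..., int, Kokkos::pair<int, int>> spelled
// out in the member declaration above.
#include <Kokkos_Core.hpp>

void subview_sketch()
{
  // Assumes Kokkos::initialize() has already been called.
  const int n_cells = 4, n_q_points = 8;

  // Rank-2 host View indexed as (cell, quadrature point).
  Kokkos::View<double **, Kokkos::HostSpace> JxW("JxW", n_cells, n_q_points);

  const int cell_id = 2;
  // Rank-1 subview of a single cell: cell_JxW(q) aliases JxW(cell_id, q).
  auto cell_JxW =
    Kokkos::subview(JxW, cell_id, Kokkos::pair<int, int>(0, n_q_points));
  cell_JxW(3) = 1.0; // writes JxW(2, 3)
}
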
#include <deal.II/base/config.h>
+#include "deal.II/base/memory_space.h"
+
#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda_size.h>
+# include <deal.II/base/memory_space.h>
# include <deal.II/base/mpi_stub.h>
# include <deal.II/base/partitioner.h>
# include <deal.II/base/quadrature.h>
/**
* Constructor.
*/
- AdditionalData(
- const ParallelizationScheme parallelization_scheme = parallel_in_elem,
- const UpdateFlags mapping_update_flags = update_gradients |
- update_JxW_values |
- update_quadrature_points,
- const bool use_coloring = false,
- const bool overlap_communication_computation = false)
- : parallelization_scheme(parallelization_scheme)
- , mapping_update_flags(mapping_update_flags)
+ AdditionalData(const UpdateFlags mapping_update_flags =
+ update_gradients | update_JxW_values |
+ update_quadrature_points,
+ const bool use_coloring = false,
+ const bool overlap_communication_computation = false)
+ : mapping_update_flags(mapping_update_flags)
, use_coloring(use_coloring)
, overlap_communication_computation(overlap_communication_computation)
{
ExcMessage(
"Overlapping communication and coloring are incompatible options. Only one of them can be enabled."));
}
- /**
- * Parallelization scheme used, parallelization over degrees of freedom or
- * over cells.
- */
- ParallelizationScheme parallelization_scheme;
/**
* This flag is used to determine which quantities should be cached. This
* class can cache data needed for gradient computations (inverse
Number *inv_jacobian;
/**
- * Pointer to the Jacobian times the weights.
+ * Kokkos::View of the Jacobian times the weights.
*/
- Number *JxW;
+ Kokkos::View<Number **, MemorySpace::Default::kokkos_space> JxW;
/**
* ID of the associated MatrixFree object.
*/
int my_id;
- /**
- * Parallelization scheme used, parallelization over degrees of freedom or
- * over cells.
- */
- ParallelizationScheme parallelization_scheme;
-
/**
* If true, use graph coloring. Otherwise, use atomic operations. Graph
* coloring ensures bitwise reproducibility but is slower on Pascal and
std::vector<Number *> inv_jacobian;
/**
- * Vector of pointer to the Jacobian times the weights associated to the
- * cells of each color.
+    * Vector of Kokkos::View objects holding the Jacobian times the weights
+    * associated with the cells of each color.
*/
- std::vector<Number *> JxW;
+ std::vector<Kokkos::View<Number **, MemorySpace::Default::kokkos_space>>
+ JxW;
/**
* Pointer to the constrained degrees of freedom.
if (update_flags & update_JxW_values)
{
data_host.JxW.resize(n_elements);
- Utilities::CUDA::copy_to_host(data.JxW, data_host.JxW);
+ Utilities::CUDA::copy_to_host(data.JxW.data(), data_host.JxW);
}
data_host.constraint_mask.resize(data_host.n_cells);
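
// Usage sketch (not part of the patch; the object names are assumptions):
// with this change the AdditionalData constructor no longer takes a
// ParallelizationScheme, only the mapping update flags and the two boolean
// options, and reinit() is called as before.
#include <deal.II/base/quadrature.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/mapping.h>
#include <deal.II/fe/update_flags.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/matrix_free/cuda_matrix_free.h>

using namespace dealii;

template <int dim>
void setup_matrix_free(CUDAWrappers::MatrixFree<dim, double> &mf_data,
                       const Mapping<dim>                    &mapping,
                       const DoFHandler<dim>                 &dof_handler,
                       const AffineConstraints<double>       &constraints,
                       const Quadrature<1>                   &quadrature)
{
  typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData
    additional_data(update_gradients | update_JxW_values |
                      update_quadrature_points,
                    /*use_coloring=*/false,
                    /*overlap_communication_computation=*/false);

  mf_data.reinit(mapping, dof_handler, constraints, quadrature, additional_data);
}
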
#include <deal.II/base/config.h>
+#include "deal.II/base/memory_space.h"
+
#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <string>
+
#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda.h>
// Host data
std::vector<types::global_dof_index> local_to_global_host;
std::vector<Point<dim, Number>> q_points_host;
- std::vector<Number> JxW_host;
- std::vector<Number> inv_jacobian_host;
+ Kokkos::View<Number **, MemorySpace::Default::kokkos_space> JxW;
+ std::vector<Number> inv_jacobian_host;
std::vector<dealii::internal::MatrixFreeFunctions::ConstraintKinds>
constraint_mask_host;
// Local buffer
// TODO this should be a templated parameter.
const unsigned int n_dofs_1d = fe_degree + 1;
- if (data->parallelization_scheme ==
- MatrixFree<dim, Number>::parallel_in_elem)
- {
- if (dim == 1)
- data->block_dim[color] = dim3(n_dofs_1d * cells_per_block);
- else if (dim == 2)
- data->block_dim[color] =
- dim3(n_dofs_1d * cells_per_block, n_dofs_1d);
- else
- data->block_dim[color] =
- dim3(n_dofs_1d * cells_per_block, n_dofs_1d, n_dofs_1d);
- }
+ if (dim == 1)
+ data->block_dim[color] = dim3(n_dofs_1d * cells_per_block);
+ else if (dim == 2)
+ data->block_dim[color] = dim3(n_dofs_1d * cells_per_block, n_dofs_1d);
else
- data->block_dim[color] = dim3(cells_per_block);
+ data->block_dim[color] =
+ dim3(n_dofs_1d * cells_per_block, n_dofs_1d, n_dofs_1d);
local_to_global_host.resize(n_cells * padding_length);
q_points_host.resize(n_cells * padding_length);
if (update_flags & update_JxW_values)
- JxW_host.resize(n_cells * padding_length);
+ JxW = Kokkos::View<Number **, MemorySpace::Default::kokkos_space>(
+ Kokkos::view_alloc("JxW_" + std::to_string(color),
+ Kokkos::WithoutInitializing),
+ n_cells,
+ dofs_per_cell);
if (update_flags & update_gradients)
inv_jacobian_host.resize(n_cells * padding_length * dim * dim);
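
// Standalone sketch of the Kokkos::view_alloc call above (illustrative names):
// the string argument is only a label used for profiling and debugging, and
// Kokkos::WithoutInitializing skips the fill kernel that would otherwise zero
// an allocation that is overwritten immediately afterwards.
#include <Kokkos_Core.hpp>

#include <string>

Kokkos::View<double **> make_jxw_view(const int n_cells,
                                      const int n_q_points,
                                      const int color)
{
  return Kokkos::View<double **>(
    Kokkos::view_alloc("JxW_" + std::to_string(color),
                       Kokkos::WithoutInitializing),
    n_cells,
    n_q_points);
}
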
if (update_flags & update_JxW_values)
{
+          // FIXME: too many deep_copy calls; a host mirror is created and
+          // copied back to the device for every cell.
+ auto JxW_host = Kokkos::create_mirror_view_and_copy(
+ MemorySpace::Host::kokkos_space{}, JxW);
std::vector<double> JxW_values_double = fe_values.get_JxW_values();
- const unsigned int offset = cell_id * padding_length;
for (unsigned int i = 0; i < q_points_per_cell; ++i)
- JxW_host[i + offset] = static_cast<Number>(JxW_values_double[i]);
+ JxW_host(cell_id, i) = static_cast<Number>(JxW_values_double[i]);
+ Kokkos::deep_copy(JxW, JxW_host);
}
if (update_flags & update_gradients)
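
// Standalone sketch of the mirror pattern used above (not part of the patch;
// names are illustrative): fill a device View through a host-accessible
// mirror and copy the result back with Kokkos::deep_copy.
#include <Kokkos_Core.hpp>

#include <vector>

void copy_weights_to_device(Kokkos::View<double **>    JxW,
                            const std::vector<double> &jxw_values,
                            const unsigned int         cell_id)
{
  // Host mirror initialized with the current device contents (a no-op copy
  // if the default memory space is already host accessible).
  auto JxW_host =
    Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, JxW);

  for (unsigned int q = 0; q < jxw_values.size(); ++q)
    JxW_host(cell_id, q) = jxw_values[q];

  // Push the updated weights back to the device View.
  Kokkos::deep_copy(JxW, JxW_host);
}
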
const unsigned int n_cells = data->n_cells[color];
// Local-to-global mapping
- if (data->parallelization_scheme ==
- MatrixFree<dim, Number>::parallel_over_elem)
- transpose_in_place(local_to_global_host, n_cells, padding_length);
-
alloc_and_copy(
&data->local_to_global[color],
ArrayView<const types::global_dof_index>(local_to_global_host.data(),
// Quadrature points
if (update_flags & update_quadrature_points)
{
- if (data->parallelization_scheme ==
- MatrixFree<dim, Number>::parallel_over_elem)
- transpose_in_place(q_points_host, n_cells, padding_length);
-
alloc_and_copy(&data->q_points[color],
ArrayView<const Point<dim, Number>>(
q_points_host.data(), q_points_host.size()),
// Jacobian determinants/quadrature weights
if (update_flags & update_JxW_values)
{
- if (data->parallelization_scheme ==
- MatrixFree<dim, Number>::parallel_over_elem)
- transpose_in_place(JxW_host, n_cells, padding_length);
-
- alloc_and_copy(&data->JxW[color],
- ArrayView<const Number>(JxW_host.data(),
- JxW_host.size()),
- n_cells * padding_length);
+ data->JxW[color] = JxW;
}
// Inverse jacobians
padding_length * n_cells,
dim * dim);
- // Transpose second time means we get the following index order:
- // q*n_cells*dim*dim + i*n_cells + cell_id which is good for an
- // element-level parallelization
- if (data->parallelization_scheme ==
- MatrixFree<dim, Number>::parallel_over_elem)
- transpose_in_place(inv_jacobian_host,
- n_cells * dim * dim,
- padding_length);
-
alloc_and_copy(&data->inv_jacobian[color],
ArrayView<const Number>(inv_jacobian_host.data(),
inv_jacobian_host.size()),
Utilities::CUDA::free(inv_jacobian_color_ptr);
inv_jacobian.clear();
- for (auto &JxW_color_ptr : JxW)
- Utilities::CUDA::free(JxW_color_ptr);
- JxW.clear();
-
for (auto &constraint_mask_color_ptr : constraint_mask)
Utilities::CUDA::free(constraint_mask_color_ptr);
constraint_mask.clear();
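
// Illustrative sketch (not part of the patch): Kokkos::View is reference
// counted, so storing the per-color View in the data structure is a shallow
// copy that shares ownership, and the device allocation is released once the
// last handle disappears. That is why the explicit Utilities::CUDA::free of
// the old JxW pointers can simply be dropped.
#include <Kokkos_Core.hpp>

#include <vector>

void views_free_themselves()
{
  // Assumes Kokkos::initialize() has already been called.
  std::vector<Kokkos::View<double **>> per_color;
  {
    Kokkos::View<double **> JxW("JxW_0", 16, 27);
    per_color.push_back(JxW); // shallow copy, use count goes to 2
  }                           // local handle destroyed, allocation still alive
  per_color.clear();          // last handle destroyed, device memory freed
}
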
n_constrained_dofs * sizeof(unsigned int);
// For each color, add local_to_global, inv_jacobian, JxW, and q_points.
+    // FIXME: the byte count below has not been updated for the Kokkos::View
+    // storage of JxW.
for (unsigned int i = 0; i < n_colors; ++i)
{
bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
if (update_flags & update_gradients)
update_flags |= update_JxW_values;
- if (additional_data.parallelization_scheme != parallel_over_elem &&
- additional_data.parallelization_scheme != parallel_in_elem)
- AssertThrow(false, ExcMessage("Invalid parallelization scheme."));
-
- this->parallelization_scheme = additional_data.parallelization_scheme;
- this->use_coloring = additional_data.use_coloring;
+ this->use_coloring = additional_data.use_coloring;
this->overlap_communication_computation =
additional_data.overlap_communication_computation;