#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#include <deal.II/base/cuda_size.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/quadrature.h>
+#include <deal.II/base/tensor.h>
-# include <deal.II/base/cuda_size.h>
-# include <deal.II/base/mpi.h>
-# include <deal.II/base/quadrature.h>
-# include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_handler.h>
-# include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_update_flags.h>
+#include <deal.II/fe/mapping.h>
+#include <deal.II/fe/mapping_q1.h>
-# include <deal.II/fe/fe_update_flags.h>
-# include <deal.II/fe/mapping.h>
-# include <deal.II/fe/mapping_q1.h>
+#include <deal.II/grid/filtered_iterator.h>
-# include <deal.II/lac/affine_constraints.h>
-# include <deal.II/lac/cuda_vector.h>
-# include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/cuda_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
DEAL_II_NAMESPACE_OPEN
namespace CUDAWrappers
{
// forward declaration
-# ifndef DOXYGEN
+#ifndef DOXYGEN
namespace internal
{
template <int dim, typename Number>
class ReinitHelper;
}
-# endif
+#endif
/**
* This class collects all the data that is stored for the matrix free
public:
using jacobian_type = Tensor<2, dim, Tensor<1, dim, Number>>;
using point_type = Point<dim, Number>;
+ using CellFilter =
+ FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
/**
* Parallelization scheme used: parallel_in_elem (parallelism at the level
, use_coloring(use_coloring)
, overlap_communication_computation(overlap_communication_computation)
{
-# ifndef DEAL_II_MPI_WITH_CUDA_SUPPORT
+#ifndef DEAL_II_MPI_WITH_CUDA_SUPPORT
AssertThrow(
overlap_communication_computation == false,
ExcMessage(
"Overlapping communication and computation requires CUDA-aware MPI."));
-# endif
+#endif
if (overlap_communication_computation == true)
AssertThrow(
use_coloring == false || overlap_communication_computation == false,
initialize_dof_vector(
LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &vec) const;
+ /**
+ * Return the colored graph of locally owned active cells.
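+ *
+ * A minimal usage sketch (assuming @p mf_data is an already initialized
+ * MatrixFree object; the name is a placeholder):
+ * @code
+ * const auto &graph = mf_data.get_colored_graph();
+ * for (unsigned int color = 0; color < graph.size(); ++color)
+ *   for (const auto &cell : graph[color])
+ *     {
+ *       // 'cell' behaves like a DoFHandler<dim>::active_cell_iterator
+ *       // restricted to the locally owned cells of this color.
+ *     }
+ * @endcode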
+ */
+ const std::vector<std::vector<CellFilter>> &
+ get_colored_graph() const;
+
/**
* Return the partitioner that represents the locally owned data and the
* ghost indices where access is needed for the cell loop. The
*/
const DoFHandler<dim> *dof_handler;
+ /**
+ * Colored graph of locally owned active cells.
+ */
+ std::vector<std::vector<CellFilter>> graph;
+
friend class internal::ReinitHelper<dim, Number>;
};
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+
// This function determines the number of cells per block, possibly at compile
// time (by virtue of being 'constexpr')
// TODO this function should be rewritten using meta-programming
q_point_id_in_cell<dim>(n_q_points_1d));
}
+ /**
+ * Structure which is passed to the kernel. It is used to transfer all the
+ * necessary information from the CPU to the GPU.
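+ *
+ * The per-cell data is padded: for instance, quadrature point @p i of cell
+ * @p cell is found at position <code>cell * padding_length + i</code> of
+ * @p q_points, which is what get_quadrature_point_host() below relies on.
+ * A minimal sketch, assuming @p data_host has been filled by
+ * copy_mf_data_to_host() with update_quadrature_points among the update
+ * flags:
+ * @code
+ * // Read one quadrature point directly from the (padded) host copy.
+ * const Point<dim, Number> p =
+ *   data_host.q_points[cell * data_host.padding_length + i];
+ * @endcode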
+ */
+ template <int dim, typename Number>
+ struct DataHost
+ {
+ /**
+ * Vector of quadrature points.
+ */
+ std::vector<Point<dim, Number>> q_points;
+
+ /**
+ * Map the position in the local vector to the position in the global
+ * vector.
+ */
+ std::vector<types::global_dof_index> local_to_global;
+
+ /**
+ * Vector of inverse Jacobians.
+ */
+ std::vector<Number> inv_jacobian;
+
+ /**
+ * Vector of Jacobian times the weights.
+ */
+ std::vector<Number> JxW;
+
+ /**
+ * ID of the associated MatrixFree object.
+ */
+ unsigned int id;
+
+ /**
+ * Number of cells.
+ */
+ unsigned int n_cells;
+
+ /**
+ * Length of the padding.
+ */
+ unsigned int padding_length;
+
+ /**
+ * Row start (including padding).
+ */
+ unsigned int row_start;
+
+ /**
+ * Mask deciding where constraints are set on a given cell.
+ */
+ std::vector<unsigned int> constraint_mask;
+
+ /**
+ * If true, graph coloring has been used and we can simply add into the
+ * destination vector. Otherwise, atomic operations are used.
+ */
+ bool use_coloring;
+ };
+
+
+
+ /**
+ * Copy @p data from the device to the host. @p update_flags should be
+ * identical to the flags used in MatrixFree::AdditionalData.
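+ *
+ * A minimal usage sketch, assuming @p mf_data is the enclosing
+ * CUDAWrappers::MatrixFree object and that its per-color Data structure is
+ * accessible (here assumed to be available as <code>get_data(color)</code>):
+ * @code
+ * // The flags must match those given to MatrixFree::AdditionalData.
+ * const UpdateFlags flags = update_quadrature_points | update_JxW_values;
+ * // Copy the device-side data of one color to the host.
+ * const auto data_host =
+ *   CUDAWrappers::copy_mf_data_to_host<dim, double>(mf_data.get_data(color),
+ *                                                   flags);
+ * @endcode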
+ *
+ * @relates CUDAWrappers::MatrixFree
+ */
+ template <int dim, typename Number>
+ DataHost<dim, Number>
+ copy_mf_data_to_host(
+ const typename dealii::CUDAWrappers::MatrixFree<dim, Number>::Data &data,
+ const UpdateFlags &update_flags)
+ {
+ DataHost<dim, Number> data_host;
+
+ data_host.id = data.id;
+ data_host.n_cells = data.n_cells;
+ data_host.padding_length = data.padding_length;
+ data_host.row_start = data.row_start;
+ data_host.use_coloring = data.use_coloring;
+
+ const unsigned int n_elements =
+ data_host.n_cells * data_host.padding_length;
+ if (update_flags & update_quadrature_points)
+ {
+ data_host.q_points.resize(n_elements);
+ Utilities::CUDA::copy_to_host(data.q_points, data_host.q_points);
+ }
+
+ data_host.local_to_global.resize(n_elements);
+ Utilities::CUDA::copy_to_host(data.local_to_global,
+ data_host.local_to_global);
+
+ if (update_flags & update_gradients)
+ {
+ data_host.inv_jacobian.resize(n_elements * dim * dim);
+ Utilities::CUDA::copy_to_host(data.inv_jacobian,
+ data_host.inv_jacobian);
+ }
+
+ if (update_flags & update_JxW_values)
+ {
+ data_host.JxW.resize(n_elements);
+ Utilities::CUDA::copy_to_host(data.JxW, data_host.JxW);
+ }
+
+ data_host.constraint_mask.resize(data_host.n_cells);
+ Utilities::CUDA::copy_to_host(data.constraint_mask,
+ data_host.constraint_mask);
+
+ return data_host;
+ }
+
+
+
+ /**
+ * This function is the host version of local_q_point_id().
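+ *
+ * A possible use is to fill a host-side coefficient table, sketched here
+ * under the assumption that @p coefficient has one entry per quadrature
+ * point of all locally owned cells (over all colors), @p n_q_points is the
+ * number of quadrature points per cell, and @p data_host comes from
+ * copy_mf_data_to_host():
+ * @code
+ * for (unsigned int cell = 0; cell < data_host.n_cells; ++cell)
+ *   for (unsigned int q = 0; q < n_q_points; ++q)
+ *     coefficient[local_q_point_id_host(cell, data_host, n_q_points, q)] =
+ *       1.;
+ * @endcode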
+ *
+ * @relates CUDAWrappers::MatrixFree
+ */
+ template <int dim, typename Number>
+ inline unsigned int
+ local_q_point_id_host(const unsigned int cell,
+ const DataHost<dim, Number> &data,
+ const unsigned int n_q_points,
+ const unsigned int i)
+ {
+ return (data.row_start / data.padding_length + cell) * n_q_points + i;
+ }
+
+
+
+ /**
+ * This function is the host version of get_quadrature_point(). It assumes
+ * that the data in MatrixFree<dim, Number>::Data has been copied to the host
+ * using copy_mf_data_to_host().
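+ *
+ * For example, to evaluate a dealii::Function<dim> at a quadrature point on
+ * the host (assuming @p data_host was created with update_quadrature_points
+ * set and @p my_function is a placeholder for an existing Function object):
+ * @code
+ * const Point<dim, double> point =
+ *   get_quadrature_point_host(cell, data_host, q);
+ * const double value = my_function.value(point);
+ * @endcode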
+ *
+ * @relates CUDAWrappers::MatrixFree
+ */
+ template <int dim, typename Number>
+ inline Point<dim, Number>
+ get_quadrature_point_host(const unsigned int cell,
+ const DataHost<dim, Number> &data,
+ const unsigned int i)
+ {
+ return data.q_points[data.padding_length * cell + i];
+ }
+#endif
+
/*----------------------- Inline functions ---------------------------------*/
-# ifndef DOXYGEN
+#ifndef DOXYGEN
template <int dim, typename Number>
- const std::shared_ptr<const Utilities::MPI::Partitioner> &
+ inline const std::vector<std::vector<
+ FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>>> &
+ MatrixFree<dim, Number>::get_colored_graph() const
+ {
+ return graph;
+ }
+
+
+
+ template <int dim, typename Number>
+ inline const std::shared_ptr<const Utilities::MPI::Partitioner> &
MatrixFree<dim, Number>::get_vector_partitioner() const
{
return partitioner;
}
+
+
template <int dim, typename Number>
- const DoFHandler<dim> &
+ inline const DoFHandler<dim> &
MatrixFree<dim, Number>::get_dof_handler() const
{
Assert(dof_handler != nullptr, ExcNotInitialized());
return *dof_handler;
}
-# endif
+#endif
} // namespace CUDAWrappers
DEAL_II_NAMESPACE_CLOSE
#endif
-
-#endif