#ifdef DEAL_II_COMPILER_CUDA_AWARE
+# include <deal.II/base/mpi.h>
# include <deal.II/base/quadrature.h>
# include <deal.II/base/tensor.h>
# include <deal.II/lac/affine_constraints.h>
# include <deal.II/lac/cuda_vector.h>
+# include <deal.II/lac/la_parallel_vector.h>
DEAL_II_NAMESPACE_OPEN
*/
MatrixFree();
+ /**
+ * Return the length of the padding.
+ */
unsigned int
get_padding_length() const;
* degrees of freedom, the DoFHandler and the mapping describe the
* transformation from unit to real cell, and the finite element
* underlying the DoFHandler together with the quadrature formula
- * describe the local operations.
+ * describe the local operations. This function supports distributed
+ * computation (MPI).
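+   *
+   * A minimal initialization sketch, assuming @p dof_handler,
+   * @p constraints, and a one-dimensional quadrature formula @p quad have
+   * been set up by the caller:
+   * @code
+   * CUDAWrappers::MatrixFree<dim, double> mf_data;
+   * mf_data.reinit(mapping, dof_handler, constraints, quad, MPI_COMM_WORLD);
+   * @endcode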
+ */
+ void
+ reinit(const Mapping<dim> & mapping,
+ const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ const MPI_Comm & comm,
+ const AdditionalData additional_data = AdditionalData());
+
+ /**
+ * Initializes the data structures. Same as above but using a Q1 mapping.
+ */
+ void
+ reinit(const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ const MPI_Comm & comm,
+           const AdditionalData additional_data = AdditionalData());
+
+ /**
+ * Extracts the information needed to perform loops over cells. The
+ * DoFHandler and AffineConstraints objects describe the layout of
+ * degrees of freedom, the DoFHandler and the mapping describe the
+ * transformation from unit to real cell, and the finite element
+ * underlying the DoFHandler together with the quadrature formula
+   * describe the local operations. This function does not support
+   * distributed computation (MPI).
*/
void
reinit(const Mapping<dim> & mapping,
const VectorType &src,
VectorType & dst) const;
+ /**
+ * Copy the values of the constrained entries from @p src to @p dst. This is
+   * used to impose zero Dirichlet boundary conditions.
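+   *
+   * A typical use is at the end of a matrix-free vmult() so that the
+   * operator acts as the identity on the constrained degrees of freedom.
+   * A minimal sketch, assuming @p mf_data is an initialized object of this
+   * class and @p local_operator is a user-defined cell functor:
+   * @code
+   * dst = 0.;
+   * mf_data.cell_loop(local_operator, src, dst);
+   * mf_data.copy_constrained_values(src, dst);
+   * @endcode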
+ */
template <typename VectorType>
void
copy_constrained_values(const VectorType &src, VectorType &dst) const;
+ /**
+ * Set the entries in @p dst corresponding to constrained values to @p val.
+ * The main purpose of this function is to set the constrained entries of
+ * the source vector used in cell_loop() to zero.
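+   *
+   * A minimal sketch, assuming @p mf_data is an initialized object of this
+   * class:
+   * @code
+   * mf_data.set_constrained_values(0., src);
+   * @endcode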
+ */
template <typename VectorType>
void
set_constrained_values(const Number val, VectorType &dst) const;
memory_consumption() const;
private:
+ /**
+ * Initializes the data structures.
+ */
+ void
+ reinit(const Mapping<dim> & mapping,
+ const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ std::shared_ptr<const MPI_Comm> comm,
+ const AdditionalData additional_data);
+
+ /**
+   * Helper function. Loop over all the cells and apply the functor to each
+   * cell in parallel. This function is used when MPI is not used.
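+   *
+   * The functor passed to cell_loop() is expected to provide compile-time
+   * sizes and a __device__ call operator, following the pattern of deal.II's
+   * CUDA matrix-free codes. A sketch of the expected shape (LocalOperator is
+   * a hypothetical user class):
+   * @code
+   * template <int dim, int fe_degree>
+   * class LocalOperator
+   * {
+   * public:
+   *   static const unsigned int n_dofs_1d    = fe_degree + 1;
+   *   static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);
+   *   static const unsigned int n_q_points   = Utilities::pow(fe_degree + 1, dim);
+   *
+   *   __device__ void
+   *   operator()(const unsigned int                            cell,
+   *              const typename MatrixFree<dim, double>::Data *gpu_data,
+   *              SharedData<dim, double> *                     shared_data,
+   *              const double *                                src,
+   *              double *                                      dst) const;
+   * };
+   * @endcode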
+ */
+ template <typename functor, typename VectorType>
+ void
+ serial_cell_loop(const functor & func,
+ const VectorType &src,
+ VectorType & dst) const;
+
+ /**
+   * Helper function. Loop over all the cells and apply the functor to each
+   * cell in parallel. This function is used when MPI is used.
+ */
+ template <typename functor>
+ void
+ distributed_cell_loop(
+ const functor & func,
+ const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const;
+
+ /**
+ * This function should never be called. Calling it results in an internal
+ * error. This function exists only because cell_loop needs
+ * distributed_cell_loop() to exist for LinearAlgebra::CUDAWrappers::Vector.
+ */
+ template <typename functor>
+ void
+ distributed_cell_loop(
+ const functor & func,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+ LinearAlgebra::CUDAWrappers::Vector<Number> & dst) const;
+
+ /**
+ * Helper function. Copy the values of the constrained entries of @p src to
+ * @p dst. This function is used when MPI is not used.
+ */
+ template <typename VectorType>
+ void
+ serial_copy_constrained_values(const VectorType &src,
+ VectorType & dst) const;
+
+ /**
+ * Helper function. Copy the values of the constrained entries of @p src to
+ * @p dst. This function is used when MPI is used.
+ */
+ void
+ distributed_copy_constrained_values(
+ const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const;
+
+ /**
+ * This function should never be called. Calling it results in an internal
+ * error. This function exists only because copy_constrained_values needs
+ * distributed_copy_constrained_values() to exist for
+ * LinearAlgebra::CUDAWrappers::Vector.
+ */
+ void
+ distributed_copy_constrained_values(
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+ LinearAlgebra::CUDAWrappers::Vector<Number> & dst) const;
+
+ /**
+ * Helper function. Set the constrained entries of @p dst to @p val. This
+ * function is used when MPI is not used.
+ */
+ template <typename VectorType>
+ void
+ serial_set_constrained_values(const Number val, VectorType &dst) const;
+
+ /**
+ * Helper function. Set the constrained entries of @p dst to @p val. This
+ * function is used when MPI is used.
+ */
+ void
+ distributed_set_constrained_values(
+ const Number val,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const;
+
+ /**
+ * This function should never be called. Calling it results in an internal
+ * error. This function exists only because set_constrained_values needs
+ * distributed_set_constrained_values() to exist for
+ * LinearAlgebra::CUDAWrappers::Vector.
+ */
+ void
+ distributed_set_constrained_values(
+ const Number val,
+ LinearAlgebra::CUDAWrappers::Vector<Number> &dst) const;
+
/**
* Parallelization scheme used, parallelization over degrees of freedom or
* over cells.
*/
std::vector<dim3> block_dim;
- // Parallelization parameter
+ /**
+   * Shared pointer to a Partitioner for the distributed vectors used in
+   * cell_loop(). When MPI is not used, the pointer is null. A shared
+   * pointer (rather than a unique pointer) is needed because
+   * LinearAlgebra::distributed::Vector is constructed from a
+   * std::shared_ptr to the Partitioner.
+   */
+  std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+ // Parallelization parameters
unsigned int cells_per_block;
dim3 constraint_grid_dim;
dim3 constraint_block_dim;
# include <deal.II/base/cuda_size.h>
# include <deal.II/base/graph_coloring.h>
+# include <deal.II/base/std_cxx14/memory.h>
+
+# include <deal.II/dofs/dof_tools.h>
# include <deal.II/fe/fe_values.h>
cudaError_t error_code = cudaMalloc(array_device, n * sizeof(Number1));
AssertCuda(error_code);
+    // TODO: This is dangerous because we are performing a memcpy between
+    // different data types. However, it is very useful to move Point objects
+    // to the device, where they are stored as Tensor objects. To make this
+    // function safer, we would need a Point class that can be used on the
+    // device.
error_code = cudaMemcpy(*array_device,
array_host.data(),
n * sizeof(Number1),
template <typename CellFilter>
void
- get_cell_data(const CellFilter &cell, const unsigned int cell_id);
+ get_cell_data(
+ const CellFilter & cell,
+ const unsigned int cell_id,
+      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
void
alloc_and_copy_arrays(const unsigned int cell);
template <int dim, typename Number>
template <typename CellFilter>
void
- ReinitHelper<dim, Number>::get_cell_data(const CellFilter & cell,
- const unsigned int cell_id)
+ ReinitHelper<dim, Number>::get_cell_data(
+ const CellFilter & cell,
+ const unsigned int cell_id,
+      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
{
cell->get_dof_indices(local_dof_indices);
-
+    // When using MPI, we need to transform local_dof_indices, which contains
+    // global dof indices, into dof indices local to the current MPI process.
+ if (partitioner)
+ for (auto &index : local_dof_indices)
+ index = partitioner->global_to_local(index);
for (unsigned int i = 0; i < dofs_per_cell; ++i)
lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]];
hanging_nodes.setup_constraints(lexicographic_dof_indices,
cell,
+ partitioner,
constraint_mask_host[cell_id]);
memcpy(&local_to_global_host[cell_id * padding_length],
copy_constrained_dofs(
const dealii::types::global_dof_index *constrained_dofs,
const unsigned int n_constrained_dofs,
+ const unsigned int size,
const Number * src,
Number * dst)
{
const unsigned int dof =
threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
- if (dof < n_constrained_dofs)
+ // When working with distributed vectors, the constrained dofs are
+ // computed for ghosted vectors but we want to copy the values of the
+ // constrained dofs of non-ghosted vectors.
+ if ((dof < n_constrained_dofs) && (constrained_dofs[dof] < size))
dst[constrained_dofs[dof]] = src[constrained_dofs[dof]];
}
set_constrained_dofs(
const dealii::types::global_dof_index *constrained_dofs,
const unsigned int n_constrained_dofs,
+ const unsigned int size,
Number val,
Number * dst)
{
const unsigned int dof =
threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
- if (dof < n_constrained_dofs)
+ // When working with distributed vectors, the constrained dofs are
+ // computed for ghosted vectors but we want to set the values of the
+ // constrained dofs of non-ghosted vectors.
+ if ((dof < n_constrained_dofs) && (constrained_dofs[dof] < size))
dst[constrained_dofs[dof]] = val;
}
const DoFHandler<dim> & dof_handler,
const AffineConstraints<Number> &constraints,
const Quadrature<1> & quad,
+ const MPI_Comm & comm,
+ const AdditionalData additional_data)
+ {
+ reinit(mapping,
+ dof_handler,
+ constraints,
+ quad,
+ std::make_shared<const MPI_Comm>(comm),
+ additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::reinit(const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ const MPI_Comm & comm,
+ const AdditionalData additional_data)
+ {
+ reinit(StaticMappingQ1<dim>::mapping,
+ dof_handler,
+ constraints,
+ quad,
+ std::make_shared<const MPI_Comm>(comm),
+ additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::reinit(const Mapping<dim> & mapping,
+ const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ const AdditionalData additional_data)
+ {
+ reinit(mapping, dof_handler, constraints, quad, nullptr, additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::reinit(const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ const AdditionalData additional_data)
+ {
+ reinit(StaticMappingQ1<dim>::mapping,
+ dof_handler,
+ constraints,
+ quad,
+ nullptr,
+ additional_data);
+ }
+
+
+
+ template <int dim, typename Number>
+ MatrixFree<dim, Number>::Data
+ MatrixFree<dim, Number>::get_data(unsigned int color) const
+ {
+ Data data_copy;
+ data_copy.q_points = q_points[color];
+ data_copy.local_to_global = local_to_global[color];
+ data_copy.inv_jacobian = inv_jacobian[color];
+ data_copy.JxW = JxW[color];
+ data_copy.constraint_mask = constraint_mask[color];
+ data_copy.n_cells = n_cells[color];
+ data_copy.padding_length = padding_length;
+ data_copy.row_start = row_start[color];
+
+ return data_copy;
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::free()
+ {
+ for (unsigned int i = 0; i < q_points.size(); ++i)
+ {
+ if (q_points[i] != nullptr)
+ {
+ cudaError_t cuda_error = cudaFree(q_points[i]);
+ AssertCuda(cuda_error);
+ q_points[i] = nullptr;
+ }
+ }
+
+ for (unsigned int i = 0; i < local_to_global.size(); ++i)
+ {
+ if (local_to_global[i] != nullptr)
+ {
+ cudaError_t cuda_error = cudaFree(local_to_global[i]);
+ AssertCuda(cuda_error);
+ local_to_global[i] = nullptr;
+ }
+ }
+
+ for (unsigned int i = 0; i < inv_jacobian.size(); ++i)
+ {
+ if (inv_jacobian[i] != nullptr)
+ {
+ cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
+ AssertCuda(cuda_error);
+ inv_jacobian[i] = nullptr;
+ }
+ }
+
+ for (unsigned int i = 0; i < JxW.size(); ++i)
+ {
+ if (JxW[i] != nullptr)
+ {
+ cudaError_t cuda_error = cudaFree(JxW[i]);
+ AssertCuda(cuda_error);
+ JxW[i] = nullptr;
+ }
+ }
+
+ for (unsigned int i = 0; i < constraint_mask.size(); ++i)
+ {
+ if (constraint_mask[i] != nullptr)
+ {
+ cudaError_t cuda_error = cudaFree(constraint_mask[i]);
+ AssertCuda(cuda_error);
+ constraint_mask[i] = nullptr;
+ }
+ }
+
+
+ q_points.clear();
+ local_to_global.clear();
+ inv_jacobian.clear();
+ JxW.clear();
+ constraint_mask.clear();
+
+ if (constrained_dofs != nullptr)
+ {
+ cudaError_t cuda_error = cudaFree(constrained_dofs);
+ AssertCuda(cuda_error);
+ constrained_dofs = nullptr;
+ }
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename VectorType>
+ void
+ MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
+ VectorType & dst) const
+ {
+ static_assert(
+ std::is_same<Number, typename VectorType::value_type>::value,
+ "VectorType::value_type and Number should be of the same type.");
+ if (partitioner)
+ distributed_copy_constrained_values(src, dst);
+ else
+ serial_copy_constrained_values(src, dst);
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename VectorType>
+ void
+ MatrixFree<dim, Number>::set_constrained_values(Number val,
+ VectorType &dst) const
+ {
+ static_assert(
+ std::is_same<Number, typename VectorType::value_type>::value,
+ "VectorType::value_type and Number should be of the same type.");
+ if (partitioner)
+ distributed_set_constrained_values(val, dst);
+ else
+ serial_set_constrained_values(val, dst);
+ }
+
+
+
+ template <int dim, typename Number>
+ unsigned int
+ MatrixFree<dim, Number>::get_padding_length() const
+ {
+ return padding_length;
+ }
+
+
+
+ template <int dim, typename Number>
+ template <typename functor, typename VectorType>
+ void
+ MatrixFree<dim, Number>::cell_loop(const functor & func,
+ const VectorType &src,
+ VectorType & dst) const
+ {
+ if (partitioner)
+ distributed_cell_loop(func, src, dst);
+ else
+ serial_cell_loop(func, src, dst);
+ }
+
+
+
+ template <int dim, typename Number>
+ std::size_t
+ MatrixFree<dim, Number>::memory_consumption() const
+ {
+ // First compute the size of n_cells, row_starts, kernel launch parameters,
+ // and constrained_dofs
+ std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
+ 2 * n_colors * sizeof(dim3) +
+                        n_constrained_dofs * sizeof(types::global_dof_index);
+
+ // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
+ for (unsigned int i = 0; i < n_colors; ++i)
+ {
+ bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
+ n_cells[i] * padding_length * dim * dim * sizeof(Number) +
+ n_cells[i] * padding_length * sizeof(Number) +
+ n_cells[i] * padding_length * sizeof(point_type) +
+ n_cells[i] * sizeof(unsigned int);
+ }
+
+ return bytes;
+ }
+
+
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::reinit(const Mapping<dim> & mapping,
+ const DoFHandler<dim> & dof_handler,
+ const AffineConstraints<Number> &constraints,
+ const Quadrature<1> & quad,
+ std::shared_ptr<const MPI_Comm> comm,
const AdditionalData additional_data)
{
if (typeid(Number) == typeid(double))
n_colors = graph.size();
helper.setup_color_arrays(n_colors);
+
+ IndexSet locally_relevant_dofs;
+ if (comm)
+ {
+ DoFTools::extract_locally_relevant_dofs(dof_handler,
+ locally_relevant_dofs);
+        partitioner = std::make_shared<const Utilities::MPI::Partitioner>(
+ dof_handler.locally_owned_dofs(), locally_relevant_dofs, *comm);
+ }
for (unsigned int i = 0; i < n_colors; ++i)
{
n_cells[i] = graph[i].size();
typename std::vector<CellFilter>::iterator cell = graph[i].begin(),
end_cell = graph[i].end();
for (unsigned int cell_id = 0; cell != end_cell; ++cell, ++cell_id)
- helper.get_cell_data(*cell, cell_id);
+ helper.get_cell_data(*cell, cell_id, partitioner);
helper.alloc_and_copy_arrays(i);
}
std::vector<dealii::types::global_dof_index> constrained_dofs_host(
n_constrained_dofs);
- unsigned int i_constraint = 0;
- const unsigned int n_dofs = dof_handler.n_dofs();
- for (unsigned int i = 0; i < n_dofs; ++i)
+ if (partitioner)
+ {
+ const unsigned int n_local_dofs =
+ locally_relevant_dofs.n_elements();
+ unsigned int i_constraint = 0;
+ for (unsigned int i = 0; i < n_local_dofs; ++i)
+ {
+              // is_constrained() expects a global dof index, while
+              // constrained_dofs_host stores indices local to this process.
+ if (constraints.is_constrained(partitioner->local_to_global(i)))
+ {
+ constrained_dofs_host[i_constraint] = i;
+ ++i_constraint;
+ }
+ }
+ }
+ else
{
- if (constraints.is_constrained(i))
+ const unsigned int n_local_dofs = dof_handler.n_dofs();
+ unsigned int i_constraint = 0;
+ for (unsigned int i = 0; i < n_local_dofs; ++i)
{
- constrained_dofs_host[i_constraint] = i;
- ++i_constraint;
+ if (constraints.is_constrained(i))
+ {
+ constrained_dofs_host[i_constraint] = i;
+ ++i_constraint;
+ }
}
}
template <int dim, typename Number>
- MatrixFree<dim, Number>::Data
- MatrixFree<dim, Number>::get_data(unsigned int color) const
+ template <typename functor, typename VectorType>
+ void
+ MatrixFree<dim, Number>::serial_cell_loop(const functor & func,
+ const VectorType &src,
+ VectorType & dst) const
{
- Data data_copy;
- data_copy.q_points = q_points[color];
- data_copy.local_to_global = local_to_global[color];
- data_copy.inv_jacobian = inv_jacobian[color];
- data_copy.JxW = JxW[color];
- data_copy.constraint_mask = constraint_mask[color];
- data_copy.n_cells = n_cells[color];
- data_copy.padding_length = padding_length;
- data_copy.row_start = row_start[color];
-
- return data_copy;
+ // Execute the loop on the cells
+ for (unsigned int i = 0; i < n_colors; ++i)
+ internal::apply_kernel_shmem<dim, Number, functor>
+ <<<grid_dim[i], block_dim[i]>>>(func,
+ get_data(i),
+ src.get_values(),
+ dst.get_values());
}
template <int dim, typename Number>
+ template <typename functor>
void
- MatrixFree<dim, Number>::free()
+ MatrixFree<dim, Number>::distributed_cell_loop(
+ const functor & func,
+ const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
{
- for (unsigned int i = 0; i < q_points.size(); ++i)
- {
- if (q_points[i] != nullptr)
- {
- cudaError_t cuda_error = cudaFree(q_points[i]);
- AssertCuda(cuda_error);
- q_points[i] = nullptr;
- }
- }
-
- for (unsigned int i = 0; i < local_to_global.size(); ++i)
- {
- if (local_to_global[i] != nullptr)
- {
- cudaError_t cuda_error = cudaFree(local_to_global[i]);
- AssertCuda(cuda_error);
- local_to_global[i] = nullptr;
- }
- }
-
- for (unsigned int i = 0; i < inv_jacobian.size(); ++i)
- {
- if (inv_jacobian[i] != nullptr)
- {
- cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
- AssertCuda(cuda_error);
- inv_jacobian[i] = nullptr;
- }
- }
-
- for (unsigned int i = 0; i < JxW.size(); ++i)
- {
- if (JxW[i] != nullptr)
- {
- cudaError_t cuda_error = cudaFree(JxW[i]);
- AssertCuda(cuda_error);
- JxW[i] = nullptr;
- }
- }
+ // Create the ghosted source and the ghosted destination
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> ghosted_src(
+ partitioner);
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> ghosted_dst(
+ ghosted_src);
+ ghosted_src = src;
+
+ // Execute the loop on the cells
+ for (unsigned int i = 0; i < n_colors; ++i)
+ internal::apply_kernel_shmem<dim, Number, functor>
+ <<<grid_dim[i], block_dim[i]>>>(func,
+ get_data(i),
+ ghosted_src.get_values(),
+ ghosted_dst.get_values());
- for (unsigned int i = 0; i < constraint_mask.size(); ++i)
- {
- if (constraint_mask[i] != nullptr)
- {
- cudaError_t cuda_error = cudaFree(constraint_mask[i]);
- AssertCuda(cuda_error);
- constraint_mask[i] = nullptr;
- }
- }
+ // Add the ghosted values
+ ghosted_dst.compress(VectorOperation::add);
+ dst = ghosted_dst;
+ }
- q_points.clear();
- local_to_global.clear();
- inv_jacobian.clear();
- JxW.clear();
- constraint_mask.clear();
- if (constrained_dofs != nullptr)
- {
- cudaError_t cuda_error = cudaFree(constrained_dofs);
- AssertCuda(cuda_error);
- constrained_dofs = nullptr;
- }
+ template <int dim, typename Number>
+ template <typename functor>
+ void
+ MatrixFree<dim, Number>::distributed_cell_loop(
+ const functor &,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+ LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+ {
+ Assert(false, ExcInternalError());
}
template <int dim, typename Number>
template <typename VectorType>
void
- MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
- VectorType & dst) const
+ MatrixFree<dim, Number>::serial_copy_constrained_values(const VectorType &src,
+ VectorType &dst) const
{
- static_assert(
- std::is_same<Number, typename VectorType::value_type>::value,
- "VectorType::value_type and Number should be of the same type.");
+ Assert(src.size() == dst.size(),
+           ExcMessage("src and dst vectors have different sizes."));
internal::copy_constrained_dofs<Number>
<<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
n_constrained_dofs,
+ src.size(),
src.get_values(),
dst.get_values());
}
template <int dim, typename Number>
- template <typename VectorType>
void
- MatrixFree<dim, Number>::set_constrained_values(Number val,
- VectorType &dst) const
+ MatrixFree<dim, Number>::distributed_copy_constrained_values(
+ const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
{
- static_assert(
- std::is_same<Number, typename VectorType::value_type>::value,
- "VectorType::value_type and Number should be of the same type.");
- internal::set_constrained_dofs<Number>
+ Assert(src.size() == dst.size(),
+           ExcMessage("src and dst vectors have different sizes."));
+ internal::copy_constrained_dofs<Number>
<<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
n_constrained_dofs,
- val,
+ src.local_size(),
+ src.get_values(),
dst.get_values());
}
template <int dim, typename Number>
- unsigned int
- MatrixFree<dim, Number>::get_padding_length() const
+ void
+ MatrixFree<dim, Number>::distributed_copy_constrained_values(
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+ LinearAlgebra::CUDAWrappers::Vector<Number> &) const
{
- return padding_length;
+ Assert(false, ExcInternalError());
}
template <int dim, typename Number>
- template <typename functor, typename VectorType>
+ template <typename VectorType>
void
- MatrixFree<dim, Number>::cell_loop(const functor & func,
- const VectorType &src,
- VectorType & dst) const
+ MatrixFree<dim, Number>::serial_set_constrained_values(const Number val,
+ VectorType & dst) const
{
- for (unsigned int i = 0; i < n_colors; ++i)
- internal::apply_kernel_shmem<dim, Number, functor>
- <<<grid_dim[i], block_dim[i]>>>(func,
- get_data(i),
- src.get_values(),
- dst.get_values());
+ internal::set_constrained_dofs<Number>
+ <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
+ n_constrained_dofs,
+ dst.size(),
+ val,
+ dst.get_values());
}
template <int dim, typename Number>
- std::size_t
- MatrixFree<dim, Number>::memory_consumption() const
+ void
+ MatrixFree<dim, Number>::distributed_set_constrained_values(
+ const Number val,
+ LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
{
- // First compute the size of n_cells, row_starts, kernel launch parameters,
- // and constrained_dofs
- std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
- 2 * n_colors * sizeof(dim3) +
- n_constrained_dofs * sizeof(unsigned int);
+ internal::set_constrained_dofs<Number>
+ <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
+ n_constrained_dofs,
+ dst.local_size(),
+ val,
+ dst.get_values());
+ }
- // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
- for (unsigned int i = 0; i < n_colors; ++i)
- {
- bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
- n_cells[i] * padding_length * dim * dim * sizeof(Number) +
- n_cells[i] * padding_length * sizeof(Number) +
- n_cells[i] * padding_length * sizeof(point_type) +
- n_cells[i] * sizeof(unsigned int);
- }
- return bytes;
+
+ template <int dim, typename Number>
+ void
+ MatrixFree<dim, Number>::distributed_set_constrained_values(
+ const Number,
+ LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+ {
+ Assert(false, ExcInternalError());
}
} // namespace CUDAWrappers