struct Binop_Subtraction<std::complex<Number>>
{
__device__ static inline std::complex<Number>
- operation(const std::complex<Number> a, const std::complex<Number> b)
+ operation(const std::complex<Number> a,
+ const std::complex<Number> /*b*/)
{
printf("This function is not implemented for std::complex<Number>!");
assert(false);
reduce(Number * result,
volatile Number *result_buffer,
const size_type local_idx,
- const size_type global_idx,
- const size_type N)
+ const size_type /*global_idx*/,
+ const size_type /*N*/)
{
for (size_type s = block_size / 2; s > warp_size; s = s >> 1)
{
template <typename Number>
- inline SparseMatrix<Number>::size_type
+ inline typename SparseMatrix<Number>::size_type
SparseMatrix<Number>::m() const
{
return n_rows;
template <typename Number>
- inline SparseMatrix<Number>::size_type
+ inline typename SparseMatrix<Number>::size_type
SparseMatrix<Number>::n() const
{
return n_cols;
}
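
Adding typename is required because SparseMatrix<Number>::size_type depends on the template parameter Number; without the keyword the compiler has to assume the dependent name refers to a value rather than a type. A minimal sketch of the rule, using a hypothetical Container class rather than anything from the library:

  template <typename T>
  struct Container
  {
    using size_type = unsigned int;

    size_type
    size() const
    {
      return 0;
    }
  };

  // The return type is a dependent name, so it needs the typename keyword,
  // just like SparseMatrix<Number>::size_type in m() and n() above.
  template <typename T>
  typename Container<T>::size_type
  container_size(const Container<T> &c)
  {
    return c.size();
  }

  int
  main()
  {
    return static_cast<int>(container_size(Container<int>()));
  }
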
else if (integrate_grad == true)
{
- evaluator_tensor_product.integrate_gradient<false>(values, gradients);
+ evaluator_tensor_product.template integrate_gradient<false>(values,
+ gradients);
__syncthreads();
}
}
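
The same dependent-name rules apply to the integrate_gradient<false> call: the object's type depends on a template parameter, so the member-template call needs the template keyword, otherwise the '<' is parsed as a comparison. A minimal sketch, with the Evaluator type and its apply() member invented for illustration:

  struct Evaluator
  {
    template <bool add>
    void
    apply(double *values) const
    {
      if (add)
        values[0] += 1.;
      else
        values[0] = 1.;
    }
  };

  template <typename EvaluatorType>
  void
  run(const EvaluatorType &evaluator, double *values)
  {
    // evaluator has a dependent type, so the member-template call needs the
    // template keyword, just like integrate_gradient<false>() above.
    evaluator.template apply<false>(values);
  }

  int
  main()
  {
    Evaluator ev;
    double    values[1] = {0.};
    run(ev, values);
  }
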
const DoFHandler<dim> &dof_handler,
const UpdateFlags & update_flags)
: data(data)
- , fe_degree(data->fe_degree)
- , dofs_per_cell(data->dofs_per_cell)
- , q_points_per_cell(data->q_points_per_cell)
, fe_values(mapping,
fe,
Quadrature<dim>(quad),
update_inverse_jacobians | update_quadrature_points |
update_values | update_gradients | update_JxW_values)
, lexicographic_inv(shape_info.lexicographic_numbering)
+ , fe_degree(data->fe_degree)
+ , dofs_per_cell(data->dofs_per_cell)
+ , q_points_per_cell(data->q_points_per_cell)
, update_flags(update_flags)
, padding_length(data->get_padding_length())
, hanging_nodes(dof_handler.get_triangulation())
// Setup kernel parameters
const double apply_n_blocks = std::ceil(
static_cast<double>(n_cells) / static_cast<double>(cells_per_block));
- const unsigned int apply_x_n_blocks =
- std::round(std::sqrt(apply_n_blocks));
- const unsigned int apply_y_n_blocks =
- std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks));
+ const auto apply_x_n_blocks =
+ static_cast<unsigned int>(std::round(std::sqrt(apply_n_blocks)));
+ const auto apply_y_n_blocks = static_cast<unsigned int>(
+ std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks)));
data->grid_dim[color] = dim3(apply_x_n_blocks, apply_y_n_blocks);
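
std::round and std::ceil return double, so assigning their results directly to unsigned int variables draws implicit-conversion warnings; the explicit static_cast<unsigned int> documents the intended narrowing. A small standalone sketch of the same grid-sizing computation, with made-up cell counts:

  #include <cmath>
  #include <cstdio>

  int
  main()
  {
    const unsigned int n_cells         = 10000;
    const unsigned int cells_per_block = 8;

    const double n_blocks = std::ceil(static_cast<double>(n_cells) /
                                      static_cast<double>(cells_per_block));

    // The explicit casts spell out the intended double -> unsigned int
    // narrowing instead of relying on an implicit conversion.
    const auto x_n_blocks =
      static_cast<unsigned int>(std::round(std::sqrt(n_blocks)));
    const auto y_n_blocks = static_cast<unsigned int>(
      std::ceil(n_blocks / static_cast<double>(x_n_blocks)));

    std::printf("launching a %u x %u grid of blocks\n", x_n_blocks, y_n_blocks);
  }
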
template <int dim, typename Number>
MatrixFree<dim, Number>::MatrixFree()
- : n_dofs(0)
+ : my_id(-1)
+ , n_dofs(0)
, constrained_dofs(nullptr)
, padding_length(0)
- , my_id(-1)
, dof_handler(nullptr)
{}
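
The initializer lists above are reordered so that they match the order in which the members are declared in the class; members are always initialized in declaration order no matter how the list is written, and a mismatched list draws -Wreorder. A minimal sketch with invented member names:

  struct Example
  {
    Example()
      : my_id(-1) // listed first because it is declared first
      , n_dofs(0)
    {}

    int          my_id;
    unsigned int n_dofs;
  };

  int
  main()
  {
    const Example example;
    return example.my_id + 1 + static_cast<int>(example.n_dofs);
  }
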
template <int dim, typename Number>
- MatrixFree<dim, Number>::Data
+ typename MatrixFree<dim, Number>::Data
MatrixFree<dim, Number>::get_data(unsigned int color) const
{
Data data_copy;
std::ceil(dim * std::log2(fe_degree + 1.)));
dofs_per_cell = fe.n_dofs_per_cell();
- q_points_per_cell = std::pow(n_q_points_1d, dim);
+ q_points_per_cell = static_cast<unsigned int>(std::pow(n_q_points_1d, dim));
const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(
quad, fe);
if (n_constrained_dofs != 0)
{
- const unsigned int constraint_n_blocks =
+ const auto constraint_n_blocks = static_cast<unsigned int>(
std::ceil(static_cast<double>(n_constrained_dofs) /
- static_cast<double>(block_size));
- const unsigned int constraint_x_n_blocks =
- std::round(std::sqrt(constraint_n_blocks));
- const unsigned int constraint_y_n_blocks =
+ static_cast<double>(block_size)));
+ const auto constraint_x_n_blocks =
+ static_cast<unsigned int>(std::round(std::sqrt(constraint_n_blocks)));
+ const auto constraint_y_n_blocks = static_cast<unsigned int>(
std::ceil(static_cast<double>(constraint_n_blocks) /
- static_cast<double>(constraint_x_n_blocks));
+ static_cast<double>(constraint_x_n_blocks)));
constraint_grid_dim =
dim3(constraint_x_n_blocks, constraint_y_n_blocks);
Assert(n_quads + 2 * n_unused_pairs + n_unused_singles ==
tria_faces.quads.used.size(),
ExcInternalError());
+ (void)n_quads;
// how many single quads are needed in addition to n_unused_quads?
const int additional_single_quads = new_quads_single - n_unused_singles;
Assert(n_objects + 2 * n_unused_pairs + n_unused_singles ==
tria_objects.used.size(),
ExcInternalError());
+ (void)n_objects;
// how many single objects are needed in addition to
// n_unused_objects?
// make sure that all subcelldata entries have been processed
// TODO: this is not guaranteed, why?
// AssertDimension(counter, boundary_objects_in.size());
+ (void)counter;
}
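
The added (void) casts keep these variables referenced in release builds, where Assert compiles away and a variable used only inside it would otherwise be reported as unused. A minimal sketch of the idiom, using the standard assert and a hypothetical check_count function:

  #include <cassert>

  void
  check_count(const unsigned int expected)
  {
    const unsigned int counter = expected;
    // In a release build (NDEBUG) the assert expands to nothing and counter
    // would be flagged as unused; the (void) cast keeps it referenced.
    assert(counter == expected);
    (void)counter;
  }

  int
  main()
  {
    check_count(3);
  }
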
*/
template <typename Number>
cusparseStatus_t
- cusparseXcsrilu02(cusparseHandle_t handle,
- int m,
- int nnz,
- const cusparseMatDescr_t descrA,
- Number * csrValA_valM,
- const int * csrRowPtrA,
- const int * csrColIndA,
- csrilu02Info_t info,
- cusparseSolvePolicy_t policy,
- void * pBuffer)
+ cusparseXcsrilu02(cusparseHandle_t /*handle*/,
+ int /*m*/,
+ int /*nnz*/,
+ const cusparseMatDescr_t /*descrA*/,
+ Number * /*csrValA_valM*/,
+ const int * /*csrRowPtrA*/,
+ const int * /*csrColIndA*/,
+ csrilu02Info_t /*info*/,
+ cusparseSolvePolicy_t /*policy*/,
+ void * /*pBuffer*/)
{
AssertThrow(false, ExcNotImplemented());
return CUSPARSE_STATUS_INVALID_VALUE;
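
Commenting out the parameter names, as in the fallback cusparseXcsrilu02 overload above, silences unused-parameter warnings while the signature still documents what callers pass. A small sketch of the pattern, with an invented fallback_factorization function:

  // The generic fallback never touches its arguments, so the parameter names
  // are commented out to avoid -Wunused-parameter while keeping the signature
  // self-documenting.
  template <typename Number>
  int
  fallback_factorization(Number * /*values*/,
                         const int * /*row_ptr*/,
                         const int /*n_rows*/)
  {
    // Report failure instead of silently accepting an unsupported type.
    return 1;
  }

  int
  main()
  {
    return fallback_factorization<float>(nullptr, nullptr, 0) == 1 ? 0 : 1;
  }
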
n_nonzero_elements = A.n_nonzero_elements();
AssertDimension(A.m(), A.n());
- matrix_pointer = &A;
- const auto cusparse_matrix = A.get_cusparse_matrix();
- const Number *const A_val_dev = std::get<0>(cusparse_matrix);
+ matrix_pointer = &A;
+ const Number *A_val_dev;
+ std::tie(A_val_dev,
+ P_column_index_dev,
+ P_row_ptr_dev,
+ std::ignore,
+ std::ignore) = A.get_cusparse_matrix();
// create a copy of the matrix entries since the algorithm works in-place.
P_val_dev.reset(
Utilities::CUDA::allocate_device_data<Number>(n_nonzero_elements));
cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(),
A_val_dev,
n_nonzero_elements * sizeof(Number),
cudaMemcpyDeviceToDevice);
-
- P_column_index_dev = std::get<1>(cusparse_matrix);
- P_row_ptr_dev = std::get<2>(cusparse_matrix);
- const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
+ AssertCuda(cuda_status);
// initialize an internal buffer we need later on
tmp_dev.reset(Utilities::CUDA::allocate_device_data<Number>(n_rows));
n_nonzero_elements = A.n_nonzero_elements();
AssertDimension(A.m(), A.n());
- const auto cusparse_matrix = A.get_cusparse_matrix();
- const Number *const A_val_dev = std::get<0>(cusparse_matrix);
+ const Number *A_val_dev;
+ std::tie(A_val_dev,
+ P_column_index_dev,
+ P_row_ptr_dev,
+ std::ignore,
+ std::ignore) = A.get_cusparse_matrix();
// create a copy of the matrix entries since the algorithm works in-place.
P_val_dev.reset(
Utilities::CUDA::allocate_device_data<Number>(n_nonzero_elements));
cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(),
A_val_dev,
n_nonzero_elements * sizeof(Number),
cudaMemcpyDeviceToDevice);
-
- P_column_index_dev = std::get<1>(cusparse_matrix);
- P_row_ptr_dev = std::get<2>(cusparse_matrix);
- const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
+ AssertCuda(cuda_status);
// initialize an internal buffer we need later on
tmp_dev.reset(Utilities::CUDA::allocate_device_data<Number>(n_rows));
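
Unpacking get_cusparse_matrix() through std::tie with std::ignore avoids introducing names for tuple elements that are never used, such as the matrix descriptor that previously bound to mat_descr. A self-contained sketch, with a hypothetical get_matrix_data function standing in for the real call:

  #include <cstdio>
  #include <tuple>

  // Stand-in for A.get_cusparse_matrix(): returns values, column indices,
  // row pointers, plus two entries this caller does not need.
  std::tuple<const double *, const int *, const int *, int, int>
  get_matrix_data()
  {
    static const double values[3]       = {1., 2., 3.};
    static const int    column_index[3] = {0, 1, 2};
    static const int    row_ptr[2]      = {0, 3};
    return std::make_tuple(values, column_index, row_ptr, 3, 1);
  }

  int
  main()
  {
    const double *values;
    const int *   column_index;
    const int *   row_ptr;
    // Elements that are not needed are bound to std::ignore instead of to
    // named variables that would then trigger unused-variable warnings.
    std::tie(values, column_index, row_ptr, std::ignore, std::ignore) =
      get_matrix_data();

    std::printf("first value %f, first column %d, row start %d\n",
                values[0],
                column_index[0],
                row_ptr[0]);
  }
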
__global__ void
linfty_norm(const typename SparseMatrix<Number>::size_type n_rows,
const Number * val_dev,
- const int * column_index_dev,
- const int * row_ptr_dev,
- Number * sums)
+ const int * /*column_index_dev*/,
+ const int *row_ptr_dev,
+ Number * sums)
{
const typename SparseMatrix<Number>::size_type row =
threadIdx.x + blockIdx.x * blockDim.x;
val_dev.get(),
nnz * sizeof(Number),
cudaMemcpyDeviceToDevice);
+ AssertCuda(cuda_error);
return matrix_values.l2_norm();
}
if (!cell->is_active() && cell->child(0)->coarsen_flag_set())
++n_coarsen_fathers;
Assert(n_cells_to_coarsen >= 2 * n_coarsen_fathers, ExcInternalError());
+ (void)n_cells_to_coarsen;
// allocate the needed memory. initialize
// the following arrays in an efficient