From: Daniel Arndt
Date: Thu, 9 Feb 2023 02:55:47 +0000 (-0500)
Subject: Fix warnings with Clang-15+Cuda
X-Git-Tag: v9.5.0-rc1~569^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=28f06706cc5790a7296d7cd16bcc2747640e79cc;p=dealii.git

Fix warnings with Clang-15+Cuda
---

diff --git a/include/deal.II/lac/cuda_kernels.h b/include/deal.II/lac/cuda_kernels.h
index 178ad998d7..fce8598b56 100644
--- a/include/deal.II/lac/cuda_kernels.h
+++ b/include/deal.II/lac/cuda_kernels.h
@@ -103,7 +103,8 @@ namespace LinearAlgebra
     struct Binop_Subtraction<std::complex<Number>>
     {
       __device__ static inline std::complex<Number>
-      operation(const std::complex<Number> a, const std::complex<Number> b)
+      operation(const std::complex<Number> a,
+                const std::complex<Number> /*b*/)
       {
         printf("This function is not implemented for std::complex<Number>!");
         assert(false);
diff --git a/include/deal.II/lac/cuda_kernels.templates.h b/include/deal.II/lac/cuda_kernels.templates.h
index 86136320b5..f80b15dba5 100644
--- a/include/deal.II/lac/cuda_kernels.templates.h
+++ b/include/deal.II/lac/cuda_kernels.templates.h
@@ -196,8 +196,8 @@ namespace LinearAlgebra
     reduce(Number *         result,
            volatile Number *result_buffer,
            const size_type  local_idx,
-           const size_type  global_idx,
-           const size_type  N)
+           const size_type /*global_idx*/,
+           const size_type /*N*/)
     {
       for (size_type s = block_size / 2; s > warp_size; s = s >> 1)
         {
diff --git a/include/deal.II/lac/cuda_sparse_matrix.h b/include/deal.II/lac/cuda_sparse_matrix.h
index 166c11ffe7..da735bf9ec 100644
--- a/include/deal.II/lac/cuda_sparse_matrix.h
+++ b/include/deal.II/lac/cuda_sparse_matrix.h
@@ -377,7 +377,7 @@ namespace CUDAWrappers


   template <typename Number>
-  inline SparseMatrix<Number>::size_type
+  inline typename SparseMatrix<Number>::size_type
   SparseMatrix<Number>::m() const
   {
     return n_rows;
@@ -386,7 +386,7 @@ namespace CUDAWrappers


   template <typename Number>
-  inline SparseMatrix<Number>::size_type
+  inline typename SparseMatrix<Number>::size_type
   SparseMatrix<Number>::n() const
   {
     return n_cols;
diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h
index ed694816b6..3fe5af5aeb 100644
--- a/include/deal.II/matrix_free/cuda_fe_evaluation.h
+++ b/include/deal.II/matrix_free/cuda_fe_evaluation.h
@@ -399,7 +399,8 @@ namespace CUDAWrappers
         }
       else if (integrate_grad == true)
         {
-          evaluator_tensor_product.integrate_gradient<false>(values, gradients);
+          evaluator_tensor_product.template integrate_gradient<false>(values,
+                                                                      gradients);
           __syncthreads();
         }
     }
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index bc5efd162e..b08f6673ba 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -261,15 +261,15 @@ namespace CUDAWrappers
     const DoFHandler<dim> &dof_handler,
     const UpdateFlags &    update_flags)
     : data(data)
-    , fe_degree(data->fe_degree)
-    , dofs_per_cell(data->dofs_per_cell)
-    , q_points_per_cell(data->q_points_per_cell)
     , fe_values(mapping,
                 fe,
                 Quadrature<dim>(quad),
                 update_inverse_jacobians | update_quadrature_points |
                   update_values | update_gradients | update_JxW_values)
     , lexicographic_inv(shape_info.lexicographic_numbering)
+    , fe_degree(data->fe_degree)
+    , dofs_per_cell(data->dofs_per_cell)
+    , q_points_per_cell(data->q_points_per_cell)
     , update_flags(update_flags)
     , padding_length(data->get_padding_length())
     , hanging_nodes(dof_handler.get_triangulation())
@@ -323,10 +323,10 @@ namespace CUDAWrappers
       // Setup kernel parameters
       const double apply_n_blocks = std::ceil(
         static_cast<double>(n_cells) /
        static_cast<double>(cells_per_block));
-      const unsigned int apply_x_n_blocks =
-        std::round(std::sqrt(apply_n_blocks));
-      const unsigned int apply_y_n_blocks =
-        std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks));
+      const auto apply_x_n_blocks =
+        static_cast<unsigned int>(std::round(std::sqrt(apply_n_blocks)));
+      const auto apply_y_n_blocks = static_cast<unsigned int>(
+        std::ceil(apply_n_blocks / static_cast<double>(apply_x_n_blocks)));

       data->grid_dim[color] = dim3(apply_x_n_blocks, apply_y_n_blocks);
@@ -621,10 +621,10 @@ namespace CUDAWrappers


   template <int dim, typename Number>
   MatrixFree<dim, Number>::MatrixFree()
-    : n_dofs(0)
+    : my_id(-1)
+    , n_dofs(0)
     , constrained_dofs(nullptr)
     , padding_length(0)
-    , my_id(-1)
     , dof_handler(nullptr)
   {}
@@ -708,7 +708,7 @@ namespace CUDAWrappers


   template <int dim, typename Number>
-  MatrixFree<dim, Number>::Data
+  typename MatrixFree<dim, Number>::Data
   MatrixFree<dim, Number>::get_data(unsigned int color) const
   {
     Data data_copy;
@@ -936,7 +936,7 @@ namespace CUDAWrappers
         std::ceil(dim * std::log2(fe_degree + 1.)));

       dofs_per_cell     = fe.n_dofs_per_cell();
-      q_points_per_cell = std::pow(n_q_points_1d, dim);
+      q_points_per_cell = static_cast<unsigned int>(std::pow(n_q_points_1d, dim));

       const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(
         quad, fe);
@@ -1106,14 +1106,14 @@ namespace CUDAWrappers

     if (n_constrained_dofs != 0)
       {
-        const unsigned int constraint_n_blocks =
+        const auto constraint_n_blocks = static_cast<unsigned int>(
           std::ceil(static_cast<double>(n_constrained_dofs) /
-                    static_cast<double>(block_size));
-        const unsigned int constraint_x_n_blocks =
-          std::round(std::sqrt(constraint_n_blocks));
-        const unsigned int constraint_y_n_blocks =
+                    static_cast<double>(block_size)));
+        const auto constraint_x_n_blocks =
+          static_cast<unsigned int>(std::round(std::sqrt(constraint_n_blocks)));
+        const auto constraint_y_n_blocks = static_cast<unsigned int>(
           std::ceil(static_cast<double>(constraint_n_blocks) /
-                    static_cast<double>(constraint_x_n_blocks));
+                    static_cast<double>(constraint_x_n_blocks)));

         constraint_grid_dim =
           dim3(constraint_x_n_blocks, constraint_y_n_blocks);
diff --git a/source/grid/tria.cc b/source/grid/tria.cc
index 90d1c18c76..5490a71609 100644
--- a/source/grid/tria.cc
+++ b/source/grid/tria.cc
@@ -1108,6 +1108,7 @@ namespace internal
           Assert(n_quads + 2 * n_unused_pairs + n_unused_singles ==
                    tria_faces.quads.used.size(),
                  ExcInternalError());
+          (void)n_quads;

           // how many single quads are needed in addition to n_unused_quads?
           const int additional_single_quads = new_quads_single - n_unused_singles;
@@ -1330,6 +1331,7 @@ namespace internal
           Assert(n_objects + 2 * n_unused_pairs + n_unused_singles ==
                    tria_objects.used.size(),
                  ExcInternalError());
+          (void)n_objects;

           // how many single objects are needed in addition to
           // n_unused_objects?
@@ -2618,6 +2620,7 @@ namespace internal
             // make sure that all subcelldata entries have been processed
             // TODO: this is not guaranteed, why?
            // AssertDimension(counter, boundary_objects_in.size());
+            (void)counter;
           }


diff --git a/source/lac/cuda_precondition.cc b/source/lac/cuda_precondition.cc
index 8573fbb16e..8f400cfbd3 100644
--- a/source/lac/cuda_precondition.cc
+++ b/source/lac/cuda_precondition.cc
@@ -28,16 +28,16 @@ namespace
    */
   template <typename Number>
   cusparseStatus_t
-  cusparseXcsrilu02(cusparseHandle_t         handle,
-                    int                      m,
-                    int                      nnz,
-                    const cusparseMatDescr_t descrA,
-                    Number *                 csrValA_valM,
-                    const int *              csrRowPtrA,
-                    const int *              csrColIndA,
-                    csrilu02Info_t           info,
-                    cusparseSolvePolicy_t    policy,
-                    void *                   pBuffer)
+  cusparseXcsrilu02(cusparseHandle_t /*handle*/,
+                    int /*m*/,
+                    int /*nnz*/,
+                    const cusparseMatDescr_t /*descrA*/,
+                    Number * /*csrValA_valM*/,
+                    const int * /*csrRowPtrA*/,
+                    const int * /*csrColIndA*/,
+                    csrilu02Info_t /*info*/,
+                    cusparseSolvePolicy_t /*policy*/,
+                    void * /*pBuffer*/)
   {
     AssertThrow(false, ExcNotImplemented());
     return CUSPARSE_STATUS_INVALID_VALUE;
@@ -1289,9 +1289,13 @@ namespace CUDAWrappers
     n_nonzero_elements = A.n_nonzero_elements();
     AssertDimension(A.m(), A.n());

-    matrix_pointer                      = &A;
-    const auto          cusparse_matrix = A.get_cusparse_matrix();
-    const Number *const A_val_dev       = std::get<0>(cusparse_matrix);
+    matrix_pointer = &A;
+    const Number *A_val_dev;
+    std::tie(A_val_dev,
+             P_column_index_dev,
+             P_row_ptr_dev,
+             std::ignore,
+             std::ignore) = A.get_cusparse_matrix();

     // create a copy of the matrix entries since the algorithm works in-place.
     P_val_dev.reset(
@@ -1300,10 +1304,7 @@ namespace CUDAWrappers
                     A_val_dev,
                     n_nonzero_elements * sizeof(Number),
                     cudaMemcpyDeviceToDevice);
-
-    P_column_index_dev                 = std::get<1>(cusparse_matrix);
-    P_row_ptr_dev                      = std::get<2>(cusparse_matrix);
-    const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
+    AssertCuda(cuda_status);

     // initialize an internal buffer we need later on
     tmp_dev.reset(Utilities::CUDA::allocate_device_data<Number>(n_rows));
@@ -1611,8 +1612,12 @@ namespace CUDAWrappers
     n_nonzero_elements = A.n_nonzero_elements();
     AssertDimension(A.m(), A.n());

-    const auto          cusparse_matrix = A.get_cusparse_matrix();
-    const Number *const A_val_dev       = std::get<0>(cusparse_matrix);
+    const Number *A_val_dev;
+    std::tie(A_val_dev,
+             P_column_index_dev,
+             P_row_ptr_dev,
+             std::ignore,
+             std::ignore) = A.get_cusparse_matrix();

     // create a copy of the matrix entries since the algorithm works in-place.
     P_val_dev.reset(
@@ -1621,10 +1626,7 @@ namespace CUDAWrappers
                     A_val_dev,
                     n_nonzero_elements * sizeof(Number),
                     cudaMemcpyDeviceToDevice);
-
-    P_column_index_dev                 = std::get<1>(cusparse_matrix);
-    P_row_ptr_dev                      = std::get<2>(cusparse_matrix);
-    const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
+    AssertCuda(cuda_status);

     // initialize an internal buffer we need later on
     tmp_dev.reset(Utilities::CUDA::allocate_device_data<Number>(n_rows));
diff --git a/source/lac/cuda_sparse_matrix.cc b/source/lac/cuda_sparse_matrix.cc
index 5909297bf1..d1cca009f8 100644
--- a/source/lac/cuda_sparse_matrix.cc
+++ b/source/lac/cuda_sparse_matrix.cc
@@ -265,9 +265,9 @@ namespace CUDAWrappers
     __global__ void
     linfty_norm(const typename SparseMatrix<Number>::size_type n_rows,
                 const Number *                                 val_dev,
-                const int *                                    column_index_dev,
-                const int *                                    row_ptr_dev,
-                Number *                                       sums)
+                const int * /*column_index_dev*/,
+                const int *row_ptr_dev,
+                Number *   sums)
     {
       const typename SparseMatrix<Number>::size_type row =
         threadIdx.x + blockIdx.x * blockDim.x;
@@ -668,6 +668,7 @@ namespace CUDAWrappers
                val_dev.get(),
                nnz * sizeof(Number),
                cudaMemcpyDeviceToDevice);
+    AssertCuda(cuda_error);

     return matrix_values.l2_norm();
   }
diff --git a/source/numerics/solution_transfer.cc b/source/numerics/solution_transfer.cc
index 6803c0f98a..a22805f9ed 100644
--- a/source/numerics/solution_transfer.cc
+++ b/source/numerics/solution_transfer.cc
@@ -332,6 +332,7 @@ SolutionTransfer<dim, VectorType, spacedim>::
     if (!cell->is_active() && cell->child(0)->coarsen_flag_set())
       ++n_coarsen_fathers;
   Assert(n_cells_to_coarsen >= 2 * n_coarsen_fathers, ExcInternalError());
+  (void)n_cells_to_coarsen;

   // allocate the needed memory. initialize
   // the following arrays in an efficient
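---

Appendix (not part of the commit): the hunks above apply a handful of recurring fixes for warnings that Clang 15 newly emits when compiling CUDA code: commented-out names for unused parameters, (void) casts for variables that are only read inside Assert/assert, the typename keyword on dependent nested return types, the .template disambiguator on dependent member-template calls, explicit static_cast for double-to-unsigned conversions, and constructor initializers reordered to declaration order. The following self-contained sketch condenses each pattern; all names (Container, Kernel, size_of, demo) are hypothetical illustrations, not deal.II APIs. It should compile cleanly with, e.g., clang++ -std=c++17 -Wall -Wextra demo.cc.

#include <cassert>
#include <cmath>

template <typename Number>
struct Container
{
  using size_type = unsigned int;

  // -Wunused-parameter: keep the parameter name only as a comment.
  template <bool add>
  void accumulate(Number /*value*/) {}

  size_type size() const { return n_elements; }
  size_type n_elements = 0;
};

// 'typename' is required for the dependent nested type 'size_type';
// Clang 15 diagnoses its omission where older compilers were lenient.
template <typename Number>
typename Container<Number>::size_type
size_of(const Container<Number> &c)
{
  return c.size();
}

struct Kernel
{
  // -Wreorder-ctor: initializers must appear in declaration order.
  Kernel() : my_id(-1), n_dofs(0) {}
  int          my_id;
  unsigned int n_dofs;
};

template <typename Number>
void demo(Container<Number> &c)
{
  // '.template' disambiguates a member-template call with explicit template
  // arguments on an object of dependent type.
  c.template accumulate<false>(Number(0));

  // std::sqrt/std::round/std::pow return double; cast the result explicitly
  // instead of relying on an implicit narrowing conversion Clang warns about.
  const double n_blocks = std::ceil(static_cast<double>(c.size()) / 8.0);
  const auto   x_blocks =
    static_cast<unsigned int>(std::round(std::sqrt(n_blocks)));

  // A variable used only inside assert() is unused in release builds; a
  // (void) cast silences -Wunused-variable without changing behavior.
  const unsigned int n = size_of(c);
  assert(n == c.size());
  (void)n;
  (void)x_blocks;
}

int main()
{
  Container<double> c;
  demo(c);
  Kernel k;
  (void)k;
}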