From: David Wells
Date: Sat, 9 May 2020 19:28:34 +0000 (-0400)
Subject: cppcheck: use ++i, not i++.
X-Git-Tag: v9.2.0-rc1~60^2~4
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=16fa83ceca28fbc8e8f75b92ec60cc047fb2e7c9;p=dealii.git

cppcheck: use ++i, not i++.
---

diff --git a/include/deal.II/base/mpi_compute_index_owner_internal.h b/include/deal.II/base/mpi_compute_index_owner_internal.h
index 33afc8c1a1..91092aa273 100644
--- a/include/deal.II/base/mpi_compute_index_owner_internal.h
+++ b/include/deal.II/base/mpi_compute_index_owner_internal.h
@@ -243,7 +243,7 @@ namespace Utilities
         // 2) collect relevant processes and process local dict entries
         for (auto interval = owned_indices.begin_intervals();
              interval != owned_indices.end_intervals();
-             interval++)
+             ++interval)
           {
             // Due to the granularity of the dictionary, the interval
             // might be split into several ranges of processor owner
@@ -737,7 +737,7 @@ namespace Utilities
 
         for (auto interval = is.begin_intervals();
              interval != is.end_intervals();
-             interval++)
+             ++interval)
           send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
       }
 
diff --git a/include/deal.II/lac/full_matrix.templates.h b/include/deal.II/lac/full_matrix.templates.h
index 9aa608ba69..e5d8726a29 100644
--- a/include/deal.II/lac/full_matrix.templates.h
+++ b/include/deal.II/lac/full_matrix.templates.h
@@ -582,11 +582,11 @@ FullMatrix<number>::mmult(FullMatrix<number2> &dst,
   // arrange the loops in a way that we keep write operations low, (writing is
   // usually more costly than reading), even though we need to access the data
   // in src not in a contiguous way.
-  for (size_type i = 0; i < m; i++)
-    for (size_type j = 0; j < n; j++)
+  for (size_type i = 0; i < m; ++i)
+    for (size_type j = 0; j < n; ++j)
       {
         number2 add_value = adding ? dst(i, j) : 0.;
-        for (size_type k = 0; k < l; k++)
+        for (size_type k = 0; k < l; ++k)
           add_value += static_cast<number2>((*this)(i, k)) *
                        static_cast<number2>((src(k, j)));
         dst(i, j) = add_value;
@@ -687,11 +687,11 @@ FullMatrix<number>::Tmmult(FullMatrix<number2> &dst,
   // optimized gemm operation in case the matrix is big, so this shouldn't be
   // too bad.
   else
-    for (size_type i = 0; i < m; i++)
-      for (size_type j = 0; j < n; j++)
+    for (size_type i = 0; i < m; ++i)
+      for (size_type j = 0; j < n; ++j)
        {
          number2 add_value = adding ? dst(i, j) : 0.;
-          for (size_type k = 0; k < l; k++)
+          for (size_type k = 0; k < l; ++k)
            add_value += static_cast<number2>((*this)(k, i)) *
                         static_cast<number2>((src(k, j)));
          dst(i, j) = add_value;
@@ -788,11 +788,11 @@ FullMatrix<number>::mTmult(FullMatrix<number2> &dst,
   else
     // arrange the loops in a way that we keep write operations low, (writing is
     // usually more costly than reading).
-    for (size_type i = 0; i < m; i++)
-      for (size_type j = 0; j < n; j++)
+    for (size_type i = 0; i < m; ++i)
+      for (size_type j = 0; j < n; ++j)
        {
          number2 add_value = adding ? dst(i, j) : 0.;
-          for (size_type k = 0; k < l; k++)
+          for (size_type k = 0; k < l; ++k)
            add_value += static_cast<number2>((*this)(i, k)) *
                         static_cast<number2>(src(j, k));
          dst(i, j) = add_value;
@@ -873,11 +873,11 @@ FullMatrix<number>::TmTmult(FullMatrix<number2> &dst,
   // in the calling matrix in a non-contiguous way, possibly leading to cache
   // misses. However, we should usually end up in the optimized gemm operation
   // in case the matrix is big, so this shouldn't be too bad.
-  for (size_type i = 0; i < m; i++)
-    for (size_type j = 0; j < n; j++)
+  for (size_type i = 0; i < m; ++i)
+    for (size_type j = 0; j < n; ++j)
       {
         number2 add_value = adding ? dst(i, j) : 0.;
-        for (size_type k = 0; k < l; k++)
+        for (size_type k = 0; k < l; ++k)
           add_value += static_cast<number2>((*this)(k, i)) *
                        static_cast<number2>(src(j, k));
         dst(i, j) = add_value;
@@ -1560,13 +1560,13 @@ FullMatrix<number>::cholesky(const FullMatrix<number2> &A)
       /* reinit *this to 0 */
       this->reinit(A.m(), A.n());
 
-      for (size_type i = 0; i < this->n_cols(); i++)
+      for (size_type i = 0; i < this->n_cols(); ++i)
         {
           double SLik2 = 0.0;
-          for (size_type j = 0; j < i; j++)
+          for (size_type j = 0; j < i; ++j)
             {
               double SLikLjk = 0.0;
-              for (size_type k = 0; k < j; k++)
+              for (size_type k = 0; k < j; ++k)
                 {
                   SLikLjk += (*this)(i, k) * (*this)(j, k);
                 };
@@ -1591,9 +1591,9 @@ FullMatrix<number>::outer_product(const Vector<number2> &V,
          ExcMessage("Vectors V, W must be the same size."));
   this->reinit(V.size(), V.size());
 
-  for (size_type i = 0; i < this->n(); i++)
+  for (size_type i = 0; i < this->n(); ++i)
     {
-      for (size_type j = 0; j < this->n(); j++)
+      for (size_type j = 0; j < this->n(); ++j)
         {
           (*this)(i, j) = V(i) * W(j);
         }
@@ -1697,8 +1697,8 @@ FullMatrix<number>::copy_from(const Tensor<2, dim> &T,
   AssertIndexRange(src_r_i, src_r_j + 1);
   AssertIndexRange(src_c_i, src_c_j + 1);
 
-  for (size_type i = 0; i < src_r_j - src_r_i + 1; i++)
-    for (size_type j = 0; j < src_c_j - src_c_i + 1; j++)
+  for (size_type i = 0; i < src_r_j - src_r_i + 1; ++i)
+    for (size_type j = 0; j < src_c_j - src_c_i + 1; ++j)
       {
        const unsigned int src_r_index = static_cast<unsigned int>(i + src_r_i);
        const unsigned int src_c_index = static_cast<unsigned int>(j + src_c_i);
@@ -1725,8 +1725,8 @@ void FullMatrix<number>::copy_to(Tensor<2, dim> &T,
   AssertIndexRange(src_r_i, src_r_j + 1);
   AssertIndexRange(src_c_j, src_c_j + 1);
 
-  for (size_type i = 0; i < src_r_j - src_r_i + 1; i++)
-    for (size_type j = 0; j < src_c_j - src_c_i + 1; j++)
+  for (size_type i = 0; i < src_r_j - src_r_i + 1; ++i)
+    for (size_type j = 0; j < src_c_j - src_c_i + 1; ++j)
       {
        const unsigned int dst_r_index = static_cast<unsigned int>(i + dst_r);
        const unsigned int dst_c_index = static_cast<unsigned int>(j + dst_c);
diff --git a/include/deal.II/lac/sparse_decomposition.templates.h b/include/deal.II/lac/sparse_decomposition.templates.h
index 6a9571cbf0..d642c18731 100644
--- a/include/deal.II/lac/sparse_decomposition.templates.h
+++ b/include/deal.II/lac/sparse_decomposition.templates.h
@@ -150,7 +150,7 @@ SparseLUDecomposition<number>::prebuild_lower_bound()
 
   prebuilt_lower_bound.resize(N);
 
-  for (size_type row = 0; row < N; row++)
+  for (size_type row = 0; row < N; ++row)
     {
       prebuilt_lower_bound[row] =
         Utilities::lower_bound(&column_numbers[rowstart_indices[row] + 1],
diff --git a/include/deal.II/lac/sparse_matrix.templates.h b/include/deal.II/lac/sparse_matrix.templates.h
index 9636e89cbf..3933a20fa6 100644
--- a/include/deal.II/lac/sparse_matrix.templates.h
+++ b/include/deal.II/lac/sparse_matrix.templates.h
@@ -808,9 +808,9 @@ SparseMatrix<number>::Tvmult(OutVector &dst, const InVector &src) const
 
   dst = 0;
 
-  for (size_type i = 0; i < m(); i++)
+  for (size_type i = 0; i < m(); ++i)
     {
-      for (size_type j = cols->rowstart[i]; j < cols->rowstart[i + 1]; j++)
+      for (size_type j = cols->rowstart[i]; j < cols->rowstart[i + 1]; ++j)
         {
           const size_type p = cols->colnums[j];
           dst(p) += typename OutVector::value_type(val[j]) *
@@ -864,8 +864,8 @@ SparseMatrix<number>::Tvmult_add(OutVector &dst, const InVector &src) const
 
   Assert(!PointerComparison::equal(&src, &dst), ExcSourceEqualsDestination());
 
-  for (size_type i = 0; i < m(); i++)
-    for (size_type j = cols->rowstart[i]; j < cols->rowstart[i + 1]; j++)
+  for (size_type i = 0; i < m(); ++i)
+    for (size_type j = cols->rowstart[i]; j < cols->rowstart[i + 1]; ++j)
       {
        const size_type p = cols->colnums[j];
        dst(p) += typename OutVector::value_type(val[j]) *
@@ -1851,10 +1851,10 @@ SparseMatrix<number>::SSOR(Vector<somenumber> &dst, const number om) const
   size_type  j;
   somenumber s;
 
-  for (size_type i = 0; i < n; i++)
+  for (size_type i = 0; i < n; ++i)
     {
       s = 0.;
-      for (j = cols->rowstart[i]; j < cols->rowstart[i + 1]; j++)
+      for (j = cols->rowstart[i]; j < cols->rowstart[i + 1]; ++j)
         {
           const size_type p = cols->colnums[j];
           if (p != SparsityPattern::invalid_entry)
@@ -1871,7 +1871,7 @@ SparseMatrix<number>::SSOR(Vector<somenumber> &dst, const number om) const
        i--) // this time, i is signed, but always positive!
     {
       s = 0.;
-      for (j = cols->rowstart[i]; j < cols->rowstart[i + 1]; j++)
+      for (j = cols->rowstart[i]; j < cols->rowstart[i + 1]; ++j)
         {
           const size_type p = cols->colnums[j];
           if (p != SparsityPattern::invalid_entry)
diff --git a/include/deal.II/lac/sparse_mic.templates.h b/include/deal.II/lac/sparse_mic.templates.h
index b74393a112..398bd9fbb7 100644
--- a/include/deal.II/lac/sparse_mic.templates.h
+++ b/include/deal.II/lac/sparse_mic.templates.h
@@ -101,10 +101,10 @@ SparseMIC<number>::initialize(const SparseMatrix<somenumber> &matrix,
   inner_sums.resize(this->m());
 
   // precalc sum(j=k+1, N, a[k][j]))
-  for (size_type row = 0; row < this->m(); row++)
+  for (size_type row = 0; row < this->m(); ++row)
     inner_sums[row] = get_rowsum(row);
 
-  for (size_type row = 0; row < this->m(); row++)
+  for (size_type row = 0; row < this->m(); ++row)
     {
       const number temp  = this->begin(row)->value();
       number       temp1 = 0;
@@ -176,7 +176,7 @@ SparseMIC<number>::vmult(Vector<somenumber> &dst,
     }
 
   // Now: v = Xu
-  for (size_type row = 0; row < N; row++)
+  for (size_type row = 0; row < N; ++row)
     dst(row) *= diag[row];
 
   // x = (X-U)v
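
Why cppcheck suggests this change (a minimal sketch, not part of the patch
above): for plain integer counters such as size_type, "i++" and "++i"
compile to the same code under any optimizing compiler, so most hunks in
this commit are purely stylistic. For class-type iterators, however, such
as the IndexSet interval iterators changed in
mpi_compute_index_owner_internal.h, post-increment must construct and
return a copy of the iterator even when the caller discards the result,
while pre-increment simply advances in place. The IntIterator type below
is a hypothetical stand-in written only to illustrate that difference; it
is not deal.II code.

    #include <iostream>

    // A deliberately small forward-style iterator over an int array. It
    // stands in for heavier iterator types where copying is not free.
    struct IntIterator
    {
      const int *ptr;

      // Pre-increment: advance in place and return *this; no temporary.
      IntIterator &operator++()
      {
        ++ptr;
        return *this;
      }

      // Post-increment: the old state is copied and returned by value,
      // even when the caller immediately discards it (as in "it++;").
      IntIterator operator++(int)
      {
        IntIterator old = *this; // the extra copy happens here
        ++ptr;
        return old;
      }

      bool operator!=(const IntIterator &other) const
      {
        return ptr != other.ptr;
      }

      int operator*() const
      {
        return *ptr;
      }
    };

    int main()
    {
      const int data[3] = {1, 2, 3};
      // Both loop forms print 1 2 3; "++it" simply never constructs the
      // throwaway copy that "it++" requires.
      for (IntIterator it{data}; it != IntIterator{data + 3}; ++it)
        std::cout << *it << '\n';
    }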