From: Martin Kronbichler Date: Fri, 16 Aug 2024 09:29:18 +0000 (+0200) Subject: SolverGMRES: Implement optimized orthogonalization also for dealii::Vector X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=cac2fb20817cbfd93dbd378ead7d7f544ac053bf;p=dealii.git SolverGMRES: Implement optimized orthogonalization also for dealii::Vector --- diff --git a/include/deal.II/lac/solver_gmres.h b/include/deal.II/lac/solver_gmres.h index 82f306f404..0d3632a729 100644 --- a/include/deal.II/lac/solver_gmres.h +++ b/include/deal.II/lac/solver_gmres.h @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -132,6 +131,7 @@ namespace internal * triangular matrix by Givens rotations, and eventually solves the * minimization problem in the projected Krylov space. */ + template class ArnoldiProcess { public: @@ -193,6 +193,11 @@ namespace internal const FullMatrix & get_hessenberg_matrix() const; + /** + * Temporary vector to implement work for deal.II vector types + */ + std::vector vector_ptrs; + private: /** * Projected system matrix in upper Hessenberg form. @@ -617,7 +622,9 @@ protected: * Class that performs the actual orthogonalization process and solves the * projected linear system. */ - internal::SolverGMRESImplementation::ArnoldiProcess arnoldi_process; + internal::SolverGMRESImplementation::ArnoldiProcess< + typename VectorType::value_type> + arnoldi_process; }; @@ -711,7 +718,9 @@ private: * Class that performs the actual orthogonalization process and solves the * projected linear system. */ - internal::SolverGMRESImplementation::ArnoldiProcess arnoldi_process; + internal::SolverGMRESImplementation::ArnoldiProcess< + typename VectorType::value_type> + arnoldi_process; }; /** @} */ @@ -818,30 +827,34 @@ namespace internal template - struct is_dealii_compatible_distributed_vector; + struct is_dealii_compatible_vector; template - struct is_dealii_compatible_distributed_vector< + struct is_dealii_compatible_vector< VectorType, std::enable_if_t>> { - static constexpr bool value = std::is_same_v< - VectorType, - LinearAlgebra::distributed::Vector>; + static constexpr bool value = + std::is_same_v< + VectorType, + LinearAlgebra::distributed::Vector> || + std::is_same_v>; }; template - struct is_dealii_compatible_distributed_vector< + struct is_dealii_compatible_vector< VectorType, std::enable_if_t>> { - static constexpr bool value = std::is_same_v< - typename VectorType::BlockType, - LinearAlgebra::distributed::Vector>; + static constexpr bool value = + std::is_same_v< + typename VectorType::BlockType, + LinearAlgebra::distributed::Vector> || + std::is_same_v>; }; @@ -918,14 +931,14 @@ namespace internal template ::value, - VectorType> * = nullptr> + std::enable_if_t::value, + VectorType> * = nullptr> void Tvmult_add(const unsigned int n, const VectorType &vv, const TmpVectors &orthogonal_vectors, - Vector &h) + Vector &h, + std::vector &) { for (unsigned int i = 0; i < n; ++i) { @@ -939,159 +952,41 @@ namespace internal + // worker method for deal.II's vector types implemented in .cc file + template + void + do_Tvmult_add(const unsigned int n_vectors, + const std::size_t locally_owned_size, + const Number *current_vector, + const std::vector &orthogonal_vectors, + Vector &b); + + + template ::value, - VectorType> * = nullptr> + std::enable_if_t::value, + VectorType> * = nullptr> void - Tvmult_add(const unsigned int n, - const VectorType &vv, - const TmpVectors &orthogonal_vectors, - Vector &h) + Tvmult_add( + const unsigned int n, + const VectorType &vv, + 
const TmpVectors &orthogonal_vectors, + Vector &h, + std::vector &vector_ptrs) { for (unsigned int b = 0; b < n_blocks(vv); ++b) { - unsigned int j = 0; - - if (n <= 128) - { - // optimized path - static constexpr unsigned int n_lanes = - VectorizedArray::size(); - - VectorizedArray hs[128]; - for (unsigned int i = 0; i < n; ++i) - hs[i] = 0.0; - VectorizedArray - correct[delayed_reorthogonalization ? 129 : 1]; - if (delayed_reorthogonalization) - for (unsigned int i = 0; i < n + 1; ++i) - correct[i] = 0.0; - - unsigned int c = 0; - - constexpr unsigned int inner_batch_size = - delayed_reorthogonalization ? 6 : 12; - - for (; c < block(vv, b).locally_owned_size() / n_lanes / - inner_batch_size; - ++c, j += n_lanes * inner_batch_size) - { - VectorizedArray vvec[inner_batch_size]; - for (unsigned int k = 0; k < inner_batch_size; ++k) - vvec[k].load(block(vv, b).begin() + j + k * n_lanes); - VectorizedArray last_vector[inner_batch_size]; - for (unsigned int k = 0; k < inner_batch_size; ++k) - last_vector[k].load( - block(orthogonal_vectors[n - 1], b).begin() + j + - k * n_lanes); - - { - VectorizedArray local_sum_0 = - last_vector[0] * vvec[0]; - VectorizedArray local_sum_1 = - last_vector[0] * last_vector[0]; - VectorizedArray local_sum_2 = vvec[0] * vvec[0]; - for (unsigned int k = 1; k < inner_batch_size; ++k) - { - local_sum_0 += last_vector[k] * vvec[k]; - if (delayed_reorthogonalization) - { - local_sum_1 += last_vector[k] * last_vector[k]; - local_sum_2 += vvec[k] * vvec[k]; - } - } - hs[n - 1] += local_sum_0; - if (delayed_reorthogonalization) - { - correct[n - 1] += local_sum_1; - correct[n] += local_sum_2; - } - } - - for (unsigned int i = 0; i < n - 1; ++i) - { - // break the dependency chain into the field hs[i] for - // small sizes i by first accumulating 4 or 8 results - // into a local variable - VectorizedArray temp; - temp.load(block(orthogonal_vectors[i], b).begin() + j); - VectorizedArray local_sum_0 = temp * vvec[0]; - VectorizedArray local_sum_1 = - delayed_reorthogonalization ? 
temp * last_vector[0] : - 0.; - for (unsigned int k = 1; k < inner_batch_size; ++k) - { - temp.load(block(orthogonal_vectors[i], b).begin() + - j + k * n_lanes); - local_sum_0 += temp * vvec[k]; - if (delayed_reorthogonalization) - local_sum_1 += temp * last_vector[k]; - } - hs[i] += local_sum_0; - if (delayed_reorthogonalization) - correct[i] += local_sum_1; - } - } - - c *= inner_batch_size; - for (; c < block(vv, b).locally_owned_size() / n_lanes; - ++c, j += n_lanes) - { - VectorizedArray vvec, last_vector; - vvec.load(block(vv, b).begin() + j); - last_vector.load(block(orthogonal_vectors[n - 1], b).begin() + - j); - hs[n - 1] += last_vector * vvec; - if (delayed_reorthogonalization) - { - correct[n - 1] += last_vector * last_vector; - correct[n] += vvec * vvec; - } - - for (unsigned int i = 0; i < n - 1; ++i) - { - VectorizedArray temp; - temp.load(block(orthogonal_vectors[i], b).begin() + j); - hs[i] += temp * vvec; - if (delayed_reorthogonalization) - correct[i] += temp * last_vector; - } - } - - for (unsigned int i = 0; i < n; ++i) - { - h(i) += hs[i].sum(); - if (delayed_reorthogonalization) - h(i + n) += correct[i].sum(); - } - if (delayed_reorthogonalization) - h(n + n) += correct[n].sum(); - } - - // remainder loop of optimized path or non-optimized path (if - // n>128) - for (; j < block(vv, b).locally_owned_size(); ++j) - { - const double vvec = block(vv, b).local_element(j); - const double last_vector = - block(orthogonal_vectors[n - 1], b).local_element(j); - h(n - 1) += last_vector * vvec; - if (delayed_reorthogonalization) - { - h(n + n - 1) += last_vector * last_vector; - h(n + n) += vvec * vvec; - } - for (unsigned int i = 0; i < n - 1; ++i) - { - const double temp = - block(orthogonal_vectors[i], b).local_element(j); - h(i) += temp * vvec; - if (delayed_reorthogonalization) - h(n + i) += temp * last_vector; - } - } + vector_ptrs.resize(n); + for (unsigned int i = 0; i < n; ++i) + vector_ptrs[i] = block(orthogonal_vectors[i], b).begin(); + + do_Tvmult_add(n, + block(vv, b).end() - + block(vv, b).begin(), + block(vv, b).begin(), + vector_ptrs, + h); } Utilities::MPI::sum(h, block(vv, 0).get_mpi_communicator(), h); @@ -1101,14 +996,14 @@ namespace internal template ::value, - VectorType> * = nullptr> + std::enable_if_t::value, + VectorType> * = nullptr> double subtract_and_norm(const unsigned int n, const TmpVectors &orthogonal_vectors, const Vector &h, - VectorType &vv) + VectorType &vv, + std::vector &) { Assert(n > 0, ExcInternalError()); @@ -1147,155 +1042,43 @@ namespace internal + // worker method for deal.II's vector types implemented in .cc file + template + double + do_subtract_and_norm(const unsigned int n_vectors, + const std::size_t locally_owned_size, + const std::vector &orthogonal_vectors, + const Vector &h, + Number *current_vector); + + + template ::value, - VectorType> * = nullptr> + std::enable_if_t::value, + VectorType> * = nullptr> double - subtract_and_norm(const unsigned int n, - const TmpVectors &orthogonal_vectors, - const Vector &h, - VectorType &vv) + subtract_and_norm( + const unsigned int n, + const TmpVectors &orthogonal_vectors, + const Vector &h, + VectorType &vv, + std::vector &vector_ptrs) { - static constexpr unsigned int n_lanes = VectorizedArray::size(); - - double norm_vv_temp = 0.0; - VectorType &last_vector = - const_cast(orthogonal_vectors[n - 1]); - const double inverse_norm_previous = - delayed_reorthogonalization ? 1. / h(n + n - 1) : 0.; - const double scaling_factor_vv = - delayed_reorthogonalization ? - (h(n + n) > 0.0 ? 
inverse_norm_previous / h(n + n) : - inverse_norm_previous / h(n + n - 1)) : - 0.; + double norm_vv_temp = 0.0; for (unsigned int b = 0; b < n_blocks(vv); ++b) { - VectorizedArray norm_vv_temp_vectorized = 0.0; - - constexpr unsigned int inner_batch_size = - delayed_reorthogonalization ? 6 : 12; - - unsigned int j = 0; - unsigned int c = 0; - for (; c < - block(vv, b).locally_owned_size() / n_lanes / inner_batch_size; - ++c, j += n_lanes * inner_batch_size) - { - VectorizedArray temp[inner_batch_size]; - VectorizedArray last_vec[inner_batch_size]; - - const double last_factor = h(n - 1); - for (unsigned int k = 0; k < inner_batch_size; ++k) - { - temp[k].load(block(vv, b).begin() + j + k * n_lanes); - last_vec[k].load(block(last_vector, b).begin() + j + - k * n_lanes); - if (!delayed_reorthogonalization) - temp[k] -= last_factor * last_vec[k]; - } - - for (unsigned int i = 0; i < n - 1; ++i) - { - const double factor = h(i); - const double correction_factor = - (delayed_reorthogonalization ? h(n + i) : 0.0); - for (unsigned int k = 0; k < inner_batch_size; ++k) - { - VectorizedArray vec; - vec.load(block(orthogonal_vectors[i], b).begin() + j + - k * n_lanes); - temp[k] -= factor * vec; - if (delayed_reorthogonalization) - last_vec[k] -= correction_factor * vec; - } - } - - if (delayed_reorthogonalization) - for (unsigned int k = 0; k < inner_batch_size; ++k) - { - last_vec[k] = last_vec[k] * inverse_norm_previous; - last_vec[k].store(block(last_vector, b).begin() + j + - k * n_lanes); - temp[k] -= last_factor * last_vec[k]; - temp[k] = temp[k] * scaling_factor_vv; - temp[k].store(block(vv, b).begin() + j + k * n_lanes); - } - else - for (unsigned int k = 0; k < inner_batch_size; ++k) - { - temp[k].store(block(vv, b).begin() + j + k * n_lanes); - norm_vv_temp_vectorized += temp[k] * temp[k]; - } - } - - c *= inner_batch_size; - for (; c < block(vv, b).locally_owned_size() / n_lanes; - ++c, j += n_lanes) - { - VectorizedArray temp, last_vec; - temp.load(block(vv, b).begin() + j); - last_vec.load(block(last_vector, b).begin() + j); - if (!delayed_reorthogonalization) - temp -= h(n - 1) * last_vec; - - for (unsigned int i = 0; i < n - 1; ++i) - { - VectorizedArray vec; - vec.load(block(orthogonal_vectors[i], b).begin() + j); - temp -= h(i) * vec; - if (delayed_reorthogonalization) - last_vec -= h(n + i) * vec; - } - - if (delayed_reorthogonalization) - { - last_vec = last_vec * inverse_norm_previous; - last_vec.store(block(last_vector, b).begin() + j); - temp -= h(n - 1) * last_vec; - temp = temp * scaling_factor_vv; - temp.store(block(vv, b).begin() + j); - } - else - { - temp.store(block(vv, b).begin() + j); - norm_vv_temp_vectorized += temp * temp; - } - } - - if (!delayed_reorthogonalization) - norm_vv_temp += norm_vv_temp_vectorized.sum(); - - for (; j < block(vv, b).locally_owned_size(); ++j) - { - double temp = block(vv, b).local_element(j); - double last_vec = block(last_vector, b).local_element(j); - if (delayed_reorthogonalization) - { - for (unsigned int i = 0; i < n - 1; ++i) - { - const double vec = - block(orthogonal_vectors[i], b).local_element(j); - temp -= h(i) * vec; - last_vec -= h(n + i) * vec; - } - last_vec *= inverse_norm_previous; - block(last_vector, b).local_element(j) = last_vec; - temp -= h(n - 1) * last_vec; - temp *= scaling_factor_vv; - } - else - { - temp -= h(n - 1) * last_vec; - for (unsigned int i = 0; i < n - 1; ++i) - temp -= - h(i) * block(orthogonal_vectors[i], b).local_element(j); - norm_vv_temp += temp * temp; - } - block(vv, b).local_element(j) = temp; 
- } + vector_ptrs.resize(n); + for (unsigned int i = 0; i < n; ++i) + vector_ptrs[i] = block(orthogonal_vectors[i], b).begin(); + + norm_vv_temp += do_subtract_and_norm( + n, + block(vv, b).end() - block(vv, b).begin(), + vector_ptrs, + h, + block(vv, b).begin()); } return std::sqrt( @@ -1305,15 +1088,15 @@ namespace internal template ::value, - VectorType> * = nullptr> + std::enable_if_t::value, + VectorType> * = nullptr> void add(VectorType &p, const unsigned int n, const Vector &h, const TmpVectors &tmp_vectors, - const bool zero_out) + const bool zero_out, + std::vector &) { if (zero_out) p.equ(h(0), tmp_vectors[0]); @@ -1326,31 +1109,48 @@ namespace internal + // worker method for deal.II's vector types implemented in .cc file + template + void + do_add(const unsigned int n_vectors, + const std::size_t locally_owned_size, + const std::vector &tmp_vectors, + const Vector &h, + const bool zero_out, + Number *output); + + + template ::value, - VectorType> * = nullptr> + std::enable_if_t::value, + VectorType> * = nullptr> void - add(VectorType &p, - const unsigned int n, - const Vector &h, - const TmpVectors &tmp_vectors, - const bool zero_out) + add(VectorType &p, + const unsigned int n, + const Vector &h, + const TmpVectors &tmp_vectors, + const bool zero_out, + std::vector &vector_ptrs) { for (unsigned int b = 0; b < n_blocks(p); ++b) - for (unsigned int j = 0; j < block(p, b).locally_owned_size(); ++j) - { - double temp = zero_out ? 0 : block(p, b).local_element(j); - for (unsigned int i = 0; i < n; ++i) - temp += block(tmp_vectors[i], b).local_element(j) * h(i); - block(p, b).local_element(j) = temp; - } + { + vector_ptrs.resize(n); + for (unsigned int i = 0; i < n; ++i) + vector_ptrs[i] = block(tmp_vectors[i], b).begin(); + do_add(n, + block(p, b).end() - block(p, b).begin(), + vector_ptrs, + h, + zero_out, + block(p, b).begin()); + } } + template inline void - ArnoldiProcess::initialize( + ArnoldiProcess::initialize( const LinearAlgebra::OrthogonalizationStrategy orthogonalization_strategy, const unsigned int basis_size, const bool force_reorthogonalization) @@ -1375,9 +1175,10 @@ namespace internal + template template inline double - ArnoldiProcess::orthonormalize_nth_vector( + ArnoldiProcess::orthonormalize_nth_vector( const unsigned int n, TmpVectors &orthogonal_vectors, const unsigned int accumulated_iterations, @@ -1405,7 +1206,7 @@ namespace internal // 4 of Bielich et al. (2022). // To avoid un-scaled numbers as appearing with the original - // algorithm of Bielich et al., we use a preliminary scaling of the + // algorithm by Bielich et al., we use a preliminary scaling of the // last vector. This will be corrected in the delayed step. const double previous_scaling = n > 0 ? h(n + n - 2) : 1.; @@ -1413,7 +1214,7 @@ namespace internal h.reinit(n + n + 1); // global reduction - Tvmult_add(n, vv, orthogonal_vectors, h); + Tvmult_add(n, vv, orthogonal_vectors, h, vector_ptrs); // delayed correction terms double tmp = 0; @@ -1444,8 +1245,8 @@ namespace internal hessenberg_matrix(i, n - 1) = (h(i) - sum) / alpha_j; } - // Compute estimate norm for approximate convergence criterion (to - // be corrected in next iteration) + // compute norm estimate for approximate convergence criterion + // (value of norm to be corrected in next iteration) double sum = 0; for (unsigned int i = 0; i < n - 1; ++i) sum += h(i) * h(i); @@ -1455,10 +1256,10 @@ namespace internal // projection and delayed reorthogonalization. 
We scale the vector // vv here by the preliminary norm to avoid working with too large - // values and correct to the actual norm in high precision in the - // next iteration. + // values and correct the actual norm in the Hessenberg matrix in + // high precision in the next iteration. h(n + n) = hessenberg_matrix(n, n - 1); - subtract_and_norm(n, orthogonal_vectors, h, vv); + subtract_and_norm(n, orthogonal_vectors, h, vv, vector_ptrs); // transform new column of upper Hessenberg matrix into upper // triangular form by computing the respective factor @@ -1503,9 +1304,9 @@ namespace internal LinearAlgebra::OrthogonalizationStrategy:: classical_gram_schmidt) { - Tvmult_add(n, vv, orthogonal_vectors, h); - norm_vv = - subtract_and_norm(n, orthogonal_vectors, h, vv); + Tvmult_add(n, vv, orthogonal_vectors, h, vector_ptrs); + norm_vv = subtract_and_norm( + n, orthogonal_vectors, h, vv, vector_ptrs); } else { @@ -1561,8 +1362,9 @@ namespace internal + template inline double - ArnoldiProcess::do_givens_rotation( + ArnoldiProcess::do_givens_rotation( const bool delayed_reorthogonalization, const int col, FullMatrix &matrix, @@ -1570,7 +1372,7 @@ namespace internal Vector &rhs) { // for the delayed orthogonalization, we can only compute the column of - // the previous column (as there will be correction terms added to the + // the previous iteration (as there will be correction terms added to the // present column for stability reasons), but we still want to compute // the residual estimate from the accumulated work; we therefore perform // givens rotations on two columns simultaneously @@ -1645,8 +1447,9 @@ namespace internal + template inline const Vector & - ArnoldiProcess::solve_projected_system( + ArnoldiProcess::solve_projected_system( const bool orthogonalization_finished) { FullMatrix tmp_triangular_matrix; @@ -1656,11 +1459,12 @@ namespace internal unsigned int n = givens_rotations.size(); // If we solve with the delayed orthogonalization, we still need to - // perform the elimination of the last column. We distinguish two cases, - // one where the orthogonalization has finished (i.e., end of inner - // iteration in GMRES) and we can safely overwrite the content of the - // tridiagonal matrix and right hand side, and the case during the inner - // iterations where we need to create copies of the matrices in the QR + // perform the elimination of the last column before we can solve the + // projected system. We distinguish two cases, one where the + // orthogonalization has finished (i.e., end of inner iteration in + // GMRES) and we can safely overwrite the content of the tridiagonal + // matrix and right hand side, and the case during the inner iterations, + // where we need to create copies of the matrices in the QR // decomposition as well as the right hand side. if (orthogonalization_strategy == LinearAlgebra::OrthogonalizationStrategy:: @@ -1705,8 +1509,9 @@ namespace internal + template inline const FullMatrix & - ArnoldiProcess::get_hessenberg_matrix() const + ArnoldiProcess::get_hessenberg_matrix() const { return hessenberg_matrix; } @@ -1883,10 +1688,8 @@ void SolverGMRES::solve(const MatrixType &A, } } - const double norm_v = - arnoldi_process.orthonormalize_nth_vector(0, - basis_vectors, - accumulated_iterations); + const double norm_v = arnoldi_process.orthonormalize_nth_vector( + 0, basis_vectors, accumulated_iterations, re_orthogonalize_signal); // check the residual here as well since it may be that we got the exact // (or an almost exact) solution vector at the outset. 
if we wouldn't @@ -2022,11 +1825,21 @@ void SolverGMRES::solve(const MatrixType &A, if (left_precondition) dealii::internal::SolverGMRESImplementation::add( - x, inner_iteration, projected_solution, basis_vectors, false); + x, + inner_iteration, + projected_solution, + basis_vectors, + false, + arnoldi_process.vector_ptrs); else { dealii::internal::SolverGMRESImplementation::add( - p, inner_iteration, projected_solution, basis_vectors, true); + p, + inner_iteration, + projected_solution, + basis_vectors, + true, + arnoldi_process.vector_ptrs); preconditioner.vmult(v, p); x.add(1., v); } @@ -2145,6 +1958,7 @@ double SolverGMRES::criterion() } + //----------------------------------------------------------------------// template @@ -2248,7 +2062,12 @@ void SolverFGMRES::solve(const MatrixType &A, const Vector &projected_solution = arnoldi_process.solve_projected_system(true); dealii::internal::SolverGMRESImplementation::add( - x, inner_iteration, projected_solution, z, false); + x, + inner_iteration, + projected_solution, + z, + false, + arnoldi_process.vector_ptrs); } while (iteration_state == SolverControl::iterate); diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt index 7b9dfcd42d..aa320376f2 100644 --- a/source/lac/CMakeLists.txt +++ b/source/lac/CMakeLists.txt @@ -32,6 +32,7 @@ set(_unity_include_src read_write_vector.cc solver.cc solver_control.cc + solver_gmres.cc sparse_decomposition.cc sparse_direct.cc sparse_ilu.cc @@ -72,6 +73,7 @@ set(_inst read_write_vector.inst.in scalapack.inst.in solver.inst.in + solver_gmres.inst.in sparse_matrix_ez.inst.in sparse_matrix.inst.in tensor_product_matrix.inst.in diff --git a/source/lac/solver_gmres.cc b/source/lac/solver_gmres.cc new file mode 100644 index 0000000000..01db0ff967 --- /dev/null +++ b/source/lac/solver_gmres.cc @@ -0,0 +1,382 @@ +// ------------------------------------------------------------------------ +// +// SPDX-License-Identifier: LGPL-2.1-or-later +// Copyright (C) 2024 by the deal.II authors +// +// This file is part of the deal.II library. +// +// Part of the source code is dual licensed under Apache-2.0 WITH +// LLVM-exception OR LGPL-2.1-or-later. Detailed license information +// governing the source code and code contributions can be found in +// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II. +// +// ------------------------------------------------------------------------ + + +#include + +#include + +DEAL_II_NAMESPACE_OPEN + +namespace internal +{ + namespace SolverGMRESImplementation + { + template + void + do_Tvmult_add(const unsigned int n_vectors, + const std::size_t locally_owned_size, + const Number *current_vector, + const std::vector &orthogonal_vectors, + Vector &h) + { + unsigned int j = 0; + + if (n_vectors <= 128) + { + // optimized path + static constexpr unsigned int n_lanes = + VectorizedArray::size(); + + VectorizedArray hs[128]; + for (unsigned int i = 0; i < n_vectors; ++i) + hs[i] = 0.0; + VectorizedArray + correct[delayed_reorthogonalization ? 129 : 1]; + if (delayed_reorthogonalization) + for (unsigned int i = 0; i < n_vectors + 1; ++i) + correct[i] = 0.0; + + unsigned int c = 0; + + constexpr unsigned int inner_batch_size = + delayed_reorthogonalization ? 
6 : 12; + + for (; c < locally_owned_size / n_lanes / inner_batch_size; + ++c, j += n_lanes * inner_batch_size) + { + VectorizedArray vvec[inner_batch_size]; + for (unsigned int k = 0; k < inner_batch_size; ++k) + vvec[k].load(current_vector + j + k * n_lanes); + VectorizedArray prev_vector[inner_batch_size]; + for (unsigned int k = 0; k < inner_batch_size; ++k) + prev_vector[k].load(orthogonal_vectors[n_vectors - 1] + j + + k * n_lanes); + + { + VectorizedArray local_sum_0 = prev_vector[0] * vvec[0]; + VectorizedArray local_sum_1 = + prev_vector[0] * prev_vector[0]; + VectorizedArray local_sum_2 = vvec[0] * vvec[0]; + for (unsigned int k = 1; k < inner_batch_size; ++k) + { + local_sum_0 += prev_vector[k] * vvec[k]; + if (delayed_reorthogonalization) + { + local_sum_1 += prev_vector[k] * prev_vector[k]; + local_sum_2 += vvec[k] * vvec[k]; + } + } + hs[n_vectors - 1] += local_sum_0; + if (delayed_reorthogonalization) + { + correct[n_vectors - 1] += local_sum_1; + correct[n_vectors] += local_sum_2; + } + } + + for (unsigned int i = 0; i < n_vectors - 1; ++i) + { + // break the dependency chain into the field hs[i] for + // small sizes i by first accumulating 4 or 8 results + // into a local variable + VectorizedArray temp; + temp.load(orthogonal_vectors[i] + j); + VectorizedArray local_sum_0 = temp * vvec[0]; + VectorizedArray local_sum_1 = + delayed_reorthogonalization ? temp * prev_vector[0] : 0.; + for (unsigned int k = 1; k < inner_batch_size; ++k) + { + temp.load(orthogonal_vectors[i] + j + k * n_lanes); + local_sum_0 += temp * vvec[k]; + if (delayed_reorthogonalization) + local_sum_1 += temp * prev_vector[k]; + } + hs[i] += local_sum_0; + if (delayed_reorthogonalization) + correct[i] += local_sum_1; + } + } + + c *= inner_batch_size; + for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes) + { + VectorizedArray vvec, prev_vector; + vvec.load(current_vector + j); + prev_vector.load(orthogonal_vectors[n_vectors - 1] + j); + hs[n_vectors - 1] += prev_vector * vvec; + if (delayed_reorthogonalization) + { + correct[n_vectors - 1] += prev_vector * prev_vector; + correct[n_vectors] += vvec * vvec; + } + + for (unsigned int i = 0; i < n_vectors - 1; ++i) + { + VectorizedArray temp; + temp.load(orthogonal_vectors[i] + j); + hs[i] += temp * vvec; + if (delayed_reorthogonalization) + correct[i] += temp * prev_vector; + } + } + + for (unsigned int i = 0; i < n_vectors; ++i) + { + h(i) += hs[i].sum(); + if (delayed_reorthogonalization) + h(i + n_vectors) += correct[i].sum(); + } + if (delayed_reorthogonalization) + h(n_vectors + n_vectors) += correct[n_vectors].sum(); + } + + // remainder loop of optimized path or non-optimized path (if + // n>128) + for (; j < locally_owned_size; ++j) + { + const double vvec = current_vector[j]; + const double prev_vector = orthogonal_vectors[n_vectors - 1][j]; + h(n_vectors - 1) += prev_vector * vvec; + if (delayed_reorthogonalization) + { + h(n_vectors + n_vectors - 1) += prev_vector * prev_vector; + h(n_vectors + n_vectors) += vvec * vvec; + } + for (unsigned int i = 0; i < n_vectors - 1; ++i) + { + const double temp = orthogonal_vectors[i][j]; + h(i) += temp * vvec; + if (delayed_reorthogonalization) + h(n_vectors + i) += temp * prev_vector; + } + } + } + + + + template + double + do_subtract_and_norm(const unsigned int n_vectors, + const std::size_t locally_owned_size, + const std::vector &orthogonal_vectors, + const Vector &h, + Number *current_vector) + { + double norm_vv_temp = 0; + + Number *previous_vector = + const_cast(orthogonal_vectors[n_vectors 
- 1]); + const double inverse_norm_previous = + delayed_reorthogonalization ? 1. / h(n_vectors + n_vectors - 1) : 0.; + const double scaling_factor_vv = + delayed_reorthogonalization ? + (h(n_vectors + n_vectors) > 0.0 ? + inverse_norm_previous / h(n_vectors + n_vectors) : + inverse_norm_previous / h(n_vectors + n_vectors - 1)) : + 0.; + VectorizedArray norm_vv_temp_vectorized = 0.0; + + static constexpr unsigned int n_lanes = VectorizedArray::size(); + constexpr unsigned int inner_batch_size = + delayed_reorthogonalization ? 6 : 12; + + unsigned int j = 0; + unsigned int c = 0; + for (; c < locally_owned_size / n_lanes / inner_batch_size; + ++c, j += n_lanes * inner_batch_size) + { + VectorizedArray temp[inner_batch_size]; + VectorizedArray prev_vector[inner_batch_size]; + + const double last_factor = h(n_vectors - 1); + for (unsigned int k = 0; k < inner_batch_size; ++k) + { + temp[k].load(current_vector + j + k * n_lanes); + prev_vector[k].load(previous_vector + j + k * n_lanes); + if (!delayed_reorthogonalization) + temp[k] -= last_factor * prev_vector[k]; + } + + for (unsigned int i = 0; i < n_vectors - 1; ++i) + { + const double factor = h(i); + const double correction_factor = + (delayed_reorthogonalization ? h(n_vectors + i) : 0.0); + for (unsigned int k = 0; k < inner_batch_size; ++k) + { + VectorizedArray vec; + vec.load(orthogonal_vectors[i] + j + k * n_lanes); + temp[k] -= factor * vec; + if (delayed_reorthogonalization) + prev_vector[k] -= correction_factor * vec; + } + } + + if (delayed_reorthogonalization) + for (unsigned int k = 0; k < inner_batch_size; ++k) + { + prev_vector[k] = prev_vector[k] * inverse_norm_previous; + prev_vector[k].store(previous_vector + j + k * n_lanes); + temp[k] -= last_factor * prev_vector[k]; + temp[k] = temp[k] * scaling_factor_vv; + temp[k].store(current_vector + j + k * n_lanes); + } + else + for (unsigned int k = 0; k < inner_batch_size; ++k) + { + temp[k].store(current_vector + j + k * n_lanes); + norm_vv_temp_vectorized += temp[k] * temp[k]; + } + } + + c *= inner_batch_size; + for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes) + { + VectorizedArray temp, prev_vector; + temp.load(current_vector + j); + prev_vector.load(previous_vector + j); + if (!delayed_reorthogonalization) + temp -= h(n_vectors - 1) * prev_vector; + + for (unsigned int i = 0; i < n_vectors - 1; ++i) + { + VectorizedArray vec; + vec.load(orthogonal_vectors[i] + j); + temp -= h(i) * vec; + if (delayed_reorthogonalization) + prev_vector -= h(n_vectors + i) * vec; + } + + if (delayed_reorthogonalization) + { + prev_vector = prev_vector * inverse_norm_previous; + prev_vector.store(previous_vector + j); + temp -= h(n_vectors - 1) * prev_vector; + temp = temp * scaling_factor_vv; + temp.store(current_vector + j); + } + else + { + temp.store(current_vector + j); + norm_vv_temp_vectorized += temp * temp; + } + } + + if (!delayed_reorthogonalization) + norm_vv_temp += norm_vv_temp_vectorized.sum(); + + for (; j < locally_owned_size; ++j) + { + double temp = current_vector[j]; + double prev_vector = previous_vector[j]; + if (delayed_reorthogonalization) + { + for (unsigned int i = 0; i < n_vectors - 1; ++i) + { + const double vec = orthogonal_vectors[i][j]; + temp -= h(i) * vec; + prev_vector -= h(n_vectors + i) * vec; + } + prev_vector *= inverse_norm_previous; + previous_vector[j] = prev_vector; + temp -= h(n_vectors - 1) * prev_vector; + temp *= scaling_factor_vv; + } + else + { + temp -= h(n_vectors - 1) * prev_vector; + for (unsigned int i = 0; i < n_vectors - 1; ++i) 
+ temp -= h(i) * orthogonal_vectors[i][j]; + norm_vv_temp += temp * temp; + } + current_vector[j] = temp; + } + + return norm_vv_temp; + } + + + + template + void + do_add(const unsigned int n_vectors, + const std::size_t locally_owned_size, + const std::vector &tmp_vectors, + const Vector &h, + const bool zero_out, + Number *output) + { + static constexpr unsigned int n_lanes = VectorizedArray::size(); + constexpr unsigned int inner_batch_size = 12; + + unsigned int j = 0; + unsigned int c = 0; + for (; c < locally_owned_size / n_lanes / inner_batch_size; + ++c, j += n_lanes * inner_batch_size) + { + VectorizedArray temp[inner_batch_size]; + if (zero_out) + for (VectorizedArray &a : temp) + a = {}; + else + for (unsigned int k = 0; k < inner_batch_size; ++k) + temp[k].load(output + j + k * n_lanes); + + for (unsigned int i = 0; i < n_vectors; ++i) + { + const double h_i = h(i); + for (unsigned int k = 0; k < inner_batch_size; ++k) + { + VectorizedArray v_ij; + v_ij.load(tmp_vectors[i] + j + k * n_lanes); + temp[k] += v_ij * h_i; + } + } + + for (unsigned int k = 0; k < inner_batch_size; ++k) + temp[k].store(output + j + k * n_lanes); + } + + c *= inner_batch_size; + for (; c < locally_owned_size / n_lanes; ++c, j += n_lanes) + { + VectorizedArray temp = {}; + if (!zero_out) + temp.load(output + j); + + for (unsigned int i = 0; i < n_vectors; ++i) + { + VectorizedArray v_ij; + v_ij.load(tmp_vectors[i] + j); + temp += v_ij * h(i); + } + + temp.store(output + j); + } + + for (; j < locally_owned_size; ++j) + { + double temp = zero_out ? 0.0 : output[j]; + for (unsigned int i = 0; i < n_vectors; ++i) + temp += tmp_vectors[i][j] * h(i); + output[j] = temp; + } + } + } // namespace SolverGMRESImplementation +} // namespace internal + +#include "solver_gmres.inst" + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/lac/solver_gmres.inst.in b/source/lac/solver_gmres.inst.in new file mode 100644 index 0000000000..2e8573b415 --- /dev/null +++ b/source/lac/solver_gmres.inst.in @@ -0,0 +1,56 @@ +// ------------------------------------------------------------------------ +// +// SPDX-License-Identifier: LGPL-2.1-or-later +// Copyright (C) 2013 - 2019 by the deal.II authors +// +// This file is part of the deal.II library. +// +// Part of the source code is dual licensed under Apache-2.0 WITH +// LLVM-exception OR LGPL-2.1-or-later. Detailed license information +// governing the source code and code contributions can be found in +// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II. +// +// ------------------------------------------------------------------------ + + + +for (S : REAL_SCALARS) + { + template void internal::SolverGMRESImplementation::do_Tvmult_add( + const unsigned int, + const std::size_t, + const S *, + const std::vector &, + Vector &); + + template void internal::SolverGMRESImplementation::do_Tvmult_add( + const unsigned int, + const std::size_t, + const S *, + const std::vector &, + Vector &); + + template double + internal::SolverGMRESImplementation::do_subtract_and_norm( + const unsigned int, + const std::size_t, + const std::vector &, + const Vector &, + S *); + + template double + internal::SolverGMRESImplementation::do_subtract_and_norm( + const unsigned int, + const std::size_t, + const std::vector &, + const Vector &, + S *); + + template void internal::SolverGMRESImplementation::do_add( + const unsigned int, + const std::size_t, + const std::vector &, + const Vector &, + const bool, + S *); + }
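

The trait renamed above from is_dealii_compatible_distributed_vector to is_dealii_compatible_vector widens the optimized path from LinearAlgebra::distributed::Vector to plain dealii::Vector; the routing itself happens by overload selection on the trait's value. A minimal self-contained sketch of that dispatch pattern follows, with hypothetical stand-in types MyFastVector and MySlowVector (the real trait additionally inspects block vectors and the memory space):

#include <iostream>
#include <type_traits>

struct MyFastVector {}; // hypothetical: stands in for dealii::Vector et al.
struct MySlowVector {}; // hypothetical: stands in for external wrappers

template <typename VectorType>
struct is_dealii_compatible_vector
  : std::bool_constant<std::is_same_v<VectorType, MyFastVector>>
{};

// Chosen for compatible vectors: would gather raw pointers and call the
// flat kernels from solver_gmres.cc.
template <typename VectorType,
          std::enable_if_t<is_dealii_compatible_vector<VectorType>::value,
                           VectorType> * = nullptr>
void orthogonalize(const VectorType &)
{
  std::cout << "optimized raw-pointer path\n";
}

// Chosen for everything else: falls back to the generic vector interface.
template <typename VectorType,
          std::enable_if_t<!is_dealii_compatible_vector<VectorType>::value,
                           VectorType> * = nullptr>
void orthogonalize(const VectorType &)
{
  std::cout << "generic fallback path\n";
}

int main()
{
  orthogonalize(MyFastVector{}); // prints: optimized raw-pointer path
  orthogonalize(MySlowVector{}); // prints: generic fallback path
}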
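

The two kernels do_Tvmult_add and do_subtract_and_norm together implement one classical Gram-Schmidt step: a fused reduction of all inner products against the current vector, then subtraction of the projections and the norm of what remains. The following is a plain scalar reference of the non-delayed case, stripped of SIMD and MPI (the real code additionally sums h across ranks); the function name is illustrative:

#include <cmath>
#include <cstddef>
#include <vector>

// Reference semantics: h(i) = <v_i, w> for all previous basis vectors
// (do_Tvmult_add), then w <- w - sum_i h(i) v_i and return ||w||
// (do_subtract_and_norm).
double classical_gram_schmidt_step(
  const std::vector<const double *> &v, // the n previous basis vectors
  double                            *w, // the vector being orthogonalized
  std::vector<double>               &h, // receives the projection weights
  const std::size_t                  local_size)
{
  const unsigned int n = static_cast<unsigned int>(v.size());
  h.assign(n, 0.0);

  // First pass: all inner products in one sweep over the data; in the MPI
  // case this local result is followed by a single global reduction.
  for (std::size_t j = 0; j < local_size; ++j)
    for (unsigned int i = 0; i < n; ++i)
      h[i] += v[i][j] * w[j];

  // Second pass: subtract the projections and accumulate the new norm.
  double norm_sq = 0.0;
  for (std::size_t j = 0; j < local_size; ++j)
    {
      double t = w[j];
      for (unsigned int i = 0; i < n; ++i)
        t -= h[i] * v[i][j];
      w[j] = t;
      norm_sq += t * t;
    }
  return std::sqrt(norm_sq);
}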
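

Both reduction kernels accumulate inner_batch_size SIMD loads into a local variable before touching the long-lived accumulator hs[i], breaking the floating-point dependency chain that would otherwise serialize the additions. A sketch of that pattern for a single dot product, using deal.II's VectorizedArray; the function name is illustrative, and the batch size of 12 mirrors the non-delayed path:

#include <deal.II/base/vectorization.h>

#include <cstddef>

using namespace dealii;

// Every block of inner_batch_size SIMD loads is reduced into a local
// partial sum first, so the long-lived accumulator is updated once per
// block instead of once per load. The remainder is handled as in the
// patch: a plain SIMD loop, then a scalar tail loop.
double dot_batched(const double *x, const double *y, const std::size_t size)
{
  constexpr unsigned int n_lanes          = VectorizedArray<double>::size();
  constexpr unsigned int inner_batch_size = 12; // non-delayed path

  VectorizedArray<double> acc = 0.0;
  std::size_t             j   = 0;
  for (; j + n_lanes * inner_batch_size <= size;
       j += n_lanes * inner_batch_size)
    {
      VectorizedArray<double> local_sum = 0.0; // breaks the dependency chain
      for (unsigned int k = 0; k < inner_batch_size; ++k)
        {
          VectorizedArray<double> a, b;
          a.load(x + j + k * n_lanes);
          b.load(y + j + k * n_lanes);
          local_sum += a * b;
        }
      acc += local_sum;
    }

  for (; j + n_lanes <= size; j += n_lanes) // plain SIMD remainder
    {
      VectorizedArray<double> a, b;
      a.load(x + j);
      b.load(y + j);
      acc += a * b;
    }

  double sum = acc.sum();
  for (; j < size; ++j) // scalar tail
    sum += x[j] * y[j];
  return sum;
}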
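

For the delayed reorthogonalization of Bielich et al. (2022), the code packs everything it needs into one reduction vector h of length 2n + 1 (cf. h.reinit(n + n + 1) above). The following layout is reconstructed from the accumulation in the delayed path of do_Tvmult_add:

// With w the current vector and v_0 ... v_{n-1} the previous basis
// vectors, v_{n-1} being only provisionally scaled:
//
//   h(0)   ... h(n-1)    <v_i, w>        projections onto all basis vectors
//   h(n)   ... h(n+n-2)  <v_i, v_{n-1}>  correction terms for the last,
//                                        not yet reorthogonalized vector
//   h(n+n-1)             <v_{n-1}, v_{n-1}>
//   h(n+n)               <w, w>
//
// One global reduction over this array thus carries both the new
// projections and the data needed to finish the previous step, which is
// what saves the second reduction of classical Gram-Schmidt with
// reorthogonalization. The scalar code in orthonormalize_nth_vector then
// converts these raw sums into Hessenberg entries and norm estimates, and
// do_subtract_and_norm uses the correction entries to reorthogonalize and
// rescale v_{n-1} while it subtracts the projections from w.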
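

do_givens_rotation reduces each new Hessenberg column to upper triangular form and updates the residual estimate; the delayed variant rotates two columns at once, as its comment explains. The underlying single-column update is the textbook Givens step sketched below (an illustrative function, not the patch's two-column version):

#include <cmath>
#include <utility>
#include <vector>

// 'column' holds the new Hessenberg column with its subdiagonal entry at
// index col + 1; 'rhs' is the projected right-hand side whose last entry
// tracks the residual norm estimate. Assumes the column is not already
// zero below the diagonal (r > 0).
void givens_step(std::vector<double>                    &column,
                 std::vector<std::pair<double, double>> &rotations,
                 std::vector<double>                    &rhs,
                 const unsigned int                      col)
{
  // Apply all previously computed rotations to the new column.
  for (unsigned int i = 0; i < col; ++i)
    {
      const double c = rotations[i].first;
      const double s = rotations[i].second;
      const double t = column[i];
      column[i]     = c * t + s * column[i + 1];
      column[i + 1] = -s * t + c * column[i + 1];
    }

  // Compute the new rotation that zeroes the subdiagonal entry.
  const double r = std::hypot(column[col], column[col + 1]);
  const double c = column[col] / r;
  const double s = column[col + 1] / r;
  rotations.emplace_back(c, s);
  column[col]     = r;
  column[col + 1] = 0.;

  // Rotate the right-hand side; |rhs[col + 1]| is the new residual
  // estimate that drives the convergence check.
  const double t = rhs[col];
  rhs[col]     = c * t;
  rhs[col + 1] = -s * t;
}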
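

On the calling side, each wrapper gathers the raw begin() pointers of the (block) vectors into the reusable vector_ptrs scratch array and forwards them to the flat kernel. A sketch of how the new do_add kernel could be driven for plain dealii::Vector<double>, assuming the worker declaration in solver_gmres.h stays reachable as in the patch (the function name combine is hypothetical):

#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/vector.h>

#include <cstddef>
#include <vector>

using namespace dealii;

// Computes p = sum_i h(i) * tmp[i] by gathering raw pointers and calling
// the flat kernel, mirroring what the add() wrapper in solver_gmres.h
// does per block of a block vector.
void combine(Vector<double>                    &p,
             const std::vector<Vector<double>> &tmp,
             const Vector<double>              &h)
{
  std::vector<const double *> vector_ptrs(tmp.size());
  for (std::size_t i = 0; i < tmp.size(); ++i)
    vector_ptrs[i] = tmp[i].begin();

  internal::SolverGMRESImplementation::do_add(
    static_cast<unsigned int>(tmp.size()),
    static_cast<std::size_t>(p.end() - p.begin()), // locally owned size
    vector_ptrs,
    h,
    /*zero_out=*/true, // overwrite p instead of accumulating into it
    p.begin());
}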
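

The new solver_gmres.inst.in is expanded at build time by deal.II's expand_instantiations tool; for (S : REAL_SCALARS) emits one block per configured real scalar, typically float and double. Since the plain-text rendering above drops the template angle brackets, the following spelled-out instantiation of do_add for S = double is a reconstruction of what the generated .inst file should contain:

template void internal::SolverGMRESImplementation::do_add<double>(
  const unsigned int,
  const std::size_t,
  const std::vector<const double *> &,
  const Vector<double> &,
  const bool,
  double *);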