From: Peter Munch
Date: Sat, 15 Oct 2022 11:00:51 +0000 (+0200)
Subject: SolverGMRES: add classical Gram-Schmidt
X-Git-Tag: v9.5.0-rc1~894^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F14349%2Fhead;p=dealii.git

SolverGMRES: add classical Gram-Schmidt
---

diff --git a/include/deal.II/lac/solver_gmres.h b/include/deal.II/lac/solver_gmres.h
index 74891ad5ca..76bc111c58 100644
--- a/include/deal.II/lac/solver_gmres.h
+++ b/include/deal.II/lac/solver_gmres.h
@@ -22,6 +22,7 @@
 #include
 #include
+#include <deal.II/base/vectorization.h>
 #include
 #include
@@ -38,6 +39,18 @@ DEAL_II_NAMESPACE_OPEN
 
+// forward declarations
+#ifndef DOXYGEN
+namespace LinearAlgebra
+{
+  namespace distributed
+  {
+    template <typename, typename>
+    class Vector;
+  } // namespace distributed
+} // namespace LinearAlgebra
+#endif
+
 /**
  * @addtogroup Solvers
  * @{
@@ -184,6 +197,18 @@ public:
    */
   struct AdditionalData
   {
+    enum class OrthogonalizationStrategy
+    {
+      /**
+       * Use modified Gram-Schmidt algorithm.
+       */
+      modified_gram_schmidt,
+      /**
+       * Use classical Gram-Schmidt algorithm.
+       */
+      classical_gram_schmidt
+    };
+
     /**
      * Constructor. By default, set the number of temporary vectors to 30,
      * i.e. do a restart every 28 iterations. Also set preconditioning from
      * left, the residual of the stopping criterion to the default residual,
      * and re-orthogonalization only if necessary. Also, the batched mode with
      * reduced functionality to track information is disabled by default.
      */
-    explicit AdditionalData(const unsigned int max_n_tmp_vectors      = 30,
-                            const bool right_preconditioning          = false,
-                            const bool use_default_residual           = true,
-                            const bool force_re_orthogonalization     = false,
-                            const bool batched_mode                   = false);
+    explicit AdditionalData(
+      const unsigned int max_n_tmp_vectors           = 30,
+      const bool         right_preconditioning       = false,
+      const bool         use_default_residual        = true,
+      const bool         force_re_orthogonalization  = false,
+      const bool         batched_mode                = false,
+      const OrthogonalizationStrategy orthogonalization_strategy =
+        OrthogonalizationStrategy::modified_gram_schmidt);
 
     /**
      * Maximum number of temporary vectors. This parameter controls the size
@@ -234,6 +262,11 @@ public:
      * all signals, eigenvalue computations, and log stream are disabled.
      */
     bool batched_mode;
+
+    /**
+     * Strategy to orthogonalize vectors.
+     */
+    OrthogonalizationStrategy orthogonalization_strategy;
   };
 
   /**
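For orientation, the new `orthogonalization_strategy` field is the only user-visible knob this patch adds. The following is a minimal usage sketch, not part of the patch; the matrix, vector, and preconditioner choices are illustrative placeholders.

  #include <deal.II/lac/precondition.h>
  #include <deal.II/lac/solver_control.h>
  #include <deal.II/lac/solver_gmres.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  // Solve A x = b with GMRES, selecting the new classical Gram-Schmidt
  // orthogonalization instead of the default modified Gram-Schmidt.
  void solve_with_classical_gram_schmidt(const SparseMatrix<double> &A,
                                         Vector<double> &            x,
                                         const Vector<double> &      b)
  {
    SolverControl control(1000, 1e-12 * b.l2_norm());

    SolverGMRES<Vector<double>>::AdditionalData data;
    data.orthogonalization_strategy =
      SolverGMRES<Vector<double>>::AdditionalData::OrthogonalizationStrategy::
        classical_gram_schmidt;

    SolverGMRES<Vector<double>> solver(control, data);
    solver.solve(A, x, b, PreconditionIdentity());
  }

All other AdditionalData settings keep their documented defaults (30 temporary vectors, left preconditioning, re-orthogonalization only when necessary).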
@@ -407,28 +440,6 @@ protected:
                   Vector<double> &si,
                   int             col) const;
 
-  /**
-   * Orthogonalize the vector @p vv against the @p dim (orthogonal) vectors
-   * given by the first argument using the modified Gram-Schmidt algorithm.
-   * The factors used for orthogonalization are stored in @p h. The boolean @p
-   * re_orthogonalize specifies whether the modified Gram-Schmidt algorithm
-   * should be applied twice. The algorithm checks loss of orthogonality in
-   * the procedure every fifth step and sets the flag to true in that case.
-   * All subsequent iterations use re-orthogonalization.
-   * Calls the signal re_orthogonalize_signal if it is connected.
-   */
-  static double
-  iterated_modified_gram_schmidt(
-    const internal::SolverGMRESImplementation::TmpVectors<VectorType>
-      &                orthogonal_vectors,
-    const unsigned int dim,
-    const unsigned int accumulated_iterations,
-    VectorType &       vv,
-    Vector<double> &   h,
-    bool &             re_orthogonalize,
-    const boost::signals2::signal<void(int)> &re_orthogonalize_signal =
-      boost::signals2::signal<void(int)>());
-
   /**
    * Estimates the eigenvalues from the Hessenberg matrix, H_orig, generated
    * during the inner iterations. Uses these estimates to compute the condition
@@ -644,16 +655,18 @@ namespace internal
 template <class VectorType>
 inline SolverGMRES<VectorType>::AdditionalData::AdditionalData(
-  const unsigned int max_n_tmp_vectors,
-  const bool         right_preconditioning,
-  const bool         use_default_residual,
-  const bool         force_re_orthogonalization,
-  const bool         batched_mode)
+  const unsigned int              max_n_tmp_vectors,
+  const bool                      right_preconditioning,
+  const bool                      use_default_residual,
+  const bool                      force_re_orthogonalization,
+  const bool                      batched_mode,
+  const OrthogonalizationStrategy orthogonalization_strategy)
   : max_n_tmp_vectors(max_n_tmp_vectors)
   , right_preconditioning(right_preconditioning)
   , use_default_residual(use_default_residual)
   , force_re_orthogonalization(force_re_orthogonalization)
   , batched_mode(batched_mode)
+  , orthogonalization_strategy(orthogonalization_strategy)
 {
   Assert(3 <= max_n_tmp_vectors,
          ExcMessage("SolverGMRES needs at least three "
@@ -710,81 +723,375 @@ SolverGMRES<VectorType>::givens_rotation(Vector<double> &h,
 
 
 
-template <class VectorType>
-inline double
-SolverGMRES<VectorType>::iterated_modified_gram_schmidt(
-  const internal::SolverGMRESImplementation::TmpVectors<VectorType>
-    &                orthogonal_vectors,
-  const unsigned int dim,
-  const unsigned int accumulated_iterations,
-  VectorType &       vv,
-  Vector<double> &   h,
-  bool &             reorthogonalize,
-  const boost::signals2::signal<void(int)> &reorthogonalize_signal)
+namespace internal
 {
-  Assert(dim > 0, ExcInternalError());
-  const unsigned int inner_iteration = dim - 1;
+  namespace SolverGMRESImplementation
+  {
+    template <class VectorType>
+    void
+    Tvmult_add(const unsigned int dim,
+               const VectorType & vv,
+               const internal::SolverGMRESImplementation::TmpVectors<VectorType>
+                 &              orthogonal_vectors,
+               Vector<double> &h)
+    {
+      for (unsigned int i = 0; i < dim; ++i)
+        h[i] += vv * orthogonal_vectors[i];
+    }
+
+
+
+    template <typename Number>
+    void
+    Tvmult_add(
+      const unsigned int                                dim,
+      const LinearAlgebra::distributed::Vector<Number> &vv,
+      const internal::SolverGMRESImplementation::TmpVectors<
+        LinearAlgebra::distributed::Vector<Number>>
+        &              orthogonal_vectors,
+      Vector<double> &h)
+    {
+      unsigned int j = 0;
+
+      if (dim <= 128)
+        {
+          // optimized path
+          static constexpr unsigned int n_lanes =
+            VectorizedArray<Number>::size();
+
+          VectorizedArray<Number> hs[128];
+          for (unsigned int d = 0; d < dim; ++d)
+            hs[d] = 0.0;
+
+          unsigned int c = 0;
+
+          for (; c < vv.locally_owned_size() / n_lanes / 4;
+               ++c, j += n_lanes * 4)
+            for (unsigned int i = 0; i < dim; ++i)
+              {
+                VectorizedArray<Number> vvec[4];
+                for (unsigned int k = 0; k < 4; ++k)
+                  vvec[k].load(vv.begin() + j + k * n_lanes);
+
+                for (unsigned int k = 0; k < 4; ++k)
+                  {
+                    VectorizedArray<Number> temp;
+                    temp.load(orthogonal_vectors[i].begin() + j + k * n_lanes);
+                    hs[i] += temp * vvec[k];
+                  }
+              }
+
+          c *= 4;
+          for (; c < vv.locally_owned_size() / n_lanes; ++c, j += n_lanes)
+            for (unsigned int i = 0; i < dim; ++i)
+              {
+                VectorizedArray<Number> vvec, temp;
+                vvec.load(vv.begin() + j);
+                temp.load(orthogonal_vectors[i].begin() + j);
+                hs[i] += temp * vvec;
+              }
+
+          for (unsigned int i = 0; i < dim; ++i)
+            for (unsigned int v = 0; v < n_lanes; ++v)
+              h(i) += hs[i][v];
+        }
+
+      // remainder loop of optimized path or non-optimized path (if
+      // dim>128)
+      for (; j < vv.locally_owned_size(); ++j)
+        for (unsigned int i = 0; i < dim; ++i)
+          h(i) += orthogonal_vectors[i].local_element(j) * vv.local_element(j);
+
+      Utilities::MPI::sum(h, MPI_COMM_WORLD, h);
+    }
+
+
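Taken together, the Tvmult_add kernels above and the substract_and_norm kernels that follow implement one classical Gram-Schmidt pass against the Krylov basis: first accumulate h += V^T v, then subtract v -= V h and return the remaining norm. As a reading aid only, here is a minimal non-vectorized, non-MPI sketch of those two steps in plain deal.II vector operations; the names reference_tvmult_add and reference_subtract_and_norm are illustrative and not part of the patch.

  #include <deal.II/lac/vector.h>

  #include <vector>

  using namespace dealii;

  // h += V^T v: projections of v onto the first dim basis vectors.
  template <typename VectorType>
  void reference_tvmult_add(const unsigned int             dim,
                            const VectorType &             v,
                            const std::vector<VectorType> &basis,
                            Vector<double> &               h)
  {
    for (unsigned int i = 0; i < dim; ++i)
      h[i] += v * basis[i];
  }

  // v -= V h, returning the l2 norm of the orthogonalized vector.
  template <typename VectorType>
  double reference_subtract_and_norm(const unsigned int             dim,
                                     const std::vector<VectorType> &basis,
                                     const Vector<double> &         h,
                                     VectorType &                   v)
  {
    for (unsigned int i = 0; i < dim; ++i)
      v.add(-h(i), basis[i]);
    return v.l2_norm();
  }

The vectorized overloads in the patch fuse these loops over the locally owned range of a LinearAlgebra::distributed::Vector and perform a single MPI reduction per pass, which is the point of offering classical Gram-Schmidt as an alternative to the modified variant.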
+    template <class VectorType>
+    double
+    substract_and_norm(
+      const unsigned int dim,
+      const internal::SolverGMRESImplementation::TmpVectors<VectorType>
+        &                   orthogonal_vectors,
+      const Vector<double> &h,
+      VectorType &          vv)
+    {
+      Assert(dim > 0, ExcInternalError());
+
+      for (unsigned int i = 0; i < dim - 1; ++i)
+        vv.add(-h(i), orthogonal_vectors[i]);
+
+      return vv.add_and_dot(-h(dim - 1), orthogonal_vectors[dim - 1], vv);
+    }
+
+
+
+    template <typename Number>
+    double
+    substract_and_norm(
+      const unsigned int dim,
+      const internal::SolverGMRESImplementation::TmpVectors<
+        LinearAlgebra::distributed::Vector<Number>>
+        &                                         orthogonal_vectors,
+      const Vector<double> &                      h,
+      LinearAlgebra::distributed::Vector<Number> &vv)
+    {
+      static constexpr unsigned int n_lanes = VectorizedArray<Number>::size();
+
+      double                  norm_vv_temp            = 0.0;
+      VectorizedArray<Number> norm_vv_temp_vectorized = 0.0;
+
+      unsigned int j = 0;
+      unsigned int c = 0;
+      for (; c < vv.locally_owned_size() / n_lanes / 4; ++c, j += n_lanes * 4)
+        {
+          VectorizedArray<Number> temp[4];
+
+          for (unsigned int k = 0; k < 4; ++k)
+            temp[k].load(vv.begin() + j + k * n_lanes);
+
+          for (unsigned int i = 0; i < dim; ++i)
+            {
+              const double factor = h(i);
+              for (unsigned int k = 0; k < 4; ++k)
+                {
+                  VectorizedArray<Number> vec;
+                  vec.load(orthogonal_vectors[i].begin() + j + k * n_lanes);
+                  temp[k] -= factor * vec;
+                }
+            }
+
+          for (unsigned int k = 0; k < 4; ++k)
+            temp[k].store(vv.begin() + j + k * n_lanes);
+
+          norm_vv_temp_vectorized += (temp[0] * temp[0] + temp[1] * temp[1]) +
+                                     (temp[2] * temp[2] + temp[3] * temp[3]);
+        }
+
+      c *= 4;
+      for (; c < vv.locally_owned_size() / n_lanes; ++c, j += n_lanes)
+        {
+          VectorizedArray<Number> temp;
+          temp.load(vv.begin() + j);
+
+          for (unsigned int i = 0; i < dim; ++i)
+            {
+              VectorizedArray<Number> vec;
+              vec.load(orthogonal_vectors[i].begin() + j);
+              temp -= h(i) * vec;
+            }
 
-  // need initial norm for detection of re-orthogonalization, see below
-  double norm_vv_start = 0;
-  const bool consider_reorthogonalize =
-    (reorthogonalize == false) && (inner_iteration % 5 == 4);
-  if (consider_reorthogonalize)
-    norm_vv_start = vv.l2_norm();
+          temp.store(vv.begin() + j);
 
-  for (unsigned int i = 0; i < dim; ++i)
-    h[i] = 0;
+          norm_vv_temp_vectorized += temp * temp;
+        }
+
+      for (unsigned int v = 0; v < n_lanes; ++v)
+        norm_vv_temp += norm_vv_temp_vectorized[v];
+
+      for (; j < vv.locally_owned_size(); ++j)
+        {
+          double temp = vv(j);
+          for (unsigned int i = 0; i < dim; ++i)
+            temp -= h(i) * orthogonal_vectors[i](j);
+          vv(j) = temp;
+
+          norm_vv_temp += temp * temp;
+        }
+
+      return std::sqrt(Utilities::MPI::sum(norm_vv_temp, MPI_COMM_WORLD));
+    }
 
-  for (unsigned int c = 0; c < 2; ++c) // 0: orthogonalize, 1: reorthogonalize
+
+    template <class VectorType>
+    double
+    sadd_and_norm(VectorType &      v,
+                  const double      factor_a,
+                  const VectorType &b,
+                  const double      factor_b)
     {
-      // Orthogonalization
-      double htmp = vv * orthogonal_vectors[0];
-      h(0) += htmp;
+      v.sadd(factor_a, factor_b, b);
+      return v.l2_norm();
+    }
+
+
+    template <typename Number>
+    double
+    sadd_and_norm(
+      LinearAlgebra::distributed::Vector<Number> &      v,
+      const double                                      factor_a,
+      const LinearAlgebra::distributed::Vector<Number> &b,
+      const double                                      factor_b)
+    {
+      double norm = 0;
+
+      for (unsigned int j = 0; j < v.locally_owned_size(); ++j)
+        {
+          const double temp =
+            v.local_element(j) * factor_a + b.local_element(j) * factor_b;
+
+          v.local_element(j) = temp;
+
+          norm += temp * temp;
+        }
+
+      return std::sqrt(Utilities::MPI::sum(norm, MPI_COMM_WORLD));
+    }
+
+
+
+    template <class VectorType>
+    void
+    add(VectorType &          p,
+        const unsigned int    dim,
+        const Vector<double> &h,
+        const internal::SolverGMRESImplementation::TmpVectors<VectorType>
+          &        tmp_vectors,
+        const bool zero_out)
+    {
+      if (zero_out)
+        p.equ(h(0), tmp_vectors[0]);
+      else
+        p.add(h(0), tmp_vectors[0]);
+      for (unsigned int i = 1; i < dim; ++i)
+        p.add(h(i), tmp_vectors[i]);
+    }
+
+
+
+    template
+    void
+    add(LinearAlgebra::distributed::Vector &p,
+        const unsigned int dim,
+ const Vector & h, + const internal::SolverGMRESImplementation::TmpVectors< + LinearAlgebra::distributed::Vector> + & tmp_vectors, + const bool zero_out) + { + for (unsigned int j = 0; j < p.locally_owned_size(); ++j) { - htmp = vv.add_and_dot(-htmp, - orthogonal_vectors[i - 1], - orthogonal_vectors[i]); - h(i) += htmp; + double temp = zero_out ? 0 : p.local_element(j); + for (unsigned int i = 0; i < dim; ++i) + temp += tmp_vectors[i].local_element(j) * h(i); + p.local_element(j) = temp; } + } - double norm_vv = - std::sqrt(vv.add_and_dot(-htmp, orthogonal_vectors[dim - 1], vv)); - if (c == 1) - return norm_vv; // reorthogonalization already performed -> finished - // Re-orthogonalization if loss of orthogonality detected. For the test, - // use a strategy discussed in C. T. Kelley, Iterative Methods for Linear - // and Nonlinear Equations, SIAM, Philadelphia, 1995: Compare the norm of - // vv after orthogonalization with its norm when starting the - // orthogonalization. If vv became very small (here: less than the square - // root of the machine precision times 10), it is almost in the span of - // the previous vectors, which indicates loss of precision. + /** + * Orthogonalize the vector @p vv against the @p dim (orthogonal) vectors + * given by @p orthogonal_vectors using the modified or classical + * Gram-Schmidt algorithm. + * The factors used for orthogonalization are stored in @p h. The boolean @p + * re_orthogonalize specifies whether the Gram-Schmidt algorithm + * should be applied twice. The algorithm checks loss of orthogonality in + * the procedure every fifth step and sets the flag to true in that case. + * All subsequent iterations use re-orthogonalization. + * Calls the signal re_orthogonalize_signal if it is connected. + */ + template + inline double + iterated_gram_schmidt( + const typename SolverGMRES::AdditionalData:: + OrthogonalizationStrategy orthogonalization_strategy, + const internal::SolverGMRESImplementation::TmpVectors + & orthogonal_vectors, + const unsigned int dim, + const unsigned int accumulated_iterations, + VectorType & vv, + Vector & h, + bool & reorthogonalize, + const boost::signals2::signal &reorthogonalize_signal = + boost::signals2::signal()) + { + Assert(dim > 0, ExcInternalError()); + const unsigned int inner_iteration = dim - 1; + + // need initial norm for detection of re-orthogonalization, see below + double norm_vv_start = 0; + const bool consider_reorthogonalize = + (reorthogonalize == false) && (inner_iteration % 5 == 4); if (consider_reorthogonalize) + norm_vv_start = vv.l2_norm(); + + for (unsigned int i = 0; i < dim; ++i) + h[i] = 0; + + for (unsigned int c = 0; c < 2; + ++c) // 0: orthogonalize, 1: reorthogonalize { - if (norm_vv > - 10. 
* norm_vv_start * - std::sqrt(std::numeric_limits< - typename VectorType::value_type>::epsilon())) - return norm_vv; + // Orthogonalization + double norm_vv = 0.0; + if (orthogonalization_strategy == + SolverGMRES::AdditionalData:: + OrthogonalizationStrategy::modified_gram_schmidt) + { + double htmp = vv * orthogonal_vectors[0]; + h(0) += htmp; + for (unsigned int i = 1; i < dim; ++i) + { + htmp = vv.add_and_dot(-htmp, + orthogonal_vectors[i - 1], + orthogonal_vectors[i]); + h(i) += htmp; + } + + norm_vv = std::sqrt( + vv.add_and_dot(-htmp, orthogonal_vectors[dim - 1], vv)); + } + else if (orthogonalization_strategy == + SolverGMRES::AdditionalData:: + OrthogonalizationStrategy::classical_gram_schmidt) + { + Tvmult_add(dim, vv, orthogonal_vectors, h); + norm_vv = substract_and_norm(dim, orthogonal_vectors, h, vv); + } else { - reorthogonalize = true; - if (!reorthogonalize_signal.empty()) - reorthogonalize_signal(accumulated_iterations); + AssertThrow(false, ExcNotImplemented()); } - } - if (reorthogonalize == false) - return norm_vv; // no reorthogonalization needed -> finished - } + if (c == 1) + return norm_vv; // reorthogonalization already performed -> finished + + // Re-orthogonalization if loss of orthogonality detected. For the + // test, use a strategy discussed in C. T. Kelley, Iterative Methods + // for Linear and Nonlinear Equations, SIAM, Philadelphia, 1995: + // Compare the norm of vv after orthogonalization with its norm when + // starting the orthogonalization. If vv became very small (here: less + // than the square root of the machine precision times 10), it is + // almost in the span of the previous vectors, which indicates loss of + // precision. + if (consider_reorthogonalize) + { + if (norm_vv > + 10. * norm_vv_start * + std::sqrt(std::numeric_limits< + typename VectorType::value_type>::epsilon())) + return norm_vv; - AssertThrow(false, ExcInternalError()); + else + { + reorthogonalize = true; + if (!reorthogonalize_signal.empty()) + reorthogonalize_signal(accumulated_iterations); + } + } - return 0.0; -} + if (reorthogonalize == false) + return norm_vv; // no reorthogonalization needed -> finished + } + + AssertThrow(false, ExcInternalError()); + + return 0.0; + } + } // namespace SolverGMRESImplementation +} // namespace internal @@ -935,19 +1242,23 @@ SolverGMRES::solve(const MatrixType & A, // reset this vector to the right size h.reinit(n_tmp_vectors - 1); + double rho = 0.0; + if (left_precondition) { A.vmult(p, x); p.sadd(-1., 1., b); preconditioner.vmult(v, p); + rho = v.l2_norm(); } else { A.vmult(v, x); - v.sadd(-1., 1., b); - }; - - double rho = v.l2_norm(); + rho = dealii::internal::SolverGMRESImplementation::sadd_and_norm(v, + -1, + b, + 1.0); + } // check the residual here as well since it may be that we got the exact // (or an almost exact) solution vector at the outset. 
if we wouldn't @@ -1018,13 +1329,15 @@ SolverGMRES::solve(const MatrixType & A, dim = inner_iteration + 1; const double s = - iterated_modified_gram_schmidt(tmp_vectors, - dim, - accumulated_iterations, - vv, - h, - re_orthogonalize, - re_orthogonalize_signal); + internal::SolverGMRESImplementation::iterated_gram_schmidt( + additional_data.orthogonalization_strategy, + tmp_vectors, + dim, + accumulated_iterations, + vv, + h, + re_orthogonalize, + re_orthogonalize_signal); h(inner_iteration + 1) = s; // s=0 is a lucky breakdown, the solver will reach convergence, @@ -1122,13 +1435,12 @@ SolverGMRES::solve(const MatrixType & A, condition_number_signal); if (left_precondition) - for (unsigned int i = 0; i < dim; ++i) - x.add(h(i), tmp_vectors[i]); + dealii::internal::SolverGMRESImplementation::add( + x, dim, h, tmp_vectors, false); else { - p.equ(h(0), tmp_vectors[0]); - for (unsigned int i = 1; i < dim; ++i) - p.add(h(i), tmp_vectors[i]); + dealii::internal::SolverGMRESImplementation::add( + p, dim, h, tmp_vectors, true); preconditioner.vmult(v, p); x.add(1., v); };
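The re-orthogonalization logic itself is unchanged by this patch: iterated_gram_schmidt still applies the Kelley criterion every fifth inner iteration and, when it fires, notifies the re_orthogonalize_signal referenced above. The sketch below shows how a user could observe this from the outside via SolverGMRES's connect_re_orthogonalization_slot() member, which feeds that signal; it is an illustrative snippet, not part of the patch, and the function name attach_reorthogonalization_logger is hypothetical.

  #include <deal.II/base/logstream.h>
  #include <deal.II/lac/solver_gmres.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  // Log each time GMRES decides to re-orthogonalize the Krylov basis,
  // independently of whether modified or classical Gram-Schmidt is selected.
  void attach_reorthogonalization_logger(SolverGMRES<Vector<double>> &solver)
  {
    solver.connect_re_orthogonalization_slot(
      [](const int accumulated_iterations) {
        deallog << "Re-orthogonalization triggered at iteration "
                << accumulated_iterations << std::endl;
      });
  }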