From: Denis Davydov Date: Tue, 1 Aug 2017 18:48:59 +0000 (+0200) Subject: extend pArpack interface to modes 1 and 2 X-Git-Tag: v9.0.0-rc1~1361^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F4678%2Fhead;p=dealii.git extend pArpack interface to modes 1 and 2 --- diff --git a/doc/news/changes/minor/20170801DenisDavydov b/doc/news/changes/minor/20170801DenisDavydov new file mode 100644 index 0000000000..9751b2751b --- /dev/null +++ b/doc/news/changes/minor/20170801DenisDavydov @@ -0,0 +1,4 @@ +New: Extend the pArpack solver to cover mode 1 (standard eigenvalue problem) and +mode 2 (generalized eigenvalue problem without spectral transformation).
+(Denis Davydov, 2017/08/01) diff --git a/include/deal.II/lac/parpack_solver.h b/include/deal.II/lac/parpack_solver.h index 5548deb3f5..47fc83538c 100644 --- a/include/deal.II/lac/parpack_solver.h +++ b/include/deal.II/lac/parpack_solver.h @@ -108,10 +108,17 @@ extern "C" { * eigenvector/eigenvalue pairs to solve for. Here, lambda is a * vector that will contain the eigenvalues computed, x a vector * of objects of type V that will contain the eigenvectors - * computed. OP is an inverse operation for the matrix A - + * computed. + * + * Currently, only three modes of (P)Arpack are implemented. In mode 3 (default), + * OP is an inverse operation for the matrix A - * sigma * B, where sigma is a shift value, set to zero - by default. Note that (P)Arpack supports other transformations, but currently - * this class implements only shift-and-invert mode. + * by default. In mode 2, on the other hand, OP is the inverse of M. + * Finally, mode 1 corresponds to the standard eigenvalue problem + * $Ax=\lambda x$ without spectral transformation. + * The mode can be specified via the AdditionalData object. Note that for + * shift-and-invert (mode=3), the sought eigenpairs are those after the + * spectral transformation is applied. * * The OP can be specified by using a LinearOperator: * @code @@ -125,9 +132,6 @@ extern "C" { * const auto op_shift_invert = inverse_operator(op_shift, cg, PreconditionIdentity ()); * @endcode * - * Through the AdditionalData the user can specify some of the parameters to - * be set. - * * The class is intended to be used with MPI and can work on arbitrary vector * and matrix distributed classes. Both symmetric and non-symmetric * A are supported. @@ -137,7 +141,7 @@ extern "C" { * also how to set the parameters appropriately please take a look into the * PARPACK manual. * - * @author Denis Davydov, 2015. + * @author Denis Davydov, 2015, 2017. */ template class PArpackSolver : public Subscriptor @@ -261,10 +265,12 @@ public: const unsigned int number_of_arnoldi_vectors; const WhichEigenvalues eigenvalue_of_interest; const bool symmetric; + const int mode; AdditionalData( const unsigned int number_of_arnoldi_vectors = 15, const WhichEigenvalues eigenvalue_of_interest = largest_magnitude, - const bool symmetric = false); + const bool symmetric = false, + const int mode = 3); }; /** @@ -307,6 +313,9 @@ public: * Set shift @p sigma for shift-and-invert spectral transformation. * * If this function is not called, the shift is assumed to be zero. + * + * @note Only relevant for mode=3 (see the general documentation of this + * class for a definition of what the different modes are). */ void set_shift(const std::complex sigma); @@ -314,6 +323,10 @@ public: /** * Solve the generalized eigensprectrum problem $A x=\lambda B x$ by calling * the pd(n/s)eupd and pd(n/s)aupd functions of * PARPACK. + * + * In mode=3, @p inverse should correspond to $[A-\sigma B]^{-1}$, + * whereas in mode=2 it should represent $B^{-1}$. For + * mode=1 both @p B and @p inverse are ignored. 
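For reference, the three supported modes amount to the following choices of the operator OP that (P)Arpack iterates with and of the matrix B used by the underlying Arnoldi process; this merely restates, in formulas, the comments that accompany the iparam setup further down in this patch:

$A x = \lambda x$, with $OP = A$ and $B = I$ (mode 1);
$A x = \lambda M x$, with $OP = M^{-1} A$ and $B = M$ (mode 2);
$A x = \lambda M x$, with $OP = (A - \sigma M)^{-1} M$ and $B = M$ (mode 3, shift-and-invert).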
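The following is a minimal sketch of how the new mode parameter might be used to solve the generalized problem $A x = \lambda M x$ in mode 2, i.e. without shift-and-invert. It mirrors the test tests/arpack/step-36_parpack_mf_02.cc added further down in this patch; the objects mass and laplace (the matrix-free mass and Laplace operators playing the roles of M and A), mpi_communicator, and the output containers lambda and eigenfunctions are assumed to be set up as in that test.

@code
// Sketch only: `mass`, `laplace`, `mpi_communicator`, `lambda` and
// `eigenfunctions` are assumed to be set up as in step-36_parpack_mf_02.cc.
typedef LinearAlgebra::distributed::Vector<double> VectorType;

// Iterative inverse of the mass matrix M, passed as `inverse` in mode 2:
ReductionControl inner_control (1000, 0.0, 1.e-14);
SolverCG<VectorType> cg (inner_control);
PreconditionIdentity preconditioner;
const auto op_M_inverse = inverse_operator (linear_operator<VectorType>(mass),
                                            cg,
                                            preconditioner);

// Request mode 2 through the last argument of the extended AdditionalData:
PArpackSolver<VectorType>::AdditionalData
additional_data (20,                                            // Arnoldi vectors
                 PArpackSolver<VectorType>::largest_magnitude,  // which eigenvalues
                 true,                                          // symmetric problem
                 2);                                            // (P)Arpack mode

SolverControl solver_control (1000, 1e-9);
PArpackSolver<VectorType> eigensolver (solver_control,
                                       mpi_communicator,
                                       additional_data);
eigensolver.reinit (eigenfunctions[0]);

// In mode 2 the third argument is interpreted as M^{-1}; in mode 1 the mass
// matrix and inverse arguments are ignored and a standard problem is solved.
eigensolver.solve (laplace, mass, op_M_inverse,
                   lambda, eigenfunctions, eigenfunctions.size());
@endcode

The second matrix-free test added below exercises mode 1 in the same way, still passing the mass operator and the inverse for the arguments that are ignored in that mode.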
*/ template @@ -537,11 +550,13 @@ template PArpackSolver::AdditionalData:: AdditionalData (const unsigned int number_of_arnoldi_vectors, const WhichEigenvalues eigenvalue_of_interest, - const bool symmetric) + const bool symmetric, + const int mode) : number_of_arnoldi_vectors(number_of_arnoldi_vectors), eigenvalue_of_interest(eigenvalue_of_interest), - symmetric(symmetric) + symmetric(symmetric), + mode(mode) { //Check for possible options for symmetric problems if (symmetric) @@ -555,6 +570,8 @@ AdditionalData (const unsigned int number_of_arnoldi_vectors, Assert(eigenvalue_of_interest!=smallest_imaginary_part, ExcMessage("'smallest imaginary part' can only be used for non-symmetric problems!")); } + Assert (mode >= 1 && mode <= 3, + ExcMessage("Currently, only modes 1, 2 and 3 are supported.")); } template @@ -676,7 +693,7 @@ void PArpackSolver::reinit(const IndexSet &locally_owned_dofs, template template void PArpackSolver::solve -(const MatrixType1 &/*system_matrix*/, +(const MatrixType1 &system_matrix, const MatrixType2 &mass_matrix, const INVERSE &inverse, std::vector > &eigenvalues, @@ -709,11 +726,8 @@ void PArpackSolver::solve Assert (additional_data.number_of_arnoldi_vectors > 2*n_eigenvalues+1, PArpackExcSmallNumberofArnoldiVectors( additional_data.number_of_arnoldi_vectors, n_eigenvalues)); - // ARPACK mode for dnaupd, here only - // Mode 3: K*x = lambda*M*x, K symmetric, M symmetric positive semi-definite - //c ===> OP = (inv[K - sigma*M])*M and B = M. - //c ===> Shift-and-Invert mode - int mode = 3; + + int mode = additional_data.mode; // reverse communication parameter // must be zero on the first call to pdnaupd @@ -721,7 +735,9 @@ void PArpackSolver::solve // 'G' generalized eigenvalue problem // 'I' standard eigenvalue problem - char bmat[2] = "G"; + char bmat[2]; + bmat[0] = (mode == 1 ) ? 'I' : 'G'; + bmat[1] = '\0'; // Specify the eigenvalues of interest, possible parameters: // "LA" algebraically largest @@ -781,9 +797,9 @@ void PArpackSolver::solve iparam[3] = 1; // Sets the mode of dsaupd: - // 1 is exact shifting, - // 2 is user-supplied shifts, - // 3 is shift-invert mode, + // 1 is A*x=lambda*x, OP = A, B = I + // 2 is A*x = lambda*M*x, OP = inv[M]*A, B = M + // 3 is shift-invert mode, OP = inv[A-sigma*M]*M, B = M // 4 is buckling mode, // 5 is Cayley mode. @@ -815,192 +831,157 @@ void PArpackSolver::solve &resid[0], &ncv, &v[0], &ldv, &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info); + AssertThrow (info == 0, PArpackExcInfoPdnaupd(info)); + + // if we converge, we shall not modify anything in work arrays! if (ido == 99) break; - switch (mode) - { -// OP = (inv[K - sigma*M])*M - case 3: + // IPNTR(1) is the pointer into WORKD for X, + // IPNTR(2) is the pointer into WORKD for Y. + const int shift_x = ipntr[0]-1; + const int shift_y = ipntr[1]-1; + Assert (shift_x>=0, dealii::ExcInternalError() ); + Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() ); + Assert (shift_y>=0, dealii::ExcInternalError() ); + Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() ); + + src = 0.; + + // switch based on both ido and mode + if ((ido == -1) || + (ido == 1 && mode<3)) + // compute Y = OP * X { - switch (ido) - { - case -1: - // compute Y = OP * X where - // IPNTR(1) is the pointer into WORKD for X, - // IPNTR(2) is the pointer into WORKD for Y. 
+ src.add (nloc, + &local_indices[0], + &workd[0]+shift_x ); + src.compress (VectorOperation::add); + + if (mode == 3) + // OP = inv[K - sigma*M]*M { - const int shift_x = ipntr[0]-1; - const int shift_y = ipntr[1]-1; - Assert (shift_x>=0, dealii::ExcInternalError() ); - Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - Assert (shift_y>=0, dealii::ExcInternalError() ); - Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - - src = 0.0; - src.add (nloc, - &local_indices[0], - &workd[0]+shift_x ); - src.compress (VectorOperation::add); - - // multiplication with mass matrix M mass_matrix.vmult(tmp, src); - // solving linear system inverse.vmult(dst,tmp); - - // store the result - dst.extract_subvector_to (local_indices.begin(), - local_indices.end(), - &workd[0]+shift_y ); } - break; - - case 1: - // compute Y = OP * X where - // IPNTR(1) is the pointer into WORKD for X, - // IPNTR(2) is the pointer into WORKD for Y. - // In mode 3,4 and 5, the vector B * X is already - // available in WORKD(ipntr(3)). It does not - // need to be recomputed in forming OP * X. + else if (mode == 2) + // OP = inv[M]*K { - const int shift_x = ipntr[0]-1; - const int shift_y = ipntr[1]-1; - const int shift_b_x = ipntr[2]-1; - - Assert (shift_x>=0, dealii::ExcInternalError() ); - Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - Assert (shift_y>=0, dealii::ExcInternalError() ); - Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - Assert (shift_b_x>=0, dealii::ExcInternalError() ); - Assert (shift_b_x+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - Assert (shift_y>=0, dealii::ExcInternalError() ); - Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - - src = 0.0; // B*X - src.add (nloc, - &local_indices[0], - &workd[0]+shift_b_x ); - - tmp = 0.0; // X - tmp.add (nloc, - &local_indices[0], - &workd[0]+shift_x); - - src.compress (VectorOperation::add); - tmp.compress (VectorOperation::add); - - // solving linear system - inverse.vmult(dst,src); - - // store the result - dst.extract_subvector_to (local_indices.begin(), + system_matrix.vmult(tmp, src); + // store M*X in X + tmp.extract_subvector_to (local_indices.begin(), local_indices.end(), - &workd[0]+shift_y ); - + &workd[0]+shift_x); + inverse.vmult(dst,tmp); } - break; - - case 2: - // compute Y = B * X where - // IPNTR(1) is the pointer into WORKD for X, - // IPNTR(2) is the pointer into WORKD for Y. + else if (mode == 1) { + system_matrix.vmult(dst, src); + } + else + AssertThrow (false, PArpackExcMode(mode)); - const int shift_x = ipntr[0]-1; - const int shift_y = ipntr[1]-1; - Assert (shift_x>=0, dealii::ExcInternalError() ); - Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - Assert (shift_y>=0, dealii::ExcInternalError() ); - Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() ); - - src = 0.0; - src.add (nloc, - &local_indices[0], - &workd[0]+shift_x ); - src.compress (VectorOperation::add); - - // Multiplication with mass matrix M - mass_matrix.vmult(dst, src); - - // store the result - dst.extract_subvector_to (local_indices.begin(), - local_indices.end(), - &workd[0]+shift_y); + } + else if (ido == 1 && mode >= 3) + // compute Y = OP * X for mode 3, 4 and 5, where + // the vector B * X is already available in WORKD(ipntr(3)). 
+ { + const int shift_b_x = ipntr[2]-1; + Assert (shift_b_x>=0, dealii::ExcInternalError() ); + Assert (shift_b_x+nloc <= (int)workd.size(), dealii::ExcInternalError() ); + + // B*X + src.add (nloc, + &local_indices[0], + &workd[0]+shift_b_x ); + src.compress (VectorOperation::add); + + // solving linear system + Assert (mode == 3, ExcNotImplemented()); + inverse.vmult(dst,src); + } + else if (ido == 2) + // compute Y = B * X + { + src.add (nloc, + &local_indices[0], + &workd[0]+shift_x ); + src.compress (VectorOperation::add); + // Multiplication with mass matrix M + if (mode == 1) + { + dst = src; } - break; - - default: - AssertThrow (false, PArpackExcIdo(ido)); - break; + else + // mode 2,3 and 5 have B=M + { + mass_matrix.vmult(dst, src); } } - break; - default: - AssertThrow (false, PArpackExcMode(mode)); - break; - } - } + else + AssertThrow (false, PArpackExcIdo(ido)); + // Note: IDO = 3 does not appear to be required for currently + // implemented modes - if (info<0) - { - AssertThrow (false, PArpackExcInfoPdnaupd(info)); - } - else - { - // 1 - compute eigenvectors, - // 0 - only eigenvalues - int rvec = 1; + // store the result + dst.extract_subvector_to (local_indices.begin(), + local_indices.end(), + &workd[0]+shift_y); + } // end of pd*aupd_ loop - // which eigenvectors - char howmany[4] = "All"; + // 1 - compute eigenvectors, + // 0 - only eigenvalues + int rvec = 1; - std::vector eigenvalues_real (n_eigenvalues+1, 0.); - std::vector eigenvalues_im (n_eigenvalues+1, 0.); + // which eigenvectors + char howmany[4] = "All"; - // call of ARPACK pdneupd routine - if (additional_data.symmetric) - pdseupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0], - &z[0], &ldz, &sigmar, - bmat, &n_inside_arpack, which, &nev, &tol, - &resid[0], &ncv, &v[0], &ldv, - &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info); - else - pdneupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0], - &eigenvalues_im[0], &v[0], &ldz, &sigmar, &sigmai, - &workev[0], bmat, &n_inside_arpack, which, &nev, &tol, - &resid[0], &ncv, &v[0], &ldv, - &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info); + std::vector eigenvalues_real (n_eigenvalues+1, 0.); + std::vector eigenvalues_im (n_eigenvalues+1, 0.); - if (info == 1) - { - AssertThrow (false, PArpackExcInfoMaxIt(control().max_steps())); - } - else if (info == 3) - { - AssertThrow (false, PArpackExcNoShifts(1)); - } - else if (info!=0) - { - AssertThrow (false, PArpackExcInfoPdneupd(info)); - } + // call of ARPACK pdneupd routine + if (additional_data.symmetric) + pdseupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0], + &z[0], &ldz, &sigmar, + bmat, &n_inside_arpack, which, &nev, &tol, + &resid[0], &ncv, &v[0], &ldv, + &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info); + else + pdneupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0], + &eigenvalues_im[0], &v[0], &ldz, &sigmar, &sigmai, + &workev[0], bmat, &n_inside_arpack, which, &nev, &tol, + &resid[0], &ncv, &v[0], &ldv, + &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info); - for (int i=0; i (eigenvalues_real[i], - eigenvalues_im[i]); + eigenvectors[i].add (nloc, + &local_indices[0], + &v[i*nloc] ); + eigenvectors[i].compress (VectorOperation::add); } + for (size_type i=0; i (eigenvalues_real[i], + eigenvalues_im[i]); + // Throw an error if the solver did not converge. 
AssertThrow (iparam[4] >= (int)n_eigenvalues, PArpackExcConvergedEigenvectors(n_eigenvalues,iparam[4])); diff --git a/tests/arpack/step-36_parpack_mf_02.cc b/tests/arpack/step-36_parpack_mf_02.cc new file mode 100644 index 0000000000..7b425e6739 --- /dev/null +++ b/tests/arpack/step-36_parpack_mf_02.cc @@ -0,0 +1,249 @@ +/* --------------------------------------------------------------------- + * + * Copyright (C) 2017 by the deal.II authors + * + * This file is part of the deal.II library. + * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * The full text of the license can be found in the file LICENSE at + * the top level of the deal.II distribution. + * + * --------------------------------------------------------------------- + + * + * Same as step-36_parpack_mf but solve for largest eigenvalues of Laplace. + * For the same problem slepc/step-36_parallel_02.cc produces: + * 6099.84 + * 6035.05 + * 6035.05 + * 5970.26 + * 5931.69 + */ + +#include "../tests.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include + +#include + +#include +#include + + +const unsigned int dim = 2; + +using namespace dealii; + +const double eps = 1e-10; + +const unsigned int fe_degree = 1; + +void test () +{ + const unsigned int global_mesh_refinement_steps = 5; + const unsigned int number_of_eigenvalues = 5; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator); + const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); + + parallel::distributed::Triangulation triangulation (mpi_communicator); + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (global_mesh_refinement_steps); + + + DoFHandler dof_handler(triangulation); + FE_Q fe(fe_degree); + dof_handler.distribute_dofs (fe); + + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs (dof_handler, + locally_relevant_dofs); + ConstraintMatrix constraints; + constraints.reinit (locally_relevant_dofs); + DoFTools::make_hanging_node_constraints (dof_handler, constraints); + VectorTools::interpolate_boundary_values (dof_handler, + 0, + ZeroFunction (), + constraints); + constraints.close (); + + std::shared_ptr > mf_data(new MatrixFree ()); + { + const QGauss<1> quad (fe_degree+1); + typename MatrixFree::AdditionalData data; + data.tasks_parallel_scheme = + MatrixFree::AdditionalData::partition_color; + data.mapping_update_flags = update_values | update_gradients | update_JxW_values; + mf_data->reinit (dof_handler, constraints, quad, data); + } + + std::vector > eigenfunctions; + std::vector eigenvalues; + MatrixFreeOperators::MassOperator > mass; + MatrixFreeOperators::LaplaceOperator > laplace; + mass.initialize(mf_data); + laplace.initialize(mf_data); + + eigenfunctions.resize (number_of_eigenvalues); + eigenvalues.resize (number_of_eigenvalues); + for (unsigned int i=0; iinitialize_dof_vector (eigenfunctions[i]); + + // test PArpack with matrix-free + { + std::vector > lambda(number_of_eigenvalues); + + // set up iterative inverse + static ReductionControl inner_control_c(dof_handler.n_dofs(), 0.0, 1.e-14); + + typedef 
LinearAlgebra::distributed::Vector VectorType; + SolverCG solver_c(inner_control_c); + PreconditionIdentity preconditioner; + const auto invert = + inverse_operator(linear_operator(mass), + solver_c, + preconditioner); + + const unsigned int num_arnoldi_vectors = 2*eigenvalues.size() + 10; + PArpackSolver >::AdditionalData + additional_data(num_arnoldi_vectors, + PArpackSolver >::largest_magnitude, + true, + 2); + + SolverControl solver_control( + dof_handler.n_dofs(), 1e-9, /*log_history*/ false, /*log_results*/ false); + + PArpackSolver > eigensolver( + solver_control, mpi_communicator, additional_data); + + eigensolver.reinit(eigenfunctions[0]); + // make sure initial vector is orthogonal to the space due to constraints + { + LinearAlgebra::distributed::Vector init_vector; + mf_data->initialize_dof_vector(init_vector); + for (auto it = init_vector.begin(); it != init_vector.end(); ++it) + *it = static_cast(Testing::rand())/static_cast(RAND_MAX); + + constraints.set_zero(init_vector); + eigensolver.set_initial_vector(init_vector); + } + // avoid output of iterative solver: + const unsigned int previous_depth = deallog.depth_file(0); + eigensolver.solve (laplace, + mass, + invert, + lambda, + eigenfunctions, + eigenvalues.size()); + deallog.depth_file(previous_depth); + + for (unsigned int i = 0; i < lambda.size(); i++) + eigenvalues[i] = lambda[i].real(); + + for (unsigned int i=0; i < eigenvalues.size(); i++) + deallog << eigenvalues[i] << std::endl; + + // make sure that we have eigenvectors and they are mass-orthonormal: + // a) (A*x_i-\lambda*B*x_i).L2() == 0 + // b) x_j*B*x_i=\delta_{ij} + { + const double precision = 1e-7; + LinearAlgebra::distributed::Vector Ax(eigenfunctions[0]), Bx(eigenfunctions[0]); + for (unsigned int i=0; i < eigenfunctions.size(); ++i) + { + mass.vmult(Bx,eigenfunctions[i]); + + for (unsigned int j=0; j < eigenfunctions.size(); j++) + { + const double err = std::abs( eigenfunctions[j] * Bx - (i==j)); + Assert( err< precision, + ExcMessage("Eigenvectors " + + Utilities::int_to_string(i) + + " and " + + Utilities::int_to_string(j) + + " are not orthonormal: " + + std::to_string(err))); + } + + laplace.vmult(Ax,eigenfunctions[i]); + Ax.add(-1.0*eigenvalues[i],Bx); + const double err = Ax.l2_norm(); + Assert (err < precision, + ExcMessage("Returned vector " + + Utilities::int_to_string(i) + + " is not an eigenvector: " + + std::to_string(err))); + } + } + } + + + dof_handler.clear (); + deallog << "Ok"< +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include + +#include + +#include +#include + + +const unsigned int dim = 2; + +using namespace dealii; + +const double eps = 1e-10; + +const unsigned int fe_degree = 1; + +void test () +{ + const unsigned int global_mesh_refinement_steps = 5; + const unsigned int number_of_eigenvalues = 5; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator); + const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); + + parallel::distributed::Triangulation triangulation (mpi_communicator); + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (global_mesh_refinement_steps); + + + DoFHandler dof_handler(triangulation); + FE_Q fe(fe_degree); + dof_handler.distribute_dofs (fe); + + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs (dof_handler, + 
locally_relevant_dofs); + ConstraintMatrix constraints; + constraints.reinit (locally_relevant_dofs); + DoFTools::make_hanging_node_constraints (dof_handler, constraints); + VectorTools::interpolate_boundary_values (dof_handler, + 0, + ZeroFunction (), + constraints); + constraints.close (); + + std::shared_ptr > mf_data(new MatrixFree ()); + { + const QGauss<1> quad (fe_degree+1); + typename MatrixFree::AdditionalData data; + data.tasks_parallel_scheme = + MatrixFree::AdditionalData::partition_color; + data.mapping_update_flags = update_values | update_gradients | update_JxW_values; + mf_data->reinit (dof_handler, constraints, quad, data); + } + + std::vector > eigenfunctions; + std::vector eigenvalues; + MatrixFreeOperators::MassOperator > mass; + MatrixFreeOperators::LaplaceOperator > laplace; + mass.initialize(mf_data); + laplace.initialize(mf_data); + + eigenfunctions.resize (number_of_eigenvalues); + eigenvalues.resize (number_of_eigenvalues); + for (unsigned int i=0; iinitialize_dof_vector (eigenfunctions[i]); + + // test PArpack with matrix-free + { + std::vector > lambda(number_of_eigenvalues); + + // set up iterative inverse + static ReductionControl inner_control_c(dof_handler.n_dofs(), 0.0, 1.e-14); + + typedef LinearAlgebra::distributed::Vector VectorType; + SolverCG solver_c(inner_control_c); + PreconditionIdentity preconditioner; + const auto invert = + inverse_operator(linear_operator(mass), + solver_c, + preconditioner); + + const unsigned int num_arnoldi_vectors = 2*eigenvalues.size() + 10; + PArpackSolver >::AdditionalData + additional_data(num_arnoldi_vectors, + PArpackSolver >::largest_magnitude, + true, + 1); + + SolverControl solver_control( + dof_handler.n_dofs(), 1e-9, /*log_history*/ false, /*log_results*/ false); + + PArpackSolver > eigensolver( + solver_control, mpi_communicator, additional_data); + + eigensolver.reinit(eigenfunctions[0]); + // make sure initial vector is orthogonal to the space due to constraints + { + LinearAlgebra::distributed::Vector init_vector; + mf_data->initialize_dof_vector(init_vector); + for (auto it = init_vector.begin(); it != init_vector.end(); ++it) + *it = static_cast(Testing::rand())/static_cast(RAND_MAX); + + constraints.set_zero(init_vector); + eigensolver.set_initial_vector(init_vector); + } + // avoid output of iterative solver: + const unsigned int previous_depth = deallog.depth_file(0); + eigensolver.solve (laplace, + mass, + invert, + lambda, + eigenfunctions, + eigenvalues.size()); + deallog.depth_file(previous_depth); + + for (unsigned int i = 0; i < lambda.size(); i++) + eigenvalues[i] = lambda[i].real(); + + for (unsigned int i=0; i < eigenvalues.size(); i++) + deallog << eigenvalues[i] << std::endl; + + // make sure that we have eigenvectors and they are mass-orthonormal: + // a) (A*x_i-\lambda*x_i).L2() == 0 + // b) x_j*x_i=\delta_{ij} + { + const double precision = 1e-7; + LinearAlgebra::distributed::Vector Ax(eigenfunctions[0]); + for (unsigned int i=0; i < eigenfunctions.size(); ++i) + { + for (unsigned int j=0; j < eigenfunctions.size(); j++) + { + const double err = std::abs( eigenfunctions[j] * eigenfunctions[i] - (i==j)); + Assert( err< precision, + ExcMessage("Eigenvectors " + + Utilities::int_to_string(i) + + " and " + + Utilities::int_to_string(j) + + " are not orthonormal: " + + std::to_string(err))); + } + + laplace.vmult(Ax,eigenfunctions[i]); + Ax.add(-1.0*eigenvalues[i],eigenfunctions[i]); + const double err = Ax.l2_norm(); + Assert (err < precision, + ExcMessage("Returned vector " + + 
Utilities::int_to_string(i) + + " is not an eigenvector: " + + std::to_string(err))); + } + } + } + + + dof_handler.clear (); + deallog << "Ok"< diff --git a/tests/slepc/step-36_parallel_02.cc b/tests/slepc/step-36_parallel_02.cc new file mode 100644 index 0000000000..4ef4090b09 --- /dev/null +++ b/tests/slepc/step-36_parallel_02.cc @@ -0,0 +1,383 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2004 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +// same as step-36_parallel, but solve for largest eigenvalues of GHEP + +#include "../tests.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + + +#include +#include +#include +#include + +#include +#include + +// test parallel (MPI) version of Step-36 + +const unsigned int dim = 2;//run in 2d to save time + +const double eps = 1e-10; + +void test (std::string solver_name, + std::string preconditioner_name) +{ + const unsigned int global_mesh_refinement_steps = 5; + const unsigned int number_of_eigenvalues = 5; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + const unsigned int n_mpi_processes = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator); + const unsigned int this_mpi_process = dealii::Utilities::MPI::this_mpi_process(mpi_communicator); + + + dealii::Triangulation triangulation; + dealii::DoFHandler dof_handler(triangulation); + dealii::FE_Q fe(1); + dealii::ConstraintMatrix constraints; + dealii::IndexSet locally_owned_dofs; + dealii::IndexSet locally_relevant_dofs; + + std::vector eigenfunctions; + std::vector eigenvalues; + dealii::PETScWrappers::MPI::SparseMatrix stiffness_matrix, mass_matrix; + + dealii::GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (global_mesh_refinement_steps); + + // we do not use metis but rather partition by hand below. 
+ //dealii::GridTools::partition_triangulation (n_mpi_processes, triangulation); + { + const double x0 = -1.0; + const double x1 = 1.0; + const double dL = (x1-x0) / n_mpi_processes; + + dealii::Triangulation::active_cell_iterator + cell = triangulation.begin_active(), + endc = triangulation.end(); + for (; cell!=endc; ++cell) + { + const dealii::Point ¢er = cell->center(); + const double x = center[0]; + + const unsigned int id = std::floor ( (x-x0)/dL); + cell->set_subdomain_id (id); + } + } + + dof_handler.distribute_dofs (fe); + dealii::DoFRenumbering::subdomain_wise (dof_handler); + std::vector locally_owned_dofs_per_processor + = DoFTools::locally_owned_dofs_per_subdomain (dof_handler); + locally_owned_dofs = locally_owned_dofs_per_processor[this_mpi_process]; + locally_relevant_dofs.clear(); + dealii::DoFTools::extract_locally_relevant_dofs (dof_handler, + locally_relevant_dofs); + + constraints.clear(); + constraints.reinit (locally_relevant_dofs); + dealii::DoFTools::make_hanging_node_constraints (dof_handler, constraints); + dealii::VectorTools::interpolate_boundary_values (dof_handler, + 0, + dealii::ZeroFunction (), + constraints); + constraints.close (); + + dealii::DynamicSparsityPattern csp (locally_relevant_dofs); + // Fill in ignoring all cells that are not locally owned + dealii::DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, + /* keep constrained dofs */ true); + std::vector n_locally_owned_dofs(n_mpi_processes); + for (unsigned int i = 0; i < n_mpi_processes; i++) + n_locally_owned_dofs[i] = locally_owned_dofs_per_processor[i].n_elements(); + + dealii::SparsityTools::distribute_sparsity_pattern + (csp, + n_locally_owned_dofs, + mpi_communicator, + locally_relevant_dofs); + + // initialize the stiffness and mass matrices + stiffness_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + mass_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + eigenfunctions.resize (5); + for (unsigned int i=0; i(Testing::rand())/static_cast(RAND_MAX); + + eigenfunctions[i].compress(dealii::VectorOperation::insert); + } + + eigenvalues.resize (eigenfunctions.size ()); + + + // ready for assembly + stiffness_matrix = 0; + mass_matrix = 0; + + dealii::QGauss quadrature_formula(2); + dealii::FEValues fe_values (fe, quadrature_formula, + dealii::update_values | + dealii::update_gradients | + dealii::update_quadrature_points | + dealii::update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + dealii::FullMatrix cell_stiffness_matrix (dofs_per_cell, dofs_per_cell); + dealii::FullMatrix cell_mass_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + typename dealii::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active (), + endc = dof_handler.end (); + for (; cell!=endc; ++cell) + if (cell->subdomain_id() == this_mpi_process) + { + fe_values.reinit (cell); + cell_stiffness_matrix = 0; + cell_mass_matrix = 0; + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + constraints + .distribute_local_to_global (cell_stiffness_matrix, + local_dof_indices, + stiffness_matrix); + constraints + .distribute_local_to_global (cell_mass_matrix, + local_dof_indices, + mass_matrix); + } + + stiffness_matrix.compress (dealii::VectorOperation::add); + mass_matrix.compress (dealii::VectorOperation::add); + + // test SLEPc by + { + PETScWrappers::PreconditionerBase 
*preconditioner; + + dealii::deallog<set_initial_space(eigenfunctions); + + eigensolver->set_which_eigenpairs (EPS_LARGEST_REAL); + eigensolver->set_problem_type (EPS_GHEP); + + eigensolver->solve (stiffness_matrix, + mass_matrix, + eigenvalues, + eigenfunctions, + eigenfunctions.size()); + + // TODO make this robust on different platforms. Seems related to GHEP + // as solve_04 works ok. + //dealii::deallog << "outer iterations: "<< solver_control.last_step ()< +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + + +#include +#include +#include +#include + +#include +#include + +// test parallel (MPI) version of Step-36 + +const unsigned int dim = 2;//run in 2d to save time + +const double eps = 1e-10; + +void test (std::string solver_name, + std::string preconditioner_name) +{ + const unsigned int global_mesh_refinement_steps = 5; + const unsigned int number_of_eigenvalues = 5; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + const unsigned int n_mpi_processes = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator); + const unsigned int this_mpi_process = dealii::Utilities::MPI::this_mpi_process(mpi_communicator); + + + dealii::Triangulation triangulation; + dealii::DoFHandler dof_handler(triangulation); + dealii::FE_Q fe(1); + dealii::ConstraintMatrix constraints; + dealii::IndexSet locally_owned_dofs; + dealii::IndexSet locally_relevant_dofs; + + std::vector eigenfunctions; + std::vector eigenvalues; + dealii::PETScWrappers::MPI::SparseMatrix stiffness_matrix, mass_matrix; + + dealii::GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (global_mesh_refinement_steps); + + // we do not use metis but rather partition by hand below. 
+ //dealii::GridTools::partition_triangulation (n_mpi_processes, triangulation); + { + const double x0 = -1.0; + const double x1 = 1.0; + const double dL = (x1-x0) / n_mpi_processes; + + dealii::Triangulation::active_cell_iterator + cell = triangulation.begin_active(), + endc = triangulation.end(); + for (; cell!=endc; ++cell) + { + const dealii::Point ¢er = cell->center(); + const double x = center[0]; + + const unsigned int id = std::floor ( (x-x0)/dL); + cell->set_subdomain_id (id); + } + } + + dof_handler.distribute_dofs (fe); + dealii::DoFRenumbering::subdomain_wise (dof_handler); + std::vector locally_owned_dofs_per_processor + = DoFTools::locally_owned_dofs_per_subdomain (dof_handler); + locally_owned_dofs = locally_owned_dofs_per_processor[this_mpi_process]; + locally_relevant_dofs.clear(); + dealii::DoFTools::extract_locally_relevant_dofs (dof_handler, + locally_relevant_dofs); + + constraints.clear(); + constraints.reinit (locally_relevant_dofs); + dealii::DoFTools::make_hanging_node_constraints (dof_handler, constraints); + dealii::VectorTools::interpolate_boundary_values (dof_handler, + 0, + dealii::ZeroFunction (), + constraints); + constraints.close (); + + dealii::DynamicSparsityPattern csp (locally_relevant_dofs); + // Fill in ignoring all cells that are not locally owned + dealii::DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, + /* keep constrained dofs */ true); + std::vector n_locally_owned_dofs(n_mpi_processes); + for (unsigned int i = 0; i < n_mpi_processes; i++) + n_locally_owned_dofs[i] = locally_owned_dofs_per_processor[i].n_elements(); + + dealii::SparsityTools::distribute_sparsity_pattern + (csp, + n_locally_owned_dofs, + mpi_communicator, + locally_relevant_dofs); + + // initialize the stiffness and mass matrices + stiffness_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + mass_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + eigenfunctions.resize (5); + for (unsigned int i=0; i(Testing::rand())/static_cast(RAND_MAX); + + eigenfunctions[i].compress(dealii::VectorOperation::insert); + } + + eigenvalues.resize (eigenfunctions.size ()); + + + // ready for assembly + stiffness_matrix = 0; + mass_matrix = 0; + + dealii::QGauss quadrature_formula(2); + dealii::FEValues fe_values (fe, quadrature_formula, + dealii::update_values | + dealii::update_gradients | + dealii::update_quadrature_points | + dealii::update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + dealii::FullMatrix cell_stiffness_matrix (dofs_per_cell, dofs_per_cell); + dealii::FullMatrix cell_mass_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + typename dealii::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active (), + endc = dof_handler.end (); + for (; cell!=endc; ++cell) + if (cell->subdomain_id() == this_mpi_process) + { + fe_values.reinit (cell); + cell_stiffness_matrix = 0; + cell_mass_matrix = 0; + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + constraints + .distribute_local_to_global (cell_stiffness_matrix, + local_dof_indices, + stiffness_matrix); + constraints + .distribute_local_to_global (cell_mass_matrix, + local_dof_indices, + mass_matrix); + } + + stiffness_matrix.compress (dealii::VectorOperation::add); + mass_matrix.compress (dealii::VectorOperation::add); + + // test SLEPc by + { + PETScWrappers::PreconditionerBase 
*preconditioner; + + dealii::deallog<set_initial_space(eigenfunctions); + + eigensolver->set_which_eigenpairs (EPS_LARGEST_REAL); + eigensolver->set_problem_type (EPS_HEP); + + eigensolver->solve (stiffness_matrix, + eigenvalues, + eigenfunctions, + eigenfunctions.size()); + + // TODO make this robust on different platforms. Seems related to GHEP + // as solve_04 works ok. + //dealii::deallog << "outer iterations: "<< solver_control.last_step ()<