--- /dev/null
+New: Extend the PArpackSolver class to cover mode 1 (standard eigenvalue problem) and
+mode 2 (generalized eigenvalue problem without spectral transformation).
+<br>
+(Denis Davydov, 2017/08/01)
* eigenvector/eigenvalue pairs to solve for. Here, <code>lambda</code> is a
* vector that will contain the eigenvalues computed, <code>x</code> a vector
* of objects of type <code>V</code> that will contain the eigenvectors
- * computed. <code>OP</code> is an inverse operation for the matrix <code>A -
+ * computed.
+ *
+ * Currently, only three modes of (P)Arpack are implemented. In mode 3 (default),
+ * <code>OP</code> is an inverse operation for the matrix <code>A -
 * sigma * B</code>, where <code> sigma </code> is a shift value, set to zero
- * by default. Note that (P)Arpack supports other transformations, but currently
- * this class implements only shift-and-invert mode.
+ * by default. In mode 2, <code>OP</code> is $M^{-1}A$, i.e. the operation
+ * to be supplied is an inverse of the mass matrix <code>M</code>. Finally,
+ * mode 1 corresponds to the standard eigenvalue problem $Ax=\lambda x$
+ * without spectral transformation.
+ * The mode can be specified via the AdditionalData object (see the example
+ * below). Note that for shift-and-invert (mode=3), the sought eigenpairs
+ * are those after the spectral transformation has been applied.
*
* The <code>OP</code> can be specified by using a LinearOperator:
* @code
* const auto op_shift_invert = inverse_operator(op_shift, cg, PreconditionIdentity ());
* @endcode
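+ *
+ * For example, a sketch selecting mode 2 for a symmetric problem through
+ * the AdditionalData object (the first two arguments shown are the
+ * defaults):
+ * @code
+ * PArpackSolver<V>::AdditionalData
+ *   additional_data (15,
+ *                    PArpackSolver<V>::largest_magnitude,
+ *                    true,
+ *                    2);
+ * @endcode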
*
- * Through the AdditionalData the user can specify some of the parameters to
- * be set.
- *
* The class is intended to be used with MPI and can work on arbitrary vector
* and matrix distributed classes. Both symmetric and non-symmetric
* <code>A</code> are supported.
* also how to set the parameters appropriately please take a look into the
* PARPACK manual.
*
- * @author Denis Davydov, 2015.
+ * @author Denis Davydov, 2015, 2017
*/
template <typename VectorType>
class PArpackSolver : public Subscriptor
const unsigned int number_of_arnoldi_vectors;
const WhichEigenvalues eigenvalue_of_interest;
const bool symmetric;
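+ /**
+  * The (P)Arpack mode to be used: 1 for the standard eigenvalue problem,
+  * 2 for the generalized eigenvalue problem without spectral
+  * transformation, or 3 for shift-and-invert.
+  */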
+ const int mode;
AdditionalData(
const unsigned int number_of_arnoldi_vectors = 15,
const WhichEigenvalues eigenvalue_of_interest = largest_magnitude,
- const bool symmetric = false);
+ const bool symmetric = false,
+ const int mode = 3);
};
/**
* Set shift @p sigma for shift-and-invert spectral transformation.
*
* If this function is not called, the shift is assumed to be zero.
+ *
+ * @note Only relevant for <code>mode=3</code> (see the general documentation of this
+ * class for a definition of what the different modes are).
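+ *
+ * A minimal usage sketch (assuming an already constructed solver object
+ * <code>eigensolver</code>):
+ * @code
+ * eigensolver.set_shift(std::complex<double>(1.0, 0.0));
+ * @endcode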
*/
void set_shift(const std::complex<double> sigma);
 * Solve the generalized eigenspectrum problem $A x=\lambda B x$ by calling
* the <code>pd(n/s)eupd</code> and <code>pd(n/s)aupd</code> functions of
* PARPACK.
+ *
+ * In <code>mode=3</code>, @p inverse should correspond to $[A-\sigma B]^{-1}$,
+ * whereas in <code>mode=2</code> it should represent $B^{-1}$. For
+ * <code>mode=1</code>, both @p mass_matrix and @p inverse are ignored.
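+ *
+ * For example, in <code>mode=2</code> the @p inverse argument can be built
+ * with a LinearOperator, in the same spirit as the example in the class
+ * documentation (a sketch; the solver <code>cg</code> and the matrix
+ * <code>mass_matrix</code> are assumed to be set up by the user):
+ * @code
+ * const auto op_M_inv = inverse_operator(linear_operator<VectorType>(mass_matrix),
+ *                                        cg, PreconditionIdentity());
+ * @endcode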
*/
template <typename MatrixType1,
typename MatrixType2, typename INVERSE>
PArpackSolver<VectorType>::AdditionalData::
AdditionalData (const unsigned int number_of_arnoldi_vectors,
const WhichEigenvalues eigenvalue_of_interest,
- const bool symmetric)
+ const bool symmetric,
+ const int mode)
:
number_of_arnoldi_vectors(number_of_arnoldi_vectors),
eigenvalue_of_interest(eigenvalue_of_interest),
- symmetric(symmetric)
+ symmetric(symmetric),
+ mode(mode)
{
// Check for possible options for symmetric problems
if (symmetric)
Assert(eigenvalue_of_interest!=smallest_imaginary_part,
ExcMessage("'smallest imaginary part' can only be used for non-symmetric problems!"));
}
+ Assert (mode >= 1 && mode <= 3,
+ ExcMessage("Currently, only modes 1, 2 and 3 are supported."));
}
template <typename VectorType>
template <typename VectorType>
template <typename MatrixType1,typename MatrixType2, typename INVERSE>
void PArpackSolver<VectorType>::solve
-(const MatrixType1 &/*system_matrix*/,
+(const MatrixType1 &system_matrix,
const MatrixType2 &mass_matrix,
const INVERSE &inverse,
std::vector<std::complex<double> > &eigenvalues,
Assert (additional_data.number_of_arnoldi_vectors > 2*n_eigenvalues+1,
PArpackExcSmallNumberofArnoldiVectors(
additional_data.number_of_arnoldi_vectors, n_eigenvalues));
- // ARPACK mode for dnaupd, here only
- // Mode 3: K*x = lambda*M*x, K symmetric, M symmetric positive semi-definite
- //c ===> OP = (inv[K - sigma*M])*M and B = M.
- //c ===> Shift-and-Invert mode
- int mode = 3;
+
+ const int mode = additional_data.mode;
// reverse communication parameter
// must be zero on the first call to pdnaupd
// 'G' generalized eigenvalue problem
// 'I' standard eigenvalue problem
- char bmat[2] = "G";
+ char bmat[2];
+ bmat[0] = (mode == 1) ? 'I' : 'G';
+ bmat[1] = '\0';
// Specify the eigenvalues of interest, possible parameters:
// "LA" algebraically largest
iparam[3] = 1;
// Sets the mode of dsaupd:
- // 1 is exact shifting,
- // 2 is user-supplied shifts,
- // 3 is shift-invert mode,
+ // 1 is A*x = lambda*x, OP = A, B = I
+ // 2 is A*x = lambda*M*x, OP = inv[M]*A, B = M
+ // 3 is shift-invert mode, OP = inv[A-sigma*M]*M, B = M
// 4 is buckling mode,
// 5 is Cayley mode.
&resid[0], &ncv, &v[0], &ldv, &iparam[0], &ipntr[0],
&workd[0], &workl[0], &lworkl, &info);
+ AssertThrow (info == 0, PArpackExcInfoPdnaupd(info));
+
+ // if we have converged, we must not modify anything in the work arrays
if (ido == 99)
break;
- switch (mode)
- {
-// OP = (inv[K - sigma*M])*M
- case 3:
+ // IPNTR(1) is the pointer into WORKD for X,
+ // IPNTR(2) is the pointer into WORKD for Y.
+ const int shift_x = ipntr[0]-1;
+ const int shift_y = ipntr[1]-1;
+ Assert (shift_x>=0, dealii::ExcInternalError() );
+ Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() );
+ Assert (shift_y>=0, dealii::ExcInternalError() );
+ Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() );
+
+ src = 0.;
+
+ // switch based on both ido and mode
+ if ((ido == -1) ||
+ (ido == 1 && mode<3))
+ // compute Y = OP * X
{
- switch (ido)
- {
- case -1:
- // compute Y = OP * X where
- // IPNTR(1) is the pointer into WORKD for X,
- // IPNTR(2) is the pointer into WORKD for Y.
+ src.add (nloc,
+ &local_indices[0],
+ &workd[0]+shift_x );
+ src.compress (VectorOperation::add);
+
+ if (mode == 3)
+ // OP = inv[A - sigma*M]*M
{
- const int shift_x = ipntr[0]-1;
- const int shift_y = ipntr[1]-1;
- Assert (shift_x>=0, dealii::ExcInternalError() );
- Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() );
- Assert (shift_y>=0, dealii::ExcInternalError() );
- Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() );
-
- src = 0.0;
- src.add (nloc,
- &local_indices[0],
- &workd[0]+shift_x );
- src.compress (VectorOperation::add);
-
- // multiplication with mass matrix M
mass_matrix.vmult(tmp, src);
- // solving linear system
inverse.vmult(dst,tmp);
-
- // store the result
- dst.extract_subvector_to (local_indices.begin(),
- local_indices.end(),
- &workd[0]+shift_y );
}
- break;
-
- case 1:
- // compute Y = OP * X where
- // IPNTR(1) is the pointer into WORKD for X,
- // IPNTR(2) is the pointer into WORKD for Y.
- // In mode 3,4 and 5, the vector B * X is already
- // available in WORKD(ipntr(3)). It does not
- // need to be recomputed in forming OP * X.
+ else if (mode == 2)
+ // OP = inv[M]*A
{
- const int shift_x = ipntr[0]-1;
- const int shift_y = ipntr[1]-1;
- const int shift_b_x = ipntr[2]-1;
-
- Assert (shift_x>=0, dealii::ExcInternalError() );
- Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() );
- Assert (shift_y>=0, dealii::ExcInternalError() );
- Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() );
- Assert (shift_b_x>=0, dealii::ExcInternalError() );
- Assert (shift_b_x+nloc <= (int)workd.size(), dealii::ExcInternalError() );
- Assert (shift_y>=0, dealii::ExcInternalError() );
- Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() );
-
- src = 0.0; // B*X
- src.add (nloc,
- &local_indices[0],
- &workd[0]+shift_b_x );
-
- tmp = 0.0; // X
- tmp.add (nloc,
- &local_indices[0],
- &workd[0]+shift_x);
-
- src.compress (VectorOperation::add);
- tmp.compress (VectorOperation::add);
-
- // solving linear system
- inverse.vmult(dst,src);
-
- // store the result
- dst.extract_subvector_to (local_indices.begin(),
+ system_matrix.vmult(tmp, src);
+ // store A*X in X, as required by (P)Arpack in mode 2
+ tmp.extract_subvector_to (local_indices.begin(),
local_indices.end(),
- &workd[0]+shift_y );
-
+ &workd[0]+shift_x);
+ inverse.vmult(dst,tmp);
}
- break;
-
- case 2:
- // compute Y = B * X where
- // IPNTR(1) is the pointer into WORKD for X,
- // IPNTR(2) is the pointer into WORKD for Y.
+ else if (mode == 1)
{
+ system_matrix.vmult(dst, src);
+ }
+ else
+ AssertThrow (false, PArpackExcMode(mode));
- const int shift_x = ipntr[0]-1;
- const int shift_y = ipntr[1]-1;
- Assert (shift_x>=0, dealii::ExcInternalError() );
- Assert (shift_x+nloc <= (int)workd.size(), dealii::ExcInternalError() );
- Assert (shift_y>=0, dealii::ExcInternalError() );
- Assert (shift_y+nloc <= (int)workd.size(), dealii::ExcInternalError() );
-
- src = 0.0;
- src.add (nloc,
- &local_indices[0],
- &workd[0]+shift_x );
- src.compress (VectorOperation::add);
-
- // Multiplication with mass matrix M
- mass_matrix.vmult(dst, src);
-
- // store the result
- dst.extract_subvector_to (local_indices.begin(),
- local_indices.end(),
- &workd[0]+shift_y);
+ }
+ else if (ido == 1 && mode >= 3)
+ // compute Y = OP * X for mode 3, 4 and 5, where
+ // the vector B * X is already available in WORKD(ipntr(3)).
+ {
+ const int shift_b_x = ipntr[2]-1;
+ Assert (shift_b_x>=0, dealii::ExcInternalError() );
+ Assert (shift_b_x+nloc <= (int)workd.size(), dealii::ExcInternalError() );
+
+ // B*X
+ src.add (nloc,
+ &local_indices[0],
+ &workd[0]+shift_b_x );
+ src.compress (VectorOperation::add);
+
+ // solving linear system
+ Assert (mode == 3, ExcNotImplemented());
+ inverse.vmult(dst,src);
+ }
+ else if (ido == 2)
+ // compute Y = B * X
+ {
+ src.add (nloc,
+ &local_indices[0],
+ &workd[0]+shift_x );
+ src.compress (VectorOperation::add);
+ // for mode 1, B = I and thus Y = X; otherwise multiply with the mass matrix M
+ if (mode == 1)
+ {
+ dst = src;
}
- break;
-
- default:
- AssertThrow (false, PArpackExcIdo(ido));
- break;
+ else
+ // mode 2,3 and 5 have B=M
+ {
+ mass_matrix.vmult(dst, src);
}
}
- break;
- default:
- AssertThrow (false, PArpackExcMode(mode));
- break;
- }
- }
+ else
+ AssertThrow (false, PArpackExcIdo(ido));
+ // Note: IDO = 3 does not appear to be required for currently
+ // implemented modes
- if (info<0)
- {
- AssertThrow (false, PArpackExcInfoPdnaupd(info));
- }
- else
- {
- // 1 - compute eigenvectors,
- // 0 - only eigenvalues
- int rvec = 1;
+ // store the result
+ dst.extract_subvector_to (local_indices.begin(),
+ local_indices.end(),
+ &workd[0]+shift_y);
+ } // end of pd*aupd_ loop
- // which eigenvectors
- char howmany[4] = "All";
+ // 1 - compute eigenvectors,
+ // 0 - only eigenvalues
+ int rvec = 1;
- std::vector<double> eigenvalues_real (n_eigenvalues+1, 0.);
- std::vector<double> eigenvalues_im (n_eigenvalues+1, 0.);
+ // which eigenvectors
+ char howmany[4] = "All";
- // call of ARPACK pdneupd routine
- if (additional_data.symmetric)
- pdseupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0],
- &z[0], &ldz, &sigmar,
- bmat, &n_inside_arpack, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv,
- &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
- else
- pdneupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0],
- &eigenvalues_im[0], &v[0], &ldz, &sigmar, &sigmai,
- &workev[0], bmat, &n_inside_arpack, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv,
- &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
+ std::vector<double> eigenvalues_real (n_eigenvalues+1, 0.);
+ std::vector<double> eigenvalues_im (n_eigenvalues+1, 0.);
- if (info == 1)
- {
- AssertThrow (false, PArpackExcInfoMaxIt(control().max_steps()));
- }
- else if (info == 3)
- {
- AssertThrow (false, PArpackExcNoShifts(1));
- }
- else if (info!=0)
- {
- AssertThrow (false, PArpackExcInfoPdneupd(info));
- }
+ // call to PARPACK's pd(s/n)eupd routine
+ if (additional_data.symmetric)
+ pdseupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0],
+ &z[0], &ldz, &sigmar,
+ bmat, &n_inside_arpack, which, &nev, &tol,
+ &resid[0], &ncv, &v[0], &ldv,
+ &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
+ else
+ pdneupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0],
+ &eigenvalues_im[0], &v[0], &ldz, &sigmar, &sigmai,
+ &workev[0], bmat, &n_inside_arpack, which, &nev, &tol,
+ &resid[0], &ncv, &v[0], &ldv,
+ &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
- for (int i=0; i<nev; ++i)
- {
- eigenvectors[i] = 0.0;
- Assert (i*nloc + nloc <= (int)v.size(), dealii::ExcInternalError() );
+ if (info == 1)
+ {
+ AssertThrow (false, PArpackExcInfoMaxIt(control().max_steps()));
+ }
+ else if (info == 3)
+ {
+ AssertThrow (false, PArpackExcNoShifts(1));
+ }
+ else if (info!=0)
+ {
+ AssertThrow (false, PArpackExcInfoPdneupd(info));
+ }
- eigenvectors[i].add (nloc,
- &local_indices[0],
- &v[i*nloc] );
- eigenvectors[i].compress (VectorOperation::add);
- }
+ for (int i=0; i<nev; ++i)
+ {
+ eigenvectors[i] = 0.0;
+ Assert (i*nloc + nloc <= (int)v.size(), dealii::ExcInternalError() );
- for (size_type i=0; i<n_eigenvalues; ++i)
- eigenvalues[i] = std::complex<double> (eigenvalues_real[i],
- eigenvalues_im[i]);
+ eigenvectors[i].add (nloc,
+ &local_indices[0],
+ &v[i*nloc] );
+ eigenvectors[i].compress (VectorOperation::add);
}
+ for (size_type i=0; i<n_eigenvalues; ++i)
+ eigenvalues[i] = std::complex<double> (eigenvalues_real[i],
+ eigenvalues_im[i]);
+
// Throw an error if the solver did not converge.
AssertThrow (iparam[4] >= (int)n_eigenvalues,
PArpackExcConvergedEigenvectors(n_eigenvalues,iparam[4]));
--- /dev/null
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2017 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Same as step-36_parpack_mf, but solves for the largest eigenvalues of the Laplace operator.
+ * For the same problem slepc/step-36_parallel_02.cc produces:
+ * 6099.84
+ * 6035.05
+ * 6035.05
+ * 5970.26
+ * 5931.69
+ */
+
+#include "../tests.h"
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/fe_tools.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/linear_operator.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/numerics/vector_tools.h>
+
+
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_in.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/lac/parpack_solver.h>
+
+#include <fstream>
+#include <iostream>
+
+
+const unsigned int dim = 2;
+
+using namespace dealii;
+
+const double eps = 1e-10;
+
+const unsigned int fe_degree = 1;
+
+void test ()
+{
+ const unsigned int global_mesh_refinement_steps = 5;
+ const unsigned int number_of_eigenvalues = 5;
+
+ MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+ const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator);
+
+ parallel::distributed::Triangulation<dim> triangulation (mpi_communicator);
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (global_mesh_refinement_steps);
+
+
+ DoFHandler<dim> dof_handler(triangulation);
+ FE_Q<dim> fe(fe_degree);
+ dof_handler.distribute_dofs (fe);
+
+
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+ ConstraintMatrix constraints;
+ constraints.reinit (locally_relevant_dofs);
+ DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ ZeroFunction<dim> (),
+ constraints);
+ constraints.close ();
+
+ std::shared_ptr<MatrixFree<dim,double> > mf_data(new MatrixFree<dim,double> ());
+ {
+ const QGauss<1> quad (fe_degree+1);
+ typename MatrixFree<dim,double>::AdditionalData data;
+ data.tasks_parallel_scheme =
+ MatrixFree<dim,double>::AdditionalData::partition_color;
+ data.mapping_update_flags = update_values | update_gradients | update_JxW_values;
+ mf_data->reinit (dof_handler, constraints, quad, data);
+ }
+
+ std::vector<LinearAlgebra::distributed::Vector<double> > eigenfunctions;
+ std::vector<double> eigenvalues;
+ MatrixFreeOperators::MassOperator<dim,fe_degree, fe_degree+1, 1, LinearAlgebra::distributed::Vector<double> > mass;
+ MatrixFreeOperators::LaplaceOperator<dim,fe_degree, fe_degree+1, 1, LinearAlgebra::distributed::Vector<double> > laplace;
+ mass.initialize(mf_data);
+ laplace.initialize(mf_data);
+
+ eigenfunctions.resize (number_of_eigenvalues);
+ eigenvalues.resize (number_of_eigenvalues);
+ for (unsigned int i=0; i<eigenfunctions.size (); ++i)
+ mf_data->initialize_dof_vector (eigenfunctions[i]);
+
+ // test PArpack with matrix-free
+ {
+ std::vector<std::complex<double> > lambda(number_of_eigenvalues);
+
+ // set up iterative inverse
+ static ReductionControl inner_control_c(dof_handler.n_dofs(), 0.0, 1.e-14);
+
+ typedef LinearAlgebra::distributed::Vector<double> VectorType;
+ SolverCG<VectorType> solver_c(inner_control_c);
+ PreconditionIdentity preconditioner;
+ const auto invert =
+ inverse_operator(linear_operator<VectorType>(mass),
+ solver_c,
+ preconditioner);
+
+ const unsigned int num_arnoldi_vectors = 2*eigenvalues.size() + 10;
+ PArpackSolver<LinearAlgebra::distributed::Vector<double> >::AdditionalData
+ additional_data(num_arnoldi_vectors,
+ PArpackSolver<LinearAlgebra::distributed::Vector<double> >::largest_magnitude,
+ true,
+ 2);
+
+ SolverControl solver_control(
+ dof_handler.n_dofs(), 1e-9, /*log_history*/ false, /*log_results*/ false);
+
+ PArpackSolver<LinearAlgebra::distributed::Vector<double> > eigensolver(
+ solver_control, mpi_communicator, additional_data);
+
+ eigensolver.reinit(eigenfunctions[0]);
+ // make sure the initial vector is zero on constrained dofs, i.e. orthogonal to the constrained subspace
+ {
+ LinearAlgebra::distributed::Vector<double> init_vector;
+ mf_data->initialize_dof_vector(init_vector);
+ for (auto it = init_vector.begin(); it != init_vector.end(); ++it)
+ *it = static_cast<double>(Testing::rand())/static_cast<double>(RAND_MAX);
+
+ constraints.set_zero(init_vector);
+ eigensolver.set_initial_vector(init_vector);
+ }
+ // avoid output of iterative solver:
+ const unsigned int previous_depth = deallog.depth_file(0);
+ eigensolver.solve (laplace,
+ mass,
+ invert,
+ lambda,
+ eigenfunctions,
+ eigenvalues.size());
+ deallog.depth_file(previous_depth);
+
+ for (unsigned int i = 0; i < lambda.size(); i++)
+ eigenvalues[i] = lambda[i].real();
+
+ for (unsigned int i=0; i < eigenvalues.size(); i++)
+ deallog << eigenvalues[i] << std::endl;
+
+ // make sure that we have eigenvectors and they are mass-orthonormal:
+ // a) (A*x_i-\lambda*B*x_i).L2() == 0
+ // b) x_j*B*x_i=\delta_{ij}
+ {
+ const double precision = 1e-7;
+ LinearAlgebra::distributed::Vector<double> Ax(eigenfunctions[0]), Bx(eigenfunctions[0]);
+ for (unsigned int i=0; i < eigenfunctions.size(); ++i)
+ {
+ mass.vmult(Bx,eigenfunctions[i]);
+
+ for (unsigned int j=0; j < eigenfunctions.size(); j++)
+ {
+ const double err = std::abs( eigenfunctions[j] * Bx - (i==j));
+ Assert( err< precision,
+ ExcMessage("Eigenvectors " +
+ Utilities::int_to_string(i) +
+ " and " +
+ Utilities::int_to_string(j) +
+ " are not orthonormal: " +
+ std::to_string(err)));
+ }
+
+ laplace.vmult(Ax,eigenfunctions[i]);
+ Ax.add(-1.0*eigenvalues[i],Bx);
+ const double err = Ax.l2_norm();
+ Assert (err < precision,
+ ExcMessage("Returned vector " +
+ Utilities::int_to_string(i) +
+ " is not an eigenvector: " +
+ std::to_string(err)));
+ }
+ }
+ }
+
+
+ dof_handler.clear ();
+ deallog << "Ok"<<std::endl;
+}
+
+
+int main (int argc,char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile,/*do not print job id*/false);
+ deallog.threshold_double(eps);
+
+ try
+ {
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ {
+ test ();
+ }
+
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+DEAL::5931.69
+DEAL::5970.26
+DEAL::6035.05
+DEAL::6035.05
+DEAL::6099.84
+DEAL::Ok
--- /dev/null
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2017 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Same as step-36_parpack_mf_2, but solves for the largest eigenvalues of the Laplace operator as a standard (SHEP) problem.
+ * For the same problem slepc/step-36_parallel_03.cc produces:
+ * DEAL::3.98719
+ * DEAL::3.98719
+ * DEAL::3.97768
+ * DEAL::3.97768
+ * DEAL::3.96194
+ */
+
+#include "../tests.h"
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/fe_tools.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/linear_operator.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/numerics/vector_tools.h>
+
+
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_in.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/lac/parpack_solver.h>
+
+#include <fstream>
+#include <iostream>
+
+
+const unsigned int dim = 2;
+
+using namespace dealii;
+
+const double eps = 1e-10;
+
+const unsigned int fe_degree = 1;
+
+void test ()
+{
+ const unsigned int global_mesh_refinement_steps = 5;
+ const unsigned int number_of_eigenvalues = 5;
+
+ MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+ const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator);
+
+ parallel::distributed::Triangulation<dim> triangulation (mpi_communicator);
+ GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (global_mesh_refinement_steps);
+
+
+ DoFHandler<dim> dof_handler(triangulation);
+ FE_Q<dim> fe(fe_degree);
+ dof_handler.distribute_dofs (fe);
+
+
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+ ConstraintMatrix constraints;
+ constraints.reinit (locally_relevant_dofs);
+ DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ ZeroFunction<dim> (),
+ constraints);
+ constraints.close ();
+
+ std::shared_ptr<MatrixFree<dim,double> > mf_data(new MatrixFree<dim,double> ());
+ {
+ const QGauss<1> quad (fe_degree+1);
+ typename MatrixFree<dim,double>::AdditionalData data;
+ data.tasks_parallel_scheme =
+ MatrixFree<dim,double>::AdditionalData::partition_color;
+ data.mapping_update_flags = update_values | update_gradients | update_JxW_values;
+ mf_data->reinit (dof_handler, constraints, quad, data);
+ }
+
+ std::vector<LinearAlgebra::distributed::Vector<double> > eigenfunctions;
+ std::vector<double> eigenvalues;
+ MatrixFreeOperators::MassOperator<dim,fe_degree, fe_degree+1, 1, LinearAlgebra::distributed::Vector<double> > mass;
+ MatrixFreeOperators::LaplaceOperator<dim,fe_degree, fe_degree+1, 1, LinearAlgebra::distributed::Vector<double> > laplace;
+ mass.initialize(mf_data);
+ laplace.initialize(mf_data);
+
+ eigenfunctions.resize (number_of_eigenvalues);
+ eigenvalues.resize (number_of_eigenvalues);
+ for (unsigned int i=0; i<eigenfunctions.size (); ++i)
+ mf_data->initialize_dof_vector (eigenfunctions[i]);
+
+ // test PArpack with matrix-free
+ {
+ std::vector<std::complex<double> > lambda(number_of_eigenvalues);
+
+ // set up iterative inverse
+ static ReductionControl inner_control_c(dof_handler.n_dofs(), 0.0, 1.e-14);
+
+ typedef LinearAlgebra::distributed::Vector<double> VectorType;
+ SolverCG<VectorType> solver_c(inner_control_c);
+ PreconditionIdentity preconditioner;
+ const auto invert =
+ inverse_operator(linear_operator<VectorType>(mass),
+ solver_c,
+ preconditioner);
+
+ const unsigned int num_arnoldi_vectors = 2*eigenvalues.size() + 10;
+ PArpackSolver<LinearAlgebra::distributed::Vector<double> >::AdditionalData
+ additional_data(num_arnoldi_vectors,
+ PArpackSolver<LinearAlgebra::distributed::Vector<double> >::largest_magnitude,
+ true,
+ 1);
+
+ SolverControl solver_control(
+ dof_handler.n_dofs(), 1e-9, /*log_history*/ false, /*log_results*/ false);
+
+ PArpackSolver<LinearAlgebra::distributed::Vector<double> > eigensolver(
+ solver_control, mpi_communicator, additional_data);
+
+ eigensolver.reinit(eigenfunctions[0]);
+ // make sure the initial vector is zero on constrained dofs, i.e. orthogonal to the constrained subspace
+ {
+ LinearAlgebra::distributed::Vector<double> init_vector;
+ mf_data->initialize_dof_vector(init_vector);
+ for (auto it = init_vector.begin(); it != init_vector.end(); ++it)
+ *it = static_cast<double>(Testing::rand())/static_cast<double>(RAND_MAX);
+
+ constraints.set_zero(init_vector);
+ eigensolver.set_initial_vector(init_vector);
+ }
+ // avoid output of iterative solver:
+ const unsigned int previous_depth = deallog.depth_file(0);
+ eigensolver.solve (laplace,
+ mass,
+ invert,
+ lambda,
+ eigenfunctions,
+ eigenvalues.size());
+ deallog.depth_file(previous_depth);
+
+ for (unsigned int i = 0; i < lambda.size(); i++)
+ eigenvalues[i] = lambda[i].real();
+
+ for (unsigned int i=0; i < eigenvalues.size(); i++)
+ deallog << eigenvalues[i] << std::endl;
+
+ // make sure that we have eigenvectors and they are orthonormal:
+ // a) (A*x_i-\lambda*x_i).L2() == 0
+ // b) x_j*x_i=\delta_{ij}
+ {
+ const double precision = 1e-7;
+ LinearAlgebra::distributed::Vector<double> Ax(eigenfunctions[0]);
+ for (unsigned int i=0; i < eigenfunctions.size(); ++i)
+ {
+ for (unsigned int j=0; j < eigenfunctions.size(); j++)
+ {
+ const double err = std::abs( eigenfunctions[j] * eigenfunctions[i] - (i==j));
+ Assert( err< precision,
+ ExcMessage("Eigenvectors " +
+ Utilities::int_to_string(i) +
+ " and " +
+ Utilities::int_to_string(j) +
+ " are not orthonormal: " +
+ std::to_string(err)));
+ }
+
+ laplace.vmult(Ax,eigenfunctions[i]);
+ Ax.add(-1.0*eigenvalues[i],eigenfunctions[i]);
+ const double err = Ax.l2_norm();
+ Assert (err < precision,
+ ExcMessage("Returned vector " +
+ Utilities::int_to_string(i) +
+ " is not an eigenvector: " +
+ std::to_string(err)));
+ }
+ }
+ }
+
+
+ dof_handler.clear ();
+ deallog << "Ok"<<std::endl;
+}
+
+
+int main (int argc,char **argv)
+{
+ std::ofstream logfile("output");
+ deallog.attach(logfile,/*do not print job id*/false);
+ deallog.threshold_double(eps);
+
+ try
+ {
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ {
+ test ();
+ }
+
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+DEAL::3.96194
+DEAL::3.97768
+DEAL::3.97768
+DEAL::3.98719
+DEAL::3.98719
+DEAL::Ok
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Solve for the smallest eigenvalues of the Laplace operator in the step-36 tutorial
+
#include "../tests.h"
#include <deal.II/base/logstream.h>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// same as step-36_parallel, but solves for the largest eigenvalues of the GHEP
+
+#include "../tests.h"
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/fe_tools.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/slepc_solver.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_in.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/lac/vector.h>
+
+#include <fstream>
+#include <iostream>
+
+// test parallel (MPI) version of Step-36
+
+const unsigned int dim = 2; // run in 2d to save time
+
+const double eps = 1e-10;
+
+void test (std::string solver_name,
+ std::string preconditioner_name)
+{
+ const unsigned int global_mesh_refinement_steps = 5;
+ const unsigned int number_of_eigenvalues = 5;
+
+ MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+ const unsigned int n_mpi_processes = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int this_mpi_process = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
+
+
+ dealii::Triangulation<dim> triangulation;
+ dealii::DoFHandler<dim> dof_handler(triangulation);
+ dealii::FE_Q<dim> fe(1);
+ dealii::ConstraintMatrix constraints;
+ dealii::IndexSet locally_owned_dofs;
+ dealii::IndexSet locally_relevant_dofs;
+
+ std::vector<dealii::PETScWrappers::MPI::Vector> eigenfunctions;
+ std::vector<PetscScalar> eigenvalues;
+ dealii::PETScWrappers::MPI::SparseMatrix stiffness_matrix, mass_matrix;
+
+ dealii::GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (global_mesh_refinement_steps);
+
+ // we do not use metis but rather partition by hand below.
+ //dealii::GridTools::partition_triangulation (n_mpi_processes, triangulation);
+ {
+ const double x0 = -1.0;
+ const double x1 = 1.0;
+ const double dL = (x1-x0) / n_mpi_processes;
+
+ dealii::Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active(),
+ endc = triangulation.end();
+ for (; cell!=endc; ++cell)
+ {
+ const dealii::Point<dim> &center = cell->center();
+ const double x = center[0];
+
+ const unsigned int id = std::floor ( (x-x0)/dL);
+ cell->set_subdomain_id (id);
+ }
+ }
+
+ dof_handler.distribute_dofs (fe);
+ dealii::DoFRenumbering::subdomain_wise (dof_handler);
+ std::vector<dealii::IndexSet> locally_owned_dofs_per_processor
+ = DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+ locally_owned_dofs = locally_owned_dofs_per_processor[this_mpi_process];
+ locally_relevant_dofs.clear();
+ dealii::DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+
+ constraints.clear();
+ constraints.reinit (locally_relevant_dofs);
+ dealii::DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ dealii::VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ dealii::ZeroFunction<dim> (),
+ constraints);
+ constraints.close ();
+
+ dealii::DynamicSparsityPattern csp (locally_relevant_dofs);
+ // Fill in ignoring all cells that are not locally owned
+ dealii::DoFTools::make_sparsity_pattern (dof_handler, csp,
+ constraints,
+ /* keep constrained dofs */ true);
+ std::vector<dealii::types::global_dof_index> n_locally_owned_dofs(n_mpi_processes);
+ for (unsigned int i = 0; i < n_mpi_processes; i++)
+ n_locally_owned_dofs[i] = locally_owned_dofs_per_processor[i].n_elements();
+
+ dealii::SparsityTools::distribute_sparsity_pattern
+ (csp,
+ n_locally_owned_dofs,
+ mpi_communicator,
+ locally_relevant_dofs);
+
+ // initialize the stiffness and mass matrices
+ stiffness_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ csp,
+ mpi_communicator);
+
+ mass_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ csp,
+ mpi_communicator);
+
+ eigenfunctions.resize (number_of_eigenvalues);
+ for (unsigned int i=0; i<eigenfunctions.size (); ++i)
+ {
+ eigenfunctions[i].reinit (locally_owned_dofs, mpi_communicator); // without ghost dofs
+ for (unsigned int j=0; j<locally_owned_dofs.n_elements(); ++j)
+ eigenfunctions[i][locally_owned_dofs.nth_index_in_set(j)] = static_cast<double>(Testing::rand())/static_cast<double>(RAND_MAX);
+
+ eigenfunctions[i].compress(dealii::VectorOperation::insert);
+ }
+
+ eigenvalues.resize (eigenfunctions.size ());
+
+
+ // ready for assembly
+ stiffness_matrix = 0;
+ mass_matrix = 0;
+
+ dealii::QGauss<dim> quadrature_formula(2);
+ dealii::FEValues<dim> fe_values (fe, quadrature_formula,
+ dealii::update_values |
+ dealii::update_gradients |
+ dealii::update_quadrature_points |
+ dealii::update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ dealii::FullMatrix<double> cell_stiffness_matrix (dofs_per_cell, dofs_per_cell);
+ dealii::FullMatrix<double> cell_mass_matrix (dofs_per_cell, dofs_per_cell);
+
+ std::vector<dealii::types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename dealii::DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active (),
+ endc = dof_handler.end ();
+ for (; cell!=endc; ++cell)
+ if (cell->subdomain_id() == this_mpi_process)
+ {
+ fe_values.reinit (cell);
+ cell_stiffness_matrix = 0;
+ cell_mass_matrix = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ {
+ cell_stiffness_matrix (i, j)
+ += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point)
+ ) * fe_values.JxW (q_point);
+
+ cell_mass_matrix (i, j)
+ += (fe_values.shape_value (i, q_point) *
+ fe_values.shape_value (j, q_point)
+ ) * fe_values.JxW (q_point);
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+
+ constraints
+ .distribute_local_to_global (cell_stiffness_matrix,
+ local_dof_indices,
+ stiffness_matrix);
+ constraints
+ .distribute_local_to_global (cell_mass_matrix,
+ local_dof_indices,
+ mass_matrix);
+ }
+
+ stiffness_matrix.compress (dealii::VectorOperation::add);
+ mass_matrix.compress (dealii::VectorOperation::add);
+
+ // test SLEPc with the chosen eigensolver and preconditioner
+ {
+ PETScWrappers::PreconditionerBase *preconditioner;
+
+ dealii::deallog<<preconditioner_name<<std::endl;
+ if (preconditioner_name == "Jacobi")
+ {
+ preconditioner = new PETScWrappers::PreconditionJacobi(mpi_communicator);
+ }
+ else if (preconditioner_name == "Boomer")
+ {
+ PETScWrappers::PreconditionBoomerAMG::AdditionalData data;
+ data.symmetric_operator = true;
+
+ preconditioner = new PETScWrappers::PreconditionBoomerAMG(mpi_communicator,
+ data);
+ }
+ else if (preconditioner_name == "BlockJacobi")
+ {
+ preconditioner = new PETScWrappers::PreconditionBlockJacobi(mpi_communicator);
+ }
+ else
+ {
+ AssertThrow (false, ExcMessage ("not supported preconditioner"));
+
+ // make compiler happy
+ preconditioner = new PETScWrappers::PreconditionJacobi(mpi_communicator);
+ }
+
+ dealii::SolverControl linear_solver_control (dof_handler.n_dofs(), 1e-15,/*log_history*/false,/*log_results*/false);
+ PETScWrappers::SolverCG linear_solver(linear_solver_control,mpi_communicator);
+ linear_solver.initialize(*preconditioner);
+
+ dealii::SolverControl solver_control (100, 1e-11,/*log_history*/false,/*log_results*/false);
+
+ dealii::SLEPcWrappers::SolverBase *eigensolver;
+
+ dealii::deallog<<solver_name<<std::endl;
+ // Get a handle on the wanted eigenspectrum solver
+ if (solver_name == "KrylovSchur")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverKrylovSchur (solver_control,
+ mpi_communicator);
+ }
+
+ else if (solver_name == "GeneralizedDavidson")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverGeneralizedDavidson (solver_control,
+ mpi_communicator);
+ }
+ else if (solver_name == "JacobiDavidson")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverJacobiDavidson (solver_control,
+ mpi_communicator);
+ }
+ else if (solver_name == "Lanczos")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverLanczos (solver_control,
+ mpi_communicator);
+ }
+ else
+ {
+ AssertThrow (false, ExcMessage ("not supported eigensolver"));
+
+ // Make compiler happy and not complaining about non
+ // uninitialized variables
+ eigensolver = new dealii::SLEPcWrappers::SolverKrylovSchur (solver_control,
+ mpi_communicator);
+ }
+
+ // Set the initial vector. This is optional; if omitted, the initial vector is set to random values.
+ eigensolver->set_initial_space(eigenfunctions);
+
+ eigensolver->set_which_eigenpairs (EPS_LARGEST_REAL);
+ eigensolver->set_problem_type (EPS_GHEP);
+
+ eigensolver->solve (stiffness_matrix,
+ mass_matrix,
+ eigenvalues,
+ eigenfunctions,
+ eigenfunctions.size());
+
+ // TODO make this robust on different platforms. Seems related to GHEP
+ // as solve_04 works ok.
+ //dealii::deallog << "outer iterations: "<< solver_control.last_step ()<<std::endl;
+ //dealii::deallog << "last inner iterations: "<<linear_solver_control.last_step()<<std::endl;
+ for (unsigned int i=0; i < eigenvalues.size(); i++)
+ dealii::deallog << eigenvalues[i] << std::endl;
+
+ delete preconditioner;
+ delete eigensolver;
+
+ // make sure that we have eigenvectors and they are mass-orthonormal:
+ // a) (A*x_i-\lambda*B*x_i).L2() == 0
+ // b) x_j*B*x_i=\delta_{ij}
+ {
+ const double precision = 1e-5;
+ PETScWrappers::MPI::Vector Ax(eigenfunctions[0]), Bx(eigenfunctions[0]);
+ for (unsigned int i=0; i < eigenfunctions.size(); ++i)
+ {
+ mass_matrix.vmult(Bx,eigenfunctions[i]);
+
+ for (unsigned int j=0; j < eigenfunctions.size(); j++)
+ Assert( std::abs( eigenfunctions[j] * Bx - (i==j))< precision,
+ ExcMessage("Eigenvectors " +
+ Utilities::int_to_string(i) +
+ " and " +
+ Utilities::int_to_string(j) +
+ " are not orthonormal!"));
+
+ stiffness_matrix.vmult(Ax,eigenfunctions[i]);
+ Ax.add(-1.0*eigenvalues[i],Bx);
+ Assert (Ax.l2_norm() < precision,
+ ExcMessage(Utilities::to_string(Ax.l2_norm())));
+ }
+ }
+ }
+
+
+ dof_handler.clear ();
+ dealii::deallog << "Ok"<<std::endl;
+}
+
+
+int main (int argc,char **argv)
+{
+ std::ofstream logfile("output");
+ dealii::deallog.attach(logfile,/*do not print job id*/false);
+ dealii::deallog.depth_console(0);
+ dealii::deallog.threshold_double(eps);
+
+ try
+ {
+ dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ {
+ test ("KrylovSchur","Jacobi");
+ test ("KrylovSchur","BlockJacobi");
+ test ("KrylovSchur","Boomer");
+// test ("GeneralizedDavidson");
+// test ("JacobiDavidson");
+ }
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+DEAL::Jacobi
+DEAL::KrylovSchur
+DEAL::6099.84
+DEAL::6035.05
+DEAL::6035.05
+DEAL::5970.26
+DEAL::5931.69
+DEAL::Ok
+DEAL::BlockJacobi
+DEAL::KrylovSchur
+DEAL::6099.84
+DEAL::6035.05
+DEAL::6035.05
+DEAL::5970.26
+DEAL::5931.69
+DEAL::Ok
+DEAL::Boomer
+DEAL::KrylovSchur
+DEAL::6099.84
+DEAL::6035.05
+DEAL::6035.05
+DEAL::5970.26
+DEAL::5931.69
+DEAL::Ok
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// same as step-36_parallel_02, but solves a standard (SHEP) problem
+
+#include "../tests.h"
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/fe_tools.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/slepc_solver.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_in.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/lac/vector.h>
+
+#include <fstream>
+#include <iostream>
+
+// test parallel (MPI) version of Step-36
+
+const unsigned int dim = 2; // run in 2d to save time
+
+const double eps = 1e-10;
+
+void test (std::string solver_name,
+ std::string preconditioner_name)
+{
+ const unsigned int global_mesh_refinement_steps = 5;
+ const unsigned int number_of_eigenvalues = 5;
+
+ MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+ const unsigned int n_mpi_processes = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int this_mpi_process = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
+
+
+ dealii::Triangulation<dim> triangulation;
+ dealii::DoFHandler<dim> dof_handler(triangulation);
+ dealii::FE_Q<dim> fe(1);
+ dealii::ConstraintMatrix constraints;
+ dealii::IndexSet locally_owned_dofs;
+ dealii::IndexSet locally_relevant_dofs;
+
+ std::vector<dealii::PETScWrappers::MPI::Vector> eigenfunctions;
+ std::vector<PetscScalar> eigenvalues;
+ dealii::PETScWrappers::MPI::SparseMatrix stiffness_matrix, mass_matrix;
+
+ dealii::GridGenerator::hyper_cube (triangulation, -1, 1);
+ triangulation.refine_global (global_mesh_refinement_steps);
+
+ // we do not use metis but rather partition by hand below.
+ //dealii::GridTools::partition_triangulation (n_mpi_processes, triangulation);
+ {
+ const double x0 = -1.0;
+ const double x1 = 1.0;
+ const double dL = (x1-x0) / n_mpi_processes;
+
+ dealii::Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active(),
+ endc = triangulation.end();
+ for (; cell!=endc; ++cell)
+ {
+ const dealii::Point<dim> &center = cell->center();
+ const double x = center[0];
+
+ const unsigned int id = std::floor ( (x-x0)/dL);
+ cell->set_subdomain_id (id);
+ }
+ }
+
+ dof_handler.distribute_dofs (fe);
+ dealii::DoFRenumbering::subdomain_wise (dof_handler);
+ std::vector<dealii::IndexSet> locally_owned_dofs_per_processor
+ = DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+ locally_owned_dofs = locally_owned_dofs_per_processor[this_mpi_process];
+ locally_relevant_dofs.clear();
+ dealii::DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+
+ constraints.clear();
+ constraints.reinit (locally_relevant_dofs);
+ dealii::DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ dealii::VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ dealii::ZeroFunction<dim> (),
+ constraints);
+ constraints.close ();
+
+ dealii::DynamicSparsityPattern csp (locally_relevant_dofs);
+ // Fill in ignoring all cells that are not locally owned
+ dealii::DoFTools::make_sparsity_pattern (dof_handler, csp,
+ constraints,
+ /* keep constrained dofs */ true);
+ std::vector<dealii::types::global_dof_index> n_locally_owned_dofs(n_mpi_processes);
+ for (unsigned int i = 0; i < n_mpi_processes; i++)
+ n_locally_owned_dofs[i] = locally_owned_dofs_per_processor[i].n_elements();
+
+ dealii::SparsityTools::distribute_sparsity_pattern
+ (csp,
+ n_locally_owned_dofs,
+ mpi_communicator,
+ locally_relevant_dofs);
+
+ // initialize the stiffness and mass matrices
+ stiffness_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ csp,
+ mpi_communicator);
+
+ mass_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ csp,
+ mpi_communicator);
+
+ eigenfunctions.resize (number_of_eigenvalues);
+ for (unsigned int i=0; i<eigenfunctions.size (); ++i)
+ {
+ eigenfunctions[i].reinit (locally_owned_dofs, mpi_communicator); // without ghost dofs
+ for (unsigned int j=0; j<locally_owned_dofs.n_elements(); ++j)
+ eigenfunctions[i][locally_owned_dofs.nth_index_in_set(j)] = static_cast<double>(Testing::rand())/static_cast<double>(RAND_MAX);
+
+ eigenfunctions[i].compress(dealii::VectorOperation::insert);
+ }
+
+ eigenvalues.resize (eigenfunctions.size ());
+
+
+ // ready for assembly
+ stiffness_matrix = 0;
+ mass_matrix = 0;
+
+ dealii::QGauss<dim> quadrature_formula(2);
+ dealii::FEValues<dim> fe_values (fe, quadrature_formula,
+ dealii::update_values |
+ dealii::update_gradients |
+ dealii::update_quadrature_points |
+ dealii::update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ dealii::FullMatrix<double> cell_stiffness_matrix (dofs_per_cell, dofs_per_cell);
+ dealii::FullMatrix<double> cell_mass_matrix (dofs_per_cell, dofs_per_cell);
+
+ std::vector<dealii::types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ typename dealii::DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active (),
+ endc = dof_handler.end ();
+ for (; cell!=endc; ++cell)
+ if (cell->subdomain_id() == this_mpi_process)
+ {
+ fe_values.reinit (cell);
+ cell_stiffness_matrix = 0;
+ cell_mass_matrix = 0;
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ {
+ cell_stiffness_matrix (i, j)
+ += (fe_values.shape_grad (i, q_point) *
+ fe_values.shape_grad (j, q_point)
+ ) * fe_values.JxW (q_point);
+
+ cell_mass_matrix (i, j)
+ += (fe_values.shape_value (i, q_point) *
+ fe_values.shape_value (j, q_point)
+ ) * fe_values.JxW (q_point);
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+
+ constraints
+ .distribute_local_to_global (cell_stiffness_matrix,
+ local_dof_indices,
+ stiffness_matrix);
+ constraints
+ .distribute_local_to_global (cell_mass_matrix,
+ local_dof_indices,
+ mass_matrix);
+ }
+
+ stiffness_matrix.compress (dealii::VectorOperation::add);
+ mass_matrix.compress (dealii::VectorOperation::add);
+
+ // test SLEPc with the chosen eigensolver and preconditioner
+ {
+ PETScWrappers::PreconditionerBase *preconditioner;
+
+ dealii::deallog<<preconditioner_name<<std::endl;
+ if (preconditioner_name == "Jacobi")
+ {
+ preconditioner = new PETScWrappers::PreconditionJacobi(mpi_communicator);
+ }
+ else if (preconditioner_name == "Boomer")
+ {
+ PETScWrappers::PreconditionBoomerAMG::AdditionalData data;
+ data.symmetric_operator = true;
+
+ preconditioner = new PETScWrappers::PreconditionBoomerAMG(mpi_communicator,
+ data);
+ }
+ else if (preconditioner_name == "BlockJacobi")
+ {
+ preconditioner = new PETScWrappers::PreconditionBlockJacobi(mpi_communicator);
+ }
+ else
+ {
+ AssertThrow (false, ExcMessage ("not supported preconditioner"));
+
+ // make compiler happy
+ preconditioner = new PETScWrappers::PreconditionJacobi(mpi_communicator);
+ }
+
+ dealii::SolverControl linear_solver_control (dof_handler.n_dofs(), 1e-15,/*log_history*/false,/*log_results*/false);
+ PETScWrappers::SolverCG linear_solver(linear_solver_control,mpi_communicator);
+ linear_solver.initialize(*preconditioner);
+
+ dealii::SolverControl solver_control (100, 1e-11,/*log_history*/false,/*log_results*/false);
+
+ dealii::SLEPcWrappers::SolverBase *eigensolver;
+
+ dealii::deallog<<solver_name<<std::endl;
+ // Get a handle on the wanted eigenspectrum solver
+ if (solver_name == "KrylovSchur")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverKrylovSchur (solver_control,
+ mpi_communicator);
+ }
+
+ else if (solver_name == "GeneralizedDavidson")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverGeneralizedDavidson (solver_control,
+ mpi_communicator);
+ }
+ else if (solver_name == "JacobiDavidson")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverJacobiDavidson (solver_control,
+ mpi_communicator);
+ }
+ else if (solver_name == "Lanczos")
+ {
+ eigensolver = new dealii::SLEPcWrappers::SolverLanczos (solver_control,
+ mpi_communicator);
+ }
+ else
+ {
+ AssertThrow (false, ExcMessage ("not supported eigensolver"));
+
+ // Make compiler happy and not complaining about non
+ // uninitialized variables
+ eigensolver = new dealii::SLEPcWrappers::SolverKrylovSchur (solver_control,
+ mpi_communicator);
+ }
+
+ // Set the initial vector. This is optional; if omitted, the initial vector is set to random values.
+ eigensolver->set_initial_space(eigenfunctions);
+
+ eigensolver->set_which_eigenpairs (EPS_LARGEST_REAL);
+ eigensolver->set_problem_type (EPS_HEP);
+
+ eigensolver->solve (stiffness_matrix,
+ eigenvalues,
+ eigenfunctions,
+ eigenfunctions.size());
+
+ // TODO make this robust on different platforms. Seems related to GHEP
+ // as solve_04 works ok.
+ //dealii::deallog << "outer iterations: "<< solver_control.last_step ()<<std::endl;
+ //dealii::deallog << "last inner iterations: "<<linear_solver_control.last_step()<<std::endl;
+ for (unsigned int i=0; i < eigenvalues.size(); i++)
+ dealii::deallog << eigenvalues[i] << std::endl;
+
+ delete preconditioner;
+ delete eigensolver;
+
+ // make sure that we have eigenvectors and they are orthonormal:
+ // a) (A*x_i-\lambda*x_i).L2() == 0
+ // b) x_j*x_i=\delta_{ij}
+ {
+ const double precision = 1e-5;
+ PETScWrappers::MPI::Vector Ax(eigenfunctions[0]);
+ for (unsigned int i=0; i < eigenfunctions.size(); ++i)
+ {
+ for (unsigned int j=0; j < eigenfunctions.size(); j++)
+ Assert( std::abs( eigenfunctions[j] * eigenfunctions[i] - (i==j))< precision,
+ ExcMessage("Eigenvectors " +
+ Utilities::int_to_string(i) +
+ " and " +
+ Utilities::int_to_string(j) +
+ " are not orthonormal!"));
+
+ stiffness_matrix.vmult(Ax,eigenfunctions[i]);
+ Ax.add(-1.0*eigenvalues[i],eigenfunctions[i]);
+ Assert (Ax.l2_norm() < precision,
+ ExcMessage(Utilities::to_string(Ax.l2_norm())));
+ }
+ }
+ }
+
+
+ dof_handler.clear ();
+ dealii::deallog << "Ok"<<std::endl;
+}
+
+
+int main (int argc,char **argv)
+{
+ std::ofstream logfile("output");
+ dealii::deallog.attach(logfile,/*do not print job id*/false);
+ dealii::deallog.depth_console(0);
+ dealii::deallog.threshold_double(eps);
+
+ try
+ {
+ dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ {
+ test ("KrylovSchur","Jacobi");
+ test ("KrylovSchur","BlockJacobi");
+ test ("KrylovSchur","Boomer");
+// test ("GeneralizedDavidson");
+// test ("JacobiDavidson");
+ }
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+DEAL::Jacobi
+DEAL::KrylovSchur
+DEAL::3.98719
+DEAL::3.98719
+DEAL::3.97768
+DEAL::3.97768
+DEAL::3.96194
+DEAL::Ok
+DEAL::BlockJacobi
+DEAL::KrylovSchur
+DEAL::3.98719
+DEAL::3.98719
+DEAL::3.97768
+DEAL::3.97768
+DEAL::3.96194
+DEAL::Ok
+DEAL::Boomer
+DEAL::KrylovSchur
+DEAL::3.98719
+DEAL::3.98719
+DEAL::3.97768
+DEAL::3.97768
+DEAL::3.96194
+DEAL::Ok