From: Denis Davydov
Date: Sun, 9 Aug 2015 21:08:08 +0000 (+0200)
Subject: added PArpack unit tests and a note to changes.h
X-Git-Tag: v8.4.0-rc2~396^2~7
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=017d34a54c96c9c3269bb948914851c4d69966b1;p=dealii.git

added PArpack unit tests and a note to changes.h
---

diff --git a/doc/news/changes.h b/doc/news/changes.h
index 29a4d4626e..39c2b54c99 100644
--- a/doc/news/changes.h
+++ b/doc/news/changes.h
@@ -149,14 +149,12 @@ inconvenience this causes.
 
 <h3>General</h3>
 
 <ol>
-  <li> New: implemented the gradient method for
-  InterpolatedTensorProductGridData
+  <li> New: PArpackSolver eigensolver interface class.
   <br>
-  (Daniel Shapero, 2015/08/12)
+  (Denis Davydov, 2015/09/17)
   </li>
 
   <li> Changed: All doxygen-generated pages now contain a link to the
   tutorial in their top-level menus.
   <br>
@@ -288,6 +286,13 @@ inconvenience this causes.
   (Jason Sheldon, Wolfgang Bangerth, 2015/08/13)
   </li>
 
+  <li> New: implemented the gradient method for
+  InterpolatedTensorProductGridData
+  <br>
+  (Daniel Shapero, 2015/08/12)
+  </li>
+
   <li> New: FE_RannacherTurek describes a discontinuous FiniteElement
   with vanishing mean values of jumps across faces.
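The new PArpackSolver class announced in the changelog entry above is exercised by the two tests added below. The following condensed sketch shows the call sequence those tests use. The wrapper function, header names, and argument names are illustrative assumptions; the AdditionalData, reinit() and solve() calls themselves follow the tests.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/parpack_solver.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>

#include <complex>
#include <vector>

// Illustrative only: A and B are the assembled stiffness and mass matrices,
// `inverse` is any object whose vmult() applies the inverse operator needed
// by the spectral transformation (a CG solve wrapped in the PETScInverse
// helper in the first test below). `eigenfunctions` must already be sized
// and reinit()ed to the parallel partitioning.
template <typename InverseOperator>
void solve_eigenproblem (const dealii::PETScWrappers::MPI::SparseMatrix  &A,
                         const dealii::PETScWrappers::MPI::SparseMatrix  &B,
                         const InverseOperator                           &inverse,
                         const dealii::IndexSet                          &locally_owned_dofs,
                         const MPI_Comm                                   mpi_communicator,
                         const unsigned int                               n_eigenvalues,
                         std::vector<dealii::PETScWrappers::MPI::Vector> &eigenfunctions,
                         std::vector<std::complex<double> >              &lambda)
{
  typedef dealii::PArpackSolver<dealii::PETScWrappers::MPI::Vector> Solver;

  dealii::SolverControl solver_control (A.m(), 1e-9);

  // The tests use 2*n+2 Arnoldi vectors for n requested eigenpairs.
  const unsigned int num_arnoldi_vectors = 2 * n_eigenvalues + 2;

  Solver::AdditionalData additional_data (num_arnoldi_vectors,
                                          Solver::largest_magnitude,
                                          /*symmetric*/ true);

  Solver eigensolver (solver_control, mpi_communicator, additional_data);
  eigensolver.reinit (locally_owned_dofs);

  // Eigenvalues are returned as complex numbers in `lambda`, eigenvectors
  // in `eigenfunctions`.
  eigensolver.solve (A, B, inverse, lambda, eigenfunctions, n_eigenvalues);
}

The second test additionally calls eigensolver.set_shift() and passes a shift-and-invert operator to solve() instead of a plain inverse.
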
        diff --git a/tests/arpack/step-36_parpack.cc b/tests/arpack/step-36_parpack.cc new file mode 100644 index 0000000000..b37eb61ed0 --- /dev/null +++ b/tests/arpack/step-36_parpack.cc @@ -0,0 +1,348 @@ +#include "../tests.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + + +#include +#include +#include +#include + +#include + +#include +#include + +const unsigned int dim = 2;//run in 2d to save time + +const double eps = 1e-10; + +template +std::vector +locally_owned_dofs_per_subdomain (const DH &dof_handler) +{ + std::vector< dealii::types::subdomain_id > subdomain_association (dof_handler.n_dofs ()); + dealii::DoFTools::get_subdomain_association (dof_handler, subdomain_association); + + const unsigned int n_subdomains = 1 + (*max_element (subdomain_association.begin (), + subdomain_association.end () )); + + std::vector index_sets (n_subdomains,dealii::IndexSet(dof_handler.n_dofs())); + + // loop over subdomain_association and populate IndexSet when a + // change in subdomain ID is found + dealii::types::global_dof_index i_min = 0; + dealii::types::global_dof_index this_subdomain = subdomain_association[0]; + + for (dealii::types::global_dof_index index = 1; + index < subdomain_association.size (); ++index) + { + //found index different from the current one + if (subdomain_association[index] != this_subdomain) + { + index_sets[this_subdomain].add_range (i_min, index); + i_min = index; + this_subdomain = subdomain_association[index]; + } + } + + // the very last element is of different index + if (i_min == subdomain_association.size () - 1) + { + index_sets[this_subdomain].add_index (i_min); + } + + // otherwise there are at least two different indices + else + { + index_sets[this_subdomain].add_range ( + i_min, subdomain_association.size ()); + } + + for (unsigned int i = 0; i < n_subdomains; i++) + index_sets[i].compress (); + + return index_sets; +} //locally_owned_dofs_per_subdomain + +class PETScInverse +{ +public: + PETScInverse(const dealii::PETScWrappers::MatrixBase &A, dealii::SolverControl &cn,const MPI_Comm &mpi_communicator = PETSC_COMM_SELF): + solver(cn,mpi_communicator), + matrix(A), + preconditioner(matrix) + { + + } + + void vmult ( dealii::PETScWrappers::MPI::Vector &dst, + const dealii::PETScWrappers::MPI::Vector &src) const + { + ; + solver.solve(matrix, dst, src,preconditioner); + } + + +private: + mutable dealii::PETScWrappers::SolverCG solver; + const dealii::PETScWrappers::MatrixBase &matrix; + PETScWrappers::PreconditionBlockJacobi preconditioner; + +}; + +void test () +{ + const unsigned int global_mesh_refinement_steps = 5; + const unsigned int number_of_eigenvalues = 5; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + const unsigned int n_mpi_processes = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator); + const unsigned int this_mpi_process = dealii::Utilities::MPI::this_mpi_process(mpi_communicator); + + + dealii::Triangulation triangulation; + dealii::DoFHandler dof_handler(triangulation); + dealii::FE_Q fe(1); + dealii::ConstraintMatrix constraints; + dealii::IndexSet locally_owned_dofs; + dealii::IndexSet locally_relevant_dofs; + + std::vector eigenfunctions; + std::vector eigenvalues; + dealii::PETScWrappers::MPI::SparseMatrix stiffness_matrix, mass_matrix; + + dealii::GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (global_mesh_refinement_steps); + + // METIS: + 
//dealii::GridTools::partition_triangulation (n_mpi_processes, triangulation); + + // partition by hand + { + const double x0 = -1.0; + const double x1 = 1.0; + const double dL = (x1-x0) / n_mpi_processes; + + dealii::Triangulation::active_cell_iterator + cell = triangulation.begin_active(), + endc = triangulation.end(); + for (; cell!=endc; ++cell) + { + const dealii::Point ¢er = cell->center(); + const double x = center[0]; + + const unsigned int id = std::floor ( (x-x0)/dL); + cell->set_subdomain_id (id); + } + } + + dof_handler.distribute_dofs (fe); + dealii::DoFRenumbering::subdomain_wise (dof_handler); + std::vector locally_owned_dofs_per_processor + = locally_owned_dofs_per_subdomain (dof_handler); + locally_owned_dofs = locally_owned_dofs_per_processor[this_mpi_process]; + locally_relevant_dofs.clear(); + dealii::DoFTools::extract_locally_relevant_dofs (dof_handler, + locally_relevant_dofs); + + constraints.clear(); + constraints.reinit (locally_relevant_dofs); + dealii::DoFTools::make_hanging_node_constraints (dof_handler, constraints); + dealii::VectorTools::interpolate_boundary_values (dof_handler, + 0, + dealii::ZeroFunction (), + constraints); + constraints.close (); + + dealii::CompressedSimpleSparsityPattern csp (locally_relevant_dofs); + // Fill in ignoring all cells that are not locally owned + dealii::DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, + /* keep constrained dofs */ true); + std::vector n_locally_owned_dofs(n_mpi_processes); + for (unsigned int i = 0; i < n_mpi_processes; i++) + n_locally_owned_dofs[i] = locally_owned_dofs_per_processor[i].n_elements(); + + dealii::SparsityTools::distribute_sparsity_pattern + (csp, + n_locally_owned_dofs, + mpi_communicator, + locally_relevant_dofs); + + // Initialise the stiffness and mass matrices + stiffness_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + mass_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + eigenfunctions.resize (5); + for (unsigned int i=0; i quadrature_formula(2); + dealii::FEValues fe_values (fe, quadrature_formula, + dealii::update_values | + dealii::update_gradients | + dealii::update_quadrature_points | + dealii::update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + dealii::FullMatrix cell_stiffness_matrix (dofs_per_cell, dofs_per_cell); + dealii::FullMatrix cell_mass_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + typename dealii::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active (), + endc = dof_handler.end (); + for (; cell!=endc; ++cell) + if (cell->subdomain_id() == this_mpi_process) + { + fe_values.reinit (cell); + cell_stiffness_matrix = 0; + cell_mass_matrix = 0; + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + constraints + .distribute_local_to_global (cell_stiffness_matrix, + local_dof_indices, + stiffness_matrix); + constraints + .distribute_local_to_global (cell_mass_matrix, + local_dof_indices, + mass_matrix); + } + + stiffness_matrix.compress (dealii::VectorOperation::add); + mass_matrix.compress (dealii::VectorOperation::add); + + // test Arpack + { + std::vector > lambda(eigenfunctions.size()); + + for (unsigned int i=0; i < eigenvalues.size(); i++) + eigenfunctions[i] = PetscScalar(); + + dealii::SolverControl solver_control (dof_handler.n_dofs(), 1e-9,/*log_history*/false,/*log_results*/false); + 
PETScInverse inverse(stiffness_matrix,solver_control,mpi_communicator); + const unsigned int num_arnoldi_vectors = 2*eigenvalues.size() + 2; + + dealii::PArpackSolver::AdditionalData + additional_data(num_arnoldi_vectors, + dealii::PArpackSolver::largest_magnitude, + true); + + dealii::PArpackSolver eigensolver (solver_control, + mpi_communicator, + additional_data); + eigensolver.reinit(locally_owned_dofs); + eigensolver.solve (stiffness_matrix, + mass_matrix, + inverse, + lambda, + eigenfunctions, + eigenvalues.size()); + + for (unsigned int i = 0; i < lambda.size(); i++) + eigenvalues[i] = lambda[i].real(); + + for (unsigned int i=0; i < eigenvalues.size(); i++) + dealii::deallog << eigenvalues[i] << std::endl; + + } + + + dof_handler.clear (); + dealii::deallog << "Ok"< +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + + +#include +#include +#include +#include + +#include + +#include +#include + +const unsigned int dim = 2;//run in 2d to save time + +using namespace dealii; + +const double eps = 1e-10; + +template +std::vector +locally_owned_dofs_per_subdomain (const DH &dof_handler) +{ + std::vector< types::subdomain_id > subdomain_association (dof_handler.n_dofs ()); + DoFTools::get_subdomain_association (dof_handler, subdomain_association); + + const unsigned int n_subdomains = 1 + (*max_element (subdomain_association.begin (), + subdomain_association.end () )); + + std::vector index_sets (n_subdomains,IndexSet(dof_handler.n_dofs())); + + // loop over subdomain_association and populate IndexSet when a + // change in subdomain ID is found + types::global_dof_index i_min = 0; + types::global_dof_index this_subdomain = subdomain_association[0]; + + for (types::global_dof_index index = 1; + index < subdomain_association.size (); ++index) + { + //found index different from the current one + if (subdomain_association[index] != this_subdomain) + { + index_sets[this_subdomain].add_range (i_min, index); + i_min = index; + this_subdomain = subdomain_association[index]; + } + } + + // the very last element is of different index + if (i_min == subdomain_association.size () - 1) + { + index_sets[this_subdomain].add_index (i_min); + } + + // otherwise there are at least two different indices + else + { + index_sets[this_subdomain].add_range ( + i_min, subdomain_association.size ()); + } + + for (unsigned int i = 0; i < n_subdomains; i++) + index_sets[i].compress (); + + return index_sets; +} //locally_owned_dofs_per_subdomain + + +void test () +{ + const unsigned int global_mesh_refinement_steps = 5; + const unsigned int number_of_eigenvalues = 5; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator); + const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); + + + Triangulation triangulation; + DoFHandler dof_handler(triangulation); + FE_Q fe(1); + ConstraintMatrix constraints; + IndexSet locally_owned_dofs; + IndexSet locally_relevant_dofs; + + std::vector eigenfunctions; + std::vector eigenvalues; + TrilinosWrappers::SparseMatrix stiffness_matrix, mass_matrix; + + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (global_mesh_refinement_steps); + + //METIS: + //GridTools::partition_triangulation (n_mpi_processes, triangulation); + + // partition by hand + { + const double x0 = -1.0; + const double x1 = 1.0; + const double dL = (x1-x0) / n_mpi_processes; 
+ + Triangulation::active_cell_iterator + cell = triangulation.begin_active(), + endc = triangulation.end(); + for (; cell!=endc; ++cell) + { + const Point ¢er = cell->center(); + const double x = center[0]; + + const unsigned int id = std::floor ( (x-x0)/dL); + cell->set_subdomain_id (id); + } + } + + + dof_handler.distribute_dofs (fe); + DoFRenumbering::subdomain_wise (dof_handler); + std::vector locally_owned_dofs_per_processor + = locally_owned_dofs_per_subdomain (dof_handler); + locally_owned_dofs = locally_owned_dofs_per_processor[this_mpi_process]; + locally_relevant_dofs.clear(); + DoFTools::extract_locally_relevant_dofs (dof_handler, + locally_relevant_dofs); + + constraints.clear(); + constraints.reinit (locally_relevant_dofs); + DoFTools::make_hanging_node_constraints (dof_handler, constraints); + VectorTools::interpolate_boundary_values (dof_handler, + 0, + ZeroFunction (), + constraints); + constraints.close (); + + CompressedSimpleSparsityPattern csp (locally_relevant_dofs); + // Fill in ignoring all cells that are not locally owned + DoFTools::make_sparsity_pattern (dof_handler, csp, + constraints, + /* keep constrained dofs */ true); + std::vector n_locally_owned_dofs(n_mpi_processes); + for (unsigned int i = 0; i < n_mpi_processes; i++) + n_locally_owned_dofs[i] = locally_owned_dofs_per_processor[i].n_elements(); + + SparsityTools::distribute_sparsity_pattern + (csp, + n_locally_owned_dofs, + mpi_communicator, + locally_relevant_dofs); + + // Initialise the stiffness and mass matrices + stiffness_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + mass_matrix.reinit (locally_owned_dofs, + locally_owned_dofs, + csp, + mpi_communicator); + + eigenfunctions.resize (5); + for (unsigned int i=0; i quadrature_formula(2); + FEValues fe_values (fe, quadrature_formula, + update_values | + update_gradients | + update_quadrature_points | + update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_stiffness_matrix (dofs_per_cell, dofs_per_cell); + FullMatrix cell_mass_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active (), + endc = dof_handler.end (); + for (; cell!=endc; ++cell) + if (cell->subdomain_id() == this_mpi_process) + { + fe_values.reinit (cell); + cell_stiffness_matrix = 0; + cell_mass_matrix = 0; + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + constraints + .distribute_local_to_global (cell_stiffness_matrix, + local_dof_indices, + stiffness_matrix); + constraints + .distribute_local_to_global (cell_mass_matrix, + local_dof_indices, + mass_matrix); + } + + stiffness_matrix.compress (VectorOperation::add); + mass_matrix.compress (VectorOperation::add); + + // test Arpack + { + const double shift = 4.0; + std::vector > lambda(eigenfunctions.size()); + + for (unsigned int i=0; i < eigenvalues.size(); i++) + eigenfunctions[i] = PetscScalar(); + + SolverControl solver_control (dof_handler.n_dofs(), 1e-9,/*log_history*/false,/*log_results*/false); + SolverControl solver_control_lin (dof_handler.n_dofs(), 1e-10,/*log_history*/false,/*log_results*/false); + + PArpackSolver::Shift shifted_matrix(stiffness_matrix,mass_matrix,shift); + TrilinosWrappers::PreconditionIdentity preconditioner; + IterativeInverse shift_and_invert; + shift_and_invert.initialize(shifted_matrix,preconditioner); + 
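+      // Shift-and-invert spectral transformation: `shifted_matrix` wraps the
+      // shifted operator (A - sigma*B) for the shift chosen above, and
+      // `shift_and_invert` applies its inverse through an iterative solve,
+      // so that eigenvalues of A x = lambda B x nearest the shift converge
+      // first in the Arnoldi iteration.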
+      shift_and_invert.solver.select("cg");
+      static ReductionControl inner_control_c(/*maxiter*/stiffness_matrix.m(),
+                                              /*tolerance (global)*/ 0.0,
+                                              /*reduce (w.r.t. initial)*/ 1.e-13);
+      shift_and_invert.solver.set_control(inner_control_c);
+
+      const unsigned int num_arnoldi_vectors = 2*eigenvalues.size() + 2;
+
+      PArpackSolver<TrilinosWrappers::MPI::Vector>::AdditionalData
+      additional_data(num_arnoldi_vectors,
+                      PArpackSolver<TrilinosWrappers::MPI::Vector>::largest_magnitude,
+                      true);
+
+      PArpackSolver<TrilinosWrappers::MPI::Vector> eigensolver (solver_control,
+                                                                mpi_communicator,
+                                                                additional_data);
+      eigensolver.reinit(locally_owned_dofs);
+      eigensolver.set_shift(shift);
+
+      // avoid output of iterative solver:
+      const unsigned int previous_depth = deallog.depth_file(0);
+      eigensolver.solve (stiffness_matrix,
+                         mass_matrix,
+                         shift_and_invert,
+                         lambda,
+                         eigenfunctions,
+                         eigenvalues.size());
+      deallog.depth_file(previous_depth);
+
+      for (unsigned int i = 0; i < lambda.size(); i++)
+        eigenvalues[i] = lambda[i].real();
+
+      for (unsigned int i=0; i < eigenvalues.size(); i++)
+        deallog << eigenvalues[i] << std::endl;
+
+    }
+
+
+  dof_handler.clear ();
+  deallog << "Ok" << std::endl;
+}
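
A deal.II test driver of this kind normally ends with a short main() that redirects deallog to an output file, initializes MPI, and calls test(). The following is a sketch under that assumption; the output file name, thread limit and exact structure are illustrative, not taken from the commit.

#include <fstream>

int main (int argc, char **argv)
{
  // Illustrative driver: the exact form in the committed tests may differ.
  std::ofstream logfile ("output");
  dealii::deallog.attach (logfile);
  dealii::deallog.depth_console (0);

  // Limit to one thread per MPI process so the output stays deterministic.
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  test ();

  return 0;
}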