From: Michele Bucelli
Date: Thu, 14 Jan 2021 09:48:06 +0000 (+0100)
Subject: Step 40 with simplex
X-Git-Tag: v9.3.0-rc1~153^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8346993410d38f45c95e58113fc4d324e57432a5;p=dealii.git

Step 40 with simplex
---

diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 6d33e88605..d1be33ae19 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -85,6 +85,11 @@ jobs:
         make -j 2
       - name: test
         run: |
+          # Remove warning: "A high-performance Open MPI point-to-point
+          # messaging module was unable to find any relevant network
+          # interfaces."
+          export OMPI_MCA_btl_base_warn_component_unused='0'
+
           cd build
           make -j 2 setup_tests_simplex
           ctest --output-on-failure -j 2
diff --git a/tests/simplex/step-40.cc b/tests/simplex/step-40.cc
new file mode 100644
index 0000000000..a69f093ffe
--- /dev/null
+++ b/tests/simplex/step-40.cc
@@ -0,0 +1,436 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2009 - 2020 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Author: Wolfgang Bangerth, Texas A&M University, 2009, 2010
+ *         Timo Heister, University of Goettingen, 2009, 2010
+ */
+
+// Step 40 on a simplex mesh.
+// Modifications with respect to the original step-40:
+// - grid generation via GridGenerator::subdivided_hyper_rectangle and
+//   GridGenerator::subdivided_hyper_rectangle_with_simplices (instead of
+//   GridGenerator::hyper_cube);
+// - removed local, adaptive refinement.
+
+
+#include <deal.II/base/function.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/timer.h>
+
+#include <deal.II/lac/generic_linear_algebra.h>
+
+// #define FORCE_USE_OF_TRILINOS
+
+namespace LA
+{
+#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
+  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
+  using namespace dealii::LinearAlgebraPETSc;
+#  define USE_PETSC_LA
+#elif defined(DEAL_II_WITH_TRILINOS)
+  using namespace dealii::LinearAlgebraTrilinos;
+#else
+#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
+#endif
+} // namespace LA
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/numerics/vector_tools.h>
+
+// Simplex.
+#include <deal.II/distributed/fully_distributed_tria.h>
+
+#include <deal.II/fe/fe_simplex_p.h>
+#include <deal.II/fe/mapping_fe.h>
+
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria_description.h>
+
+// Flag to toggle between hex and simplex.
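+// When HEX is defined, the program reproduces the original hex-based
+// setup (MappingQGeneric, parallel::distributed::Triangulation, FE_Q and
+// QGauss); when it is left undefined, the simplex counterparts are used
+// instead (MappingFE, parallel::fullydistributed::Triangulation,
+// FE_SimplexP and QGaussSimplex).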
+// #define HEX
+
+namespace Step40
+{
+  using namespace dealii;
+
+  template <int dim>
+  class LaplaceProblem
+  {
+  public:
+    LaplaceProblem();
+
+    void
+    run();
+
+  private:
+    void
+    setup_system();
+
+    void
+    assemble_system();
+
+    void
+    solve();
+
+    void
+    output_results() const;
+
+    MPI_Comm mpi_communicator;
+
+#ifdef HEX
+    MappingQGeneric<dim>                      mapping;
+    parallel::distributed::Triangulation<dim> triangulation;
+    FE_Q<dim>                                 fe;
+#else
+    MappingFE<dim>                                 mapping;
+    parallel::fullydistributed::Triangulation<dim> triangulation;
+    FE_SimplexP<dim>                               fe;
+#endif
+
+    DoFHandler<dim> dof_handler;
+
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;
+
+    AffineConstraints<double> constraints;
+
+    LA::MPI::SparseMatrix system_matrix;
+    LA::MPI::Vector       locally_relevant_solution;
+    LA::MPI::Vector       system_rhs;
+
+    ConditionalOStream pcout;
+  };
+
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem()
+    : mpi_communicator(MPI_COMM_WORLD)
+#ifdef HEX
+    , mapping(1)
+    , triangulation(mpi_communicator,
+                    typename Triangulation<dim>::MeshSmoothing(
+                      Triangulation<dim>::smoothing_on_refinement |
+                      Triangulation<dim>::smoothing_on_coarsening))
+#else
+    , mapping(FE_SimplexP<dim>(1))
+    , triangulation(mpi_communicator)
+#endif
+    , fe(2)
+    , dof_handler(triangulation)
+    , pcout(std::cout,
+            (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
+  {}
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::setup_system()
+  {
+    dof_handler.distribute_dofs(fe);
+
+    locally_owned_dofs = dof_handler.locally_owned_dofs();
+    DoFTools::extract_locally_relevant_dofs(dof_handler,
+                                            locally_relevant_dofs);
+
+    locally_relevant_solution.reinit(locally_owned_dofs,
+                                     locally_relevant_dofs,
+                                     mpi_communicator);
+    system_rhs.reinit(locally_owned_dofs, mpi_communicator);
+
+    constraints.clear();
+    constraints.reinit(locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+    VectorTools::interpolate_boundary_values(
+      mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
+    constraints.close();
+
+    DynamicSparsityPattern dsp(locally_relevant_dofs);
+
+    DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
+    SparsityTools::distribute_sparsity_pattern(dsp,
+                                               dof_handler.locally_owned_dofs(),
+                                               mpi_communicator,
+                                               locally_relevant_dofs);
+
+    system_matrix.reinit(locally_owned_dofs,
+                         locally_owned_dofs,
+                         dsp,
+                         mpi_communicator);
+  }
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::assemble_system()
+  {
+#ifdef HEX
+    const QGauss<dim> quadrature_formula(fe.degree + 1);
+#else
+    const QGaussSimplex<dim> quadrature_formula(fe.degree + 1);
+#endif
+
+    FEValues<dim> fe_values(mapping,
+                            fe,
+                            quadrature_formula,
+                            update_values | update_gradients |
+                              update_quadrature_points | update_JxW_values);
+
+    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
+    const unsigned int n_q_points    = quadrature_formula.size();
+
+    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
+    Vector<double>     cell_rhs(dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
+
+    for (const auto &cell : dof_handler.active_cell_iterators())
+      if (cell->is_locally_owned())
+        {
+          cell_matrix = 0.;
+          cell_rhs    = 0.;
+
+          fe_values.reinit(cell);
+
+          for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
+            {
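+              // The forcing term is +1 above the interface curve
+              // y = 0.5 + 0.25 * sin(4 * pi * x) and -1 below it, as in
+              // the original step-40 tutorial program.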
+              const double rhs_value =
+                (fe_values.quadrature_point(q_point)[1] >
+                     0.5 +
+                       0.25 * std::sin(4.0 * numbers::PI *
+                                       fe_values.quadrature_point(q_point)[0]) ?
+                   1. :
+                   -1.);
+
+              for (unsigned int i = 0; i < dofs_per_cell; ++i)
+                {
+                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
+                    cell_matrix(i, j) += fe_values.shape_grad(i, q_point) *
+                                         fe_values.shape_grad(j, q_point) *
+                                         fe_values.JxW(q_point);
+
+                  cell_rhs(i) += rhs_value *                         //
+                                 fe_values.shape_value(i, q_point) * //
+                                 fe_values.JxW(q_point);
+                }
+            }
+
+          cell->get_dof_indices(local_dof_indices);
+          constraints.distribute_local_to_global(cell_matrix,
+                                                 cell_rhs,
+                                                 local_dof_indices,
+                                                 system_matrix,
+                                                 system_rhs);
+        }
+
+    system_matrix.compress(VectorOperation::add);
+    system_rhs.compress(VectorOperation::add);
+  }
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::solve()
+  {
+    LA::MPI::Vector completely_distributed_solution(locally_owned_dofs,
+                                                    mpi_communicator);
+
+    SolverControl solver_control(dof_handler.n_dofs(), 1e-12);
+
+#ifdef USE_PETSC_LA
+    LA::SolverCG solver(solver_control, mpi_communicator);
+#else
+    LA::SolverCG solver(solver_control);
+#endif
+
+    LA::MPI::PreconditionAMG preconditioner;
+
+    LA::MPI::PreconditionAMG::AdditionalData data;
+
+#ifdef USE_PETSC_LA
+    data.symmetric_operator = true;
+#else
+    /* Trilinos defaults are good */
+#endif
+    preconditioner.initialize(system_matrix, data);
+
+    solver.solve(system_matrix,
+                 completely_distributed_solution,
+                 system_rhs,
+                 preconditioner);
+
+    pcout << " Solved in " << solver_control.last_step() << " iterations."
+          << std::endl;
+
+    constraints.distribute(completely_distributed_solution);
+
+    locally_relevant_solution = completely_distributed_solution;
+  }
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::output_results() const
+  {
+    DataOut<dim> data_out;
+    data_out.attach_dof_handler(dof_handler);
+    data_out.add_data_vector(locally_relevant_solution, "u");
+
+    Vector<float> subdomain(triangulation.n_active_cells());
+    for (unsigned int i = 0; i < subdomain.size(); ++i)
+      subdomain(i) = triangulation.locally_owned_subdomain();
+    data_out.add_data_vector(subdomain, "subdomain");
+
+    data_out.build_patches(mapping);
+
+    data_out.write_vtu_with_pvtu_record(
+      "./", "solution", 0, mpi_communicator, 2, 8);
+  }
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::run()
+  {
+    pcout << "Running with "
+#ifdef USE_PETSC_LA
+          << "PETSc"
+#else
+          << "Trilinos"
+#endif
+          << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
+          << " MPI rank(s)..." << std::endl;
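+    // Create the mesh. In the simplex case the serial mesh is generated
+    // redundantly on every process, partitioned with
+    // GridTools::partition_triangulation(), and then converted into the
+    // construction data for a parallel::fullydistributed::Triangulation.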
+    const unsigned int n_subdivisions = 16;
+    Point<dim>         a, b;
+
+    switch (dim)
+      {
+        case 1:
+          a = Point<dim>(0);
+          b = Point<dim>(1);
+          break;
+        case 2:
+          a = Point<dim>(0, 0);
+          b = Point<dim>(1, 1);
+          break;
+        case 3:
+          a = Point<dim>(0, 0, 0);
+          b = Point<dim>(1, 1, 1);
+          break;
+        default:
+          Assert(false, ExcInternalError());
+      }
+
+#ifdef HEX
+    GridGenerator::subdivided_hyper_rectangle(
+      triangulation, std::vector<unsigned int>(dim, n_subdivisions), a, b);
+#else
+    auto construction_data = TriangulationDescription::Utilities::
+      create_description_from_triangulation_in_groups<dim, dim>(
+        [n_subdivisions, a, b](Triangulation<dim> &tria_serial) {
+          GridGenerator::subdivided_hyper_rectangle_with_simplices(
+            tria_serial, std::vector<unsigned int>(dim, n_subdivisions), a, b);
+        },
+        [](Triangulation<dim> &tria_serial,
+           const MPI_Comm      mpi_comm,
+           const unsigned int /* group_size */) {
+          GridTools::partition_triangulation(
+            Utilities::MPI::n_mpi_processes(mpi_comm), tria_serial);
+        },
+        mpi_communicator,
+        1);
+    triangulation.create_triangulation(construction_data);
+#endif
+
+    setup_system();
+
+    pcout << " Number of active cells: "
+          << triangulation.n_global_active_cells() << std::endl
+          << " Number of degrees of freedom: " << dof_handler.n_dofs()
+          << std::endl;
+
+    assemble_system();
+    solve();
+
+    if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
+      {
+        output_results();
+      }
+  }
+} // namespace Step40
+
+
+int
+main(int argc, char *argv[])
+{
+  try
+    {
+      using namespace dealii;
+      using namespace Step40;
+
+      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+      LaplaceProblem<2> laplace_problem_2d;
+      laplace_problem_2d.run();
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
diff --git a/tests/simplex/step-40.mpirun=1.with_petsc=true.output b/tests/simplex/step-40.mpirun=1.with_petsc=true.output
new file mode 100644
index 0000000000..35bb2044a5
--- /dev/null
+++ b/tests/simplex/step-40.mpirun=1.with_petsc=true.output
@@ -0,0 +1,4 @@
+Running with PETSc on 1 MPI rank(s)...
+ Number of active cells: 512
+ Number of degrees of freedom: 1089
+ Solved in 7 iterations.
diff --git a/tests/simplex/step-40.mpirun=4.with_petsc=true.output b/tests/simplex/step-40.mpirun=4.with_petsc=true.output
new file mode 100644
index 0000000000..41556ca6a2
--- /dev/null
+++ b/tests/simplex/step-40.mpirun=4.with_petsc=true.output
@@ -0,0 +1,4 @@
+Running with PETSc on 4 MPI rank(s)...
+ Number of active cells: 512
+ Number of degrees of freedom: 1089
+ Solved in 7 iterations.
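
To run this test locally, one can mirror the CI steps above (a sketch,
assuming an already configured deal.II build directory with PETSc or
Trilinos enabled; the test name follows the usual tests/<category>/<name>
convention):

    cd build
    make -j 2 setup_tests_simplex
    ctest -R simplex/step-40 --output-on-failure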