From da8ca18af1d857d30c98b1de74d7a7605024ff1a Mon Sep 17 00:00:00 2001
From: bangerth
Date: Mon, 15 Nov 2010 00:50:45 +0000
Subject: [PATCH] Some more comments.

git-svn-id: https://svn.dealii.org/trunk@22732 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/examples/step-40/step-40.cc | 62 ++++++++++++++++++++++-------
 1 file changed, 47 insertions(+), 15 deletions(-)

diff --git a/deal.II/examples/step-40/step-40.cc b/deal.II/examples/step-40/step-40.cc
index ee8a92bd96..dfb6df7c4d 100644
--- a/deal.II/examples/step-40/step-40.cc
+++ b/deal.II/examples/step-40/step-40.cc
@@ -177,7 +177,7 @@ class LaplaceProblem
     LaplaceProblem ();
     ~LaplaceProblem ();
 
-    void run (const unsigned int initial_global_refine);
+    void run ();
 
   private:
     void setup_system ();
@@ -554,10 +554,39 @@ void LaplaceProblem<dim>::output_results (const unsigned int cycle) const
 
 
 
+                                 // @sect4{LaplaceProblem::run}
+
+                                 // The function that controls the
+                                 // overall behavior of the program is
+                                 // again like the one in step-6. The
+                                 // minor differences are the use of
+                                 // pcout instead of
+                                 // std::cout for output
+                                 // to the console (see also step-17)
+                                 // and that we only generate
+                                 // graphical output if at most 32
+                                 // processors are involved. Without
+                                 // this limit, it would be just too
+                                 // easy for people carelessly running
+                                 // this program without reading it
+                                 // first to bring down the cluster
+                                 // interconnect and fill any file
+                                 // system available :-)
+                                 //
+                                 // A functional difference from step-6
+                                 // is the use of a square domain and
+                                 // that we start with a slightly
+                                 // finer mesh (5 global refinement
+                                 // cycles) -- there just isn't much
+                                 // of a point showing a massively
+                                 // parallel program starting on 4
+                                 // cells (although admittedly the
+                                 // point is only slightly stronger
+                                 // starting on 1024).
 template <int dim>
-void LaplaceProblem<dim>::run (const unsigned int initial_global_refine)
+void LaplaceProblem<dim>::run ()
 {
-  const unsigned int n_cycles = 12;
+  const unsigned int n_cycles = 8;
   for (unsigned int cycle=0; cycle<n_cycles; ++cycle)
     {
       pcout << "Cycle " << cycle << ':' << std::endl;
@@ -573,7 +602,7 @@ void LaplaceProblem<dim>::run (const unsigned int initial_global_refine)
       if (cycle == 0)
        {
          GridGenerator::hyper_cube (triangulation);
-         triangulation.refine_global (initial_global_refine);
+         triangulation.refine_global (5);
        }
      else
        refine_grid ();
@@ -582,7 +611,7 @@ void LaplaceProblem<dim>::run (const unsigned int initial_global_refine)
      assemble_system ();
      solve ();
 
-      if (Utilities::System::get_n_mpi_processes(mpi_communicator) <= 100)
+      if (Utilities::System::get_n_mpi_processes(mpi_communicator) <= 32)
        output_results (cycle);
 
      pcout << std::endl;
@@ -591,6 +620,17 @@
 
 
 
+                                 // @sect4{main()}
+
+                                 // The final function,
+                                 // main(), again has the
+                                 // same structure as in all other
+                                 // programs, in particular
+                                 // step-6. Like in the other programs
+                                 // that use PETSc, we have to
+                                 // initialize and finalize PETSc, which
+                                 // also initializes and finalizes the
+                                 // MPI subsystem.
 int main(int argc, char *argv[])
 {
   try
@@ -598,16 +638,8 @@ int main(int argc, char *argv[])
      PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
      deallog.depth_console (0);
 
-      int refine=5;
-      if (argc>1)
-        {
-          refine = (unsigned int)Utilities::string_to_int(argv[1]);
-        }
-
-      {
-        LaplaceProblem<2> laplace_problem_2d;
-        laplace_problem_2d.run (refine);
-      }
+      LaplaceProblem<2> laplace_problem_2d;
+      laplace_problem_2d.run ();
 
      PetscFinalize();
    }
-- 
2.39.5