From ba954dea061217679f06623a4a34348bc4b83bdf Mon Sep 17 00:00:00 2001
From: wolf
Date: Mon, 5 Apr 2004 19:28:35 +0000
Subject: [PATCH] More docs.

git-svn-id: https://svn.dealii.org/trunk@8973 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/examples/step-17/step-17.cc | 111 ++++++++++++++++++----
 1 file changed, 73 insertions(+), 38 deletions(-)

diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc
index 12b3c36f41..0390de16fd 100644
--- a/deal.II/examples/step-17/step-17.cc
+++ b/deal.II/examples/step-17/step-17.cc
@@ -840,49 +840,84 @@ unsigned int ElasticProblem<dim>::solve ()
 
                                    // The fifth step is to take the solution
                                    // just computed, and evaluate some kind of
-                                   // refinement indicator to refine the mesh.
+                                   // refinement indicator to refine the
+                                   // mesh. The problem is basically the same as
+                                   // with distributing hanging node
+                                   // constraints: in order to compute the error
+                                   // indicator, we need access to all elements
+                                   // of the solution vector. We then compute
+                                   // the indicators for the cells that belong
+                                   // to the present process, but then we need
+                                   // to distribute the refinement indicators
+                                   // into a distributed vector so that all
+                                   // processes have the values of the
+                                   // refinement indicator for all cells. But
+                                   // then, in order for each process to refine
+                                   // its copy of the mesh, they need to have
+                                   // access to all refinement indicators
+                                   // locally, so they have to copy the global
+                                   // vector back into a local one. That's a
+                                   // little convoluted, but, thinking about it,
+                                   // quite straightforward nevertheless. So
+                                   // here's how we do it:
 template <int dim>
 void ElasticProblem<dim>::refine_grid ()
 {
-  Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
-
-                                   // XXX
-  {
-    PETScWrappers::Vector localized_solution (solution);
-    Vector<float> local_error_per_cell (triangulation.n_active_cells());
-
-    typename FunctionMap<dim>::type neumann_boundary;
-    KellyErrorEstimator<dim>::estimate (dof_handler,
-                                        QGauss2<dim-1>(),
-                                        neumann_boundary,
-                                        localized_solution,
-                                        local_error_per_cell,
-                                        std::vector<bool>(),
-                                        0,
-                                        multithread_info.n_default_threads,
-                                        this_mpi_process);
-
-    const unsigned int local_cells
-      = (n_mpi_processes == 1 ?
-         triangulation.n_active_cells() :
-         (this_mpi_process != n_mpi_processes-1 ?
-          triangulation.n_active_cells() / n_mpi_processes :
-          triangulation.n_active_cells() - triangulation.n_active_cells() / n_mpi_processes * (n_mpi_processes-1)));
-    PETScWrappers::MPI::Vector
-      global_error_per_cell (mpi_communicator,
-                             triangulation.n_active_cells(),
-                             local_cells);
-
-
-    for (unsigned int i=0; i<local_error_per_cell.size(); ++i)
-      if (local_error_per_cell(i) != 0)
-        global_error_per_cell(i) = local_error_per_cell(i);
-    global_error_per_cell.compress ();
-
-    estimated_error_per_cell = global_error_per_cell;
-  }
+  PETScWrappers::Vector localized_solution (solution);
+  Vector<float> local_error_per_cell (triangulation.n_active_cells());
+  KellyErrorEstimator<dim>::estimate (dof_handler,
+                                      QGauss2<dim-1>(),
+                                      typename FunctionMap<dim>::type(),
+                                      localized_solution,
+                                      local_error_per_cell,
+                                      std::vector<bool>(),
+                                      0,
+                                      multithread_info.n_default_threads,
+                                      this_mpi_process);
+
+  const unsigned int local_cells
+    = (n_mpi_processes == 1 ?
+       triangulation.n_active_cells() :
+       (this_mpi_process != n_mpi_processes-1 ?
+        triangulation.n_active_cells() / n_mpi_processes :
+        triangulation.n_active_cells() - triangulation.n_active_cells() / n_mpi_processes * (n_mpi_processes-1)));
+  PETScWrappers::MPI::Vector
+    global_error_per_cell (mpi_communicator,
+                           triangulation.n_active_cells(),
+                           local_cells);
+
+
+  for (unsigned int i=0; i<local_error_per_cell.size(); ++i)
+    if (local_error_per_cell(i) != 0)
+      global_error_per_cell(i) = local_error_per_cell(i);
+  global_error_per_cell.compress ();
+
+  const Vector<float> estimated_error_per_cell (global_error_per_cell);
+
   GridRefinement::refine_and_coarsen_fixed_number (triangulation,
                                                    estimated_error_per_cell,
                                                    0.3, 0.03);
-- 
2.39.5
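
The comment introduced by this patch describes a gather pattern: each MPI process computes error
indicators only for the cells it owns, the partial results are merged through a distributed PETSc
vector, and every process then localizes the merged vector so it can refine its own copy of the
mesh. The following is a minimal, deal.II-free MPI sketch of that same pattern, not code from the
patch; the number of cells, the cell-ownership rule, and the indicator values are made up for the
illustration.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);

  int rank, size;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  // pretend global number of active cells; a stand-in for
  // triangulation.n_active_cells() in the patch
  const unsigned int n_cells = 16;

  // each process fills only the entries of the cells it "owns";
  // all other entries stay zero, like local_error_per_cell above
  std::vector<float> local_indicators (n_cells, 0.f);
  for (unsigned int cell = 0; cell < n_cells; ++cell)
    if (cell % size == static_cast<unsigned int>(rank))
      local_indicators[cell] = 1.0f + cell;   // stand-in for the Kelly estimate

  // merge: every entry is non-zero on exactly one process, so an
  // element-wise sum gives every process the complete indicator
  // vector -- the same effect the patch obtains by writing into a
  // PETScWrappers::MPI::Vector, calling compress(), and localizing
  // the result again
  std::vector<float> global_indicators (n_cells, 0.f);
  MPI_Allreduce (local_indicators.data(), global_indicators.data(),
                 n_cells, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);

  if (rank == 0)
    for (unsigned int cell = 0; cell < n_cells; ++cell)
      std::printf ("cell %u: indicator %f\n", cell, global_indicators[cell]);

  MPI_Finalize ();
  return 0;
}

Since the unowned entries are zero and the indicators are non-negative, a MAX reduction would work
just as well as the sum here; the patch simply relies on PETSc's own merge because the indicator
values already live in a PETSc vector.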