From: heister
Date: Fri, 25 Apr 2014 21:36:42 +0000 (+0000)
Subject: new step-18 using shared_tria
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7707354ace46fe9a93400711a08279561e576d63;p=dealii-svn.git

new step-18 using shared_tria

git-svn-id: https://svn.dealii.org/branches/branch_sharedtria@32837 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc
index ff5f5bb619..58516741bb 100644
--- a/deal.II/examples/step-18/step-18.cc
+++ b/deal.II/examples/step-18/step-18.cc
@@ -30,12 +30,15 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -430,10 +433,8 @@ namespace Step18
     // timestep:
     void update_quadrature_point_history ();

-    // After the member functions, here are the member variables. The first
-    // ones have all been discussed in more detail in previous example
-    // programs:
-    Triangulation<dim>   triangulation;
+    // This is the new shared Triangulation:
+    parallel::shared::Triangulation<dim> triangulation;

     FESystem<dim>        fe;

@@ -524,10 +525,9 @@ namespace Step18
     // freedom that are stored on the processor with that particular number:
     std::vector<types::global_dof_index> local_dofs_per_process;

-    // Next, how many degrees of freedom the present processor stores. This
-    // is, of course, an abbreviation to
-    // local_dofs_per_process[this_mpi_process].
-    types::global_dof_index n_local_dofs;
+    // We are storing the locally owned and the locally relevant indices:
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;

     // In the same direction, also cache how many cells the present processor
     // owns. Note that the cells that belong to a processor are not
@@ -752,6 +752,7 @@ namespace Step18
   template <int dim>
   TopLevel<dim>::TopLevel ()
     :
+    triangulation(MPI_COMM_WORLD),
     fe (FE_Q<dim>(1), dim),
     dof_handler (triangulation),
     quadrature_formula (2),
@@ -896,10 +897,7 @@ namespace Step18

     // As the final step, we need to set up a clean state of the data that we
     // store in the quadrature points on all cells that are treated on the
-    // present processor. To do so, we also have to know which processors are
-    // ours in the first place. This is done in the following two function
-    // calls:
-    GridTools::partition_triangulation (n_mpi_processes, triangulation);
+    // present processor.
     setup_quadrature_point_history ();
   }

@@ -923,25 +921,17 @@ namespace Step18
   void TopLevel<dim>::setup_system ()
   {
     dof_handler.distribute_dofs (fe);
-    DoFRenumbering::subdomain_wise (dof_handler);
+    locally_owned_dofs = dof_handler.locally_owned_dofs();
+    DoFTools::extract_locally_relevant_dofs (dof_handler,locally_relevant_dofs);

     // The next thing is to store some information for later use on how many
     // cells or degrees of freedom the present processor, or any of the
     // processors has to work on. First the cells local to this processor...
     n_local_cells = GridTools::count_cells_with_subdomain_association (triangulation,
-                                                                        this_mpi_process);
-
-    // ...and then a list of numbers of how many degrees of freedom each
-    // processor has to handle:
-    local_dofs_per_process.resize (n_mpi_processes);
-    for (unsigned int i=0; i<n_mpi_processes; ++i)
-    // Note that we have used the CompressedSparsityPattern class
-    // here that was already introduced in step-11, rather than the
+    CompressedSimpleSparsityPattern csp (locally_relevant_dofs);
+    DoFTools::make_sparsity_pattern (dof_handler, csp, hanging_node_constraints, /*keep constrained dofs*/ false);
+    SparsityTools::distribute_sparsity_pattern (csp,
+                                                local_dofs_per_process,
+                                                mpi_communicator,
+                                                locally_relevant_dofs);
+
+    // Note that we have used the CompressedSimpleSparsityPattern class
+    // here, rather than the
     // SparsityPattern class that we have used in all other
     // cases. The reason for this is that for the latter class to work we have
     // to give an initial upper bound for the number of entries in each row, a
@@ -989,11 +982,12 @@ namespace Step18
     // too much memory can lead to out-of-memory situations.
     //
     // In order to avoid this, we resort to the
-    // CompressedSparsityPattern class that is slower but does
+    // CompressedSimpleSparsityPattern class that is slower but does
     // not require any up-front estimate on the number of nonzero entries per
     // row. It therefore only ever allocates as much memory as it needs at any
     // given time, and we can build it even for large 3d problems.
     //
+    // TODO: this is no longer true:
     // It is also worth noting that the sparsity pattern we construct is
     // global, i.e. comprises all degrees of freedom whether they will be
     // owned by the processor we are on or another one (in case this program
@@ -1003,29 +997,16 @@ namespace Step18
     // not scale well. However, there are several more places in the program
     // in which we do this, for example we always keep the global
     // triangulation and DoF handler objects around, even if we only work on
-    // part of them. At present, deal.II does not have the necessary
-    // facilities to completely distribute these objects (a task that, indeed,
-    // is very hard to achieve with adaptive meshes, since well-balanced
-    // subdivisions of a domain tend to become unbalanced as the mesh is
-    // adaptively refined).
+    // part of them.
     //
     // With this data structure, we can then go to the PETSc sparse matrix and
     // tell it to preallocate all the entries we will later want to write to:
-    system_matrix.reinit (mpi_communicator,
-                          sparsity_pattern,
-                          local_dofs_per_process,
-                          local_dofs_per_process,
-                          this_mpi_process);
-    // After this point, no further explicit knowledge of the sparsity pattern
-    // is required any more and we can let the sparsity_pattern
-    // variable go out of scope without any problem.
-
-    // The last task in this function is then only to reset the right hand
-    // side vector as well as the solution vector to its correct size;
-    // remember that the solution vector is a local one, unlike the right hand
-    // side that is a distributed %parallel one and therefore needs to know
-    // the MPI communicator over which it is supposed to transmit messages:
-    system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);
+    system_matrix.reinit (locally_owned_dofs,
+                          locally_owned_dofs,
+                          csp,
+                          mpi_communicator);
+
+    system_rhs.reinit(locally_owned_dofs,mpi_communicator);
     incremental_displacement.reinit (dof_handler.n_dofs());
   }

@@ -1072,7 +1053,7 @@ namespace Step18
       cell = dof_handler.begin_active(),
       endc = dof_handler.end();
     for (; cell!=endc; ++cell)
-      if (cell->subdomain_id() == this_mpi_process)
+      if (cell->is_locally_owned() )
        {
          cell_matrix = 0;
          cell_rhs = 0;
@@ -1206,8 +1187,7 @@ namespace Step18
                                               boundary_values,
                                               fe.component_mask(z_component));

-    PETScWrappers::MPI::Vector tmp (mpi_communicator, dof_handler.n_dofs(),
-                                    n_local_dofs);
+    PETScWrappers::MPI::Vector tmp (locally_owned_dofs,mpi_communicator);
     MatrixTools::apply_boundary_values (boundary_values,
                                         system_matrix, tmp,
                                         system_rhs, false);
@@ -1259,9 +1239,7 @@ namespace Step18
   unsigned int TopLevel<dim>::solve_linear_problem ()
   {
     PETScWrappers::MPI::Vector
-    distributed_incremental_displacement (mpi_communicator,
-                                          dof_handler.n_dofs(),
-                                          n_local_dofs);
+    distributed_incremental_displacement (locally_owned_dofs,mpi_communicator);
     distributed_incremental_displacement = incremental_displacement;

     SolverControl solver_control (dof_handler.n_dofs(),
@@ -1291,87 +1269,12 @@ namespace Step18
   // 0 will write the record files the reference all the .vtu files.
   //
   // The crucial part of this function is to give the DataOut
-  // class a way to only work on the cells that the present process owns. This
-  // class is already well-equipped for that: it has two virtual functions
-  // first_cell and next_cell that return the first
-  // cell to be worked on, and given one cell return the next cell to be
-  // worked on. By default, these functions return the first active cell
-  // (i.e. the first one that has no children) and the next active cell. What
-  // we have to do here is derive a class from DataOut that
-  // overloads these two functions to only iterate over those cells with the
-  // right subdomain indicator.
-  //
-  // We do this at the beginning of this function. The first_cell
-  // function just starts with the first active cell, and then iterates to the
-  // next cells while the cell presently under consideration does not yet have
-  // the correct subdomain id. The only thing that needs to be taken care of
-  // is that we don't try to keep iterating when we have hit the end iterator.
-  //
-  // The next_cell function could be implemented in a similar
-  // way. However, we use this occasion as a pretext to introduce one more
-  // thing that the library offers: filtered iterators. These are wrappers for
-  // the iterator classes that just skip all cells (or faces, lines, etc) that
-  // do not satisfy a certain predicate (a predicate in computer-lingo is a
-  // function that when applied to a data element either returns true or
-  // false). In the present case, the predicate is that the cell has to have a
-  // certain subdomain id, and the library already has this predicate built
-  // in. If the cell iterator is not the end iterator, what we then have to do
-  // is to initialize such a filtered iterator with the present cell and the
-  // predicate, and then increase the iterator exactly once. While the more
-  // conventional loop would probably not have been much longer, this is
-  // definitely the more elegant way -- and then, these example programs also
-  // serve the purpose of introducing what is available in deal.II.
-  template <int dim>
-  class FilteredDataOut : public DataOut<dim>
-  {
-  public:
-    FilteredDataOut (const unsigned int subdomain_id)
-      :
-      subdomain_id (subdomain_id)
-    {}
-
-    virtual typename DataOut<dim>::cell_iterator
-    first_cell ()
-    {
-      typename DataOut<dim>::active_cell_iterator
-      cell = this->dofs->begin_active();
-      while ((cell != this->dofs->end()) &&
-             (cell->subdomain_id() != subdomain_id))
-        ++cell;
-
-      return cell;
-    }
-
-    virtual typename DataOut<dim>::cell_iterator
-    next_cell (const typename DataOut<dim>::cell_iterator &old_cell)
-    {
-      if (old_cell != this->dofs->end())
-        {
-          const IteratorFilters::SubdomainEqualTo
-          predicate(subdomain_id);
-
-          return
-            ++(FilteredIterator
-               <typename DataOut<dim>::active_cell_iterator>
-               (predicate,old_cell));
-        }
-      else
-        return old_cell;
-    }
-
-  private:
-    const unsigned int subdomain_id;
-  };
-
-
+  // class a way to only work on the cells that the present process owns.
   template <int dim>
   void TopLevel<dim>::output_results () const
   {
-    // With this newly defined class, declare an object that is going to
-    // generate the graphical output and attach the dof handler with it from
-    // which to get the solution vector:
-    FilteredDataOut<dim> data_out(this_mpi_process);
+    DataOut<dim> data_out;
     data_out.attach_dof_handler (dof_handler);

     // Then, just as in step-17, define the names of solution variables (which
@@ -1423,8 +1326,7 @@ namespace Step18
      cell = triangulation.begin_active(),
      endc = triangulation.end();
    for (unsigned int index=0; cell!=endc; ++cell, ++index)
-      // ... and pick those that are relevant to us:
-      if (cell->subdomain_id() == this_mpi_process)
+      if (cell->is_locally_owned() )
        {
          // On these cells, add up the stresses over all quadrature
          // points...
@@ -1675,7 +1577,6 @@ namespace Step18

    // Finally, set up quadrature point data again on the new mesh, and only
    // on those cells that we have determined to be ours:
-    GridTools::partition_triangulation (n_mpi_processes, triangulation);
    setup_quadrature_point_history ();
  }

@@ -1826,7 +1727,7 @@ namespace Step18
    for (typename Triangulation<dim>::active_cell_iterator
         cell = triangulation.begin_active();
         cell != triangulation.end(); ++cell)
-      if (cell->subdomain_id() == this_mpi_process)
+      if (cell->is_locally_owned() )
        ++our_cells;

    triangulation.clear_user_data();
@@ -1858,8 +1759,8 @@ namespace Step18
    unsigned int history_index = 0;
    for (typename Triangulation<dim>::active_cell_iterator
         cell = triangulation.begin_active();
-        cell != triangulation.end(); ++cell)
-      if (cell->subdomain_id() == this_mpi_process)
+        cell != triangulation.end(); ++cell)
+      if (cell->is_locally_owned() )
        {
          cell->set_user_pointer (&quadrature_point_history[history_index]);
          history_index += quadrature_formula.size();
@@ -1944,7 +1845,7 @@ namespace Step18
    for (typename DoFHandler<dim>::active_cell_iterator
         cell = dof_handler.begin_active();
         cell != dof_handler.end(); ++cell)
-      if (cell->subdomain_id() == this_mpi_process)
+      if (cell->is_locally_owned() )
      {
        // Next, get a pointer to the quadrature point history data local to
        // the present cell, and, as a defensive measure, make sure that
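
For readers following the patch, the setup_system() hunks boil down to the condensed sketch below. It is not part of the patch: it assumes the branch API matches what later shipped in deal.II (parallel::shared::Triangulation, IndexSet-based reinit of the PETSc wrappers), a library build configured with MPI and PETSc, and it uses illustrative choices (a refined hyper_cube in 3d, a vector-valued FE_Q system, a main() with MPI_InitFinalize) that do not come from the patch itself. Header paths are the usual deal.II 8.x ones and may differ on this branch.

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/compressed_simple_sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>

using namespace dealii;

int main (int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);
  const MPI_Comm mpi_communicator = MPI_COMM_WORLD;

  // Every process keeps the whole mesh; the shared triangulation partitions
  // it and sets subdomain ids, so cell->is_locally_owned() works as expected.
  parallel::shared::Triangulation<3> triangulation (mpi_communicator);
  GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (3);

  FESystem<3>   fe (FE_Q<3>(1), 3);
  DoFHandler<3> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  // The two IndexSets replace the old n_local_dofs bookkeeping:
  const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs ();
  IndexSet locally_relevant_dofs;
  DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);

  ConstraintMatrix hanging_node_constraints;
  DoFTools::make_hanging_node_constraints (dof_handler, hanging_node_constraints);
  hanging_node_constraints.close ();

  // Build the sparsity pattern on the locally relevant rows only, then tell
  // every process about the entries it may later write into:
  CompressedSimpleSparsityPattern csp (locally_relevant_dofs);
  DoFTools::make_sparsity_pattern (dof_handler, csp, hanging_node_constraints,
                                   /*keep constrained dofs*/ false);
  SparsityTools::distribute_sparsity_pattern
    (csp,
     dof_handler.n_locally_owned_dofs_per_processor (),
     mpi_communicator,
     locally_relevant_dofs);

  // PETSc objects are now created from IndexSets instead of raw dof counts:
  PETScWrappers::MPI::SparseMatrix system_matrix;
  system_matrix.reinit (locally_owned_dofs, locally_owned_dofs,
                        csp, mpi_communicator);
  PETScWrappers::MPI::Vector system_rhs (locally_owned_dofs, mpi_communicator);
}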
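
The other change that recurs throughout the patch is the ownership test inside cell loops: every comparison of cell->subdomain_id() against this_mpi_process becomes cell->is_locally_owned(), which the shared triangulation keeps consistent with its own partitioning. A minimal sketch of that idiom follows; the count_owned_cells() helper is hypothetical and only illustrates the loop shape used in assembly, output, and the quadrature-point bookkeeping.

#include <deal.II/dofs/dof_handler.h>

// Count the active cells owned by this MPI rank.
template <int dim>
unsigned int count_owned_cells (const dealii::DoFHandler<dim> &dof_handler)
{
  unsigned int n_owned = 0;
  for (typename dealii::DoFHandler<dim>::active_cell_iterator
       cell = dof_handler.begin_active();
       cell != dof_handler.end(); ++cell)
    if (cell->is_locally_owned ())   // was: cell->subdomain_id() == this_mpi_process
      ++n_owned;
  return n_owned;
}

Per process, this is essentially the our_cells count that setup_quadrature_point_history() computes before allocating the quadrature point history.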