From: Martin Kronbichler
Date: Wed, 18 Nov 2009 02:14:36 +0000 (+0000)
Subject: Use IndexSet instead of Epetra_Map. This avoids using Trilinos-specific structures...
X-Git-Tag: v8.0.0~6807
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=00f7d26c848e12b2607ce6366ec6f19aea37c56c;p=dealii.git

Use IndexSet instead of Epetra_Map. This avoids using Trilinos-specific structures
and makes the code more generic.

git-svn-id: https://svn.dealii.org/trunk@20127 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc
index 0d55dfc809..6b17b88e66 100644
--- a/deal.II/examples/step-32/step-32.cc
+++ b/deal.II/examples/step-32/step-32.cc
@@ -68,15 +68,11 @@
 #include
 #include

-                 // This is the only include file that is
-                 // new: We use Trilinos for defining the
-                 // %parallel partitioning of the matrices
-                 // and vectors, and as explained in the
-                 // introduction, an Epetra_Map
-                 // is the Trilinos data structure for the
-                 // definition of which part of a
-                 // distributed vector is stored locally:
-#include
+                 // This is the only include file that is new:
+                 // We use an IndexSet to describe the
+                 // %parallel partitioning of vectors and
+                 // matrices.
+#include

                  // Next, we import all deal.II
@@ -735,25 +731,19 @@ namespace Assembly
                  // for each of those two steps for all
                  // the four assembly routines that we use
                  // in this program.
-                 //
-                 // Moreover, we include an MPI communicator
-                 // and an Epetra_Map (see the introduction)
-                 // that are needed for communication and
-                 // data exchange if the Trilinos matrices
-                 // and vectors are distributed over several
-                 // processors. Finally, the
-                 // pcout (for %parallel
-                 // std::cout) object is
-                 // used to simplify writing output: each
-                 // MPI process can use this to generate
-                 // output as usual, but since each of these
-                 // processes will produce the same output
-                 // it will just be replicated many times
-                 // over; with the ConditionalOStream class,
-                 // only the output generated by one task
-                 // will actually be printed to screen,
-                 // whereas the output by all the other
-                 // threads will simply be forgotten.
+                 //
+                 // The pcout (for %parallel
+                 // std::cout) object is used
+                 // to simplify writing output: each MPI
+                 // process can use this to generate output as
+                 // usual, but since each of these processes
+                 // will produce the same output it will just
+                 // be replicated many times over; with the
+                 // ConditionalOStream class, only the output
+                 // generated by one task will actually be
+                 // printed to screen, whereas the output by
+                 // all the other threads will simply be
+                 // forgotten.
                 //
                 // In a bit of naming confusion, you will
                 // notice below that some of the variables
@@ -842,8 +832,6 @@ class BoussinesqFlowProblem
                       const double cell_diameter) const;

-    const Epetra_Comm         &trilinos_communicator;
-
     ConditionalOStream         pcout;

     Triangulation              triangulation;
@@ -854,7 +842,6 @@ class BoussinesqFlowProblem
     DoFHandler                 stokes_dof_handler;
     ConstraintMatrix           stokes_constraints;

-    std::vector                stokes_partitioner;
     TrilinosWrappers::BlockSparseMatrix stokes_matrix;
     TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
@@ -868,7 +855,6 @@ class BoussinesqFlowProblem
     DoFHandler                 temperature_dof_handler;
     ConstraintMatrix           temperature_constraints;

-    Epetra_Map                 temperature_partitioner;
     TrilinosWrappers::SparseMatrix temperature_mass_matrix;
     TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
     TrilinosWrappers::SparseMatrix temperature_matrix;
@@ -894,9 +880,11 @@ class BoussinesqFlowProblem
     TimerOutput computing_timer;

-    void setup_stokes_matrix ();
-    void setup_stokes_preconditioner ();
-    void setup_temperature_matrices ();
+    void setup_stokes_matrix (const IndexSet &velocity_partitioning,
+                              const IndexSet &pressure_partitioning);
+    void setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
+                                      const IndexSet &pressure_partitioning);
+    void setup_temperature_matrices (const IndexSet &temperature_partitioning);

     void
     local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell,
@@ -986,10 +974,9 @@ class BoussinesqFlowProblem

 template
 BoussinesqFlowProblem::BoussinesqFlowProblem ()
                 :
-                trilinos_communicator (Utilities::Trilinos::comm_world()),
                 pcout (std::cout,
-                       (Utilities::Trilinos::
-                        get_this_mpi_process(trilinos_communicator)
+                       (Utilities::System::
+                        get_this_mpi_process(MPI_COMM_WORLD)
                        == 0)),

                 triangulation (Triangulation::maximum_smoothing),
@@ -1003,8 +990,6 @@ BoussinesqFlowProblem::BoussinesqFlowProblem ()
                 temperature_fe (temperature_degree),
                 temperature_dof_handler (triangulation),

-                temperature_partitioner (0, 0, trilinos_communicator),
-
                 time_step (0),
                 old_time_step (0),
                 timestep_number (0),
@@ -1056,7 +1041,7 @@ BoussinesqFlowProblem::BoussinesqFlowProblem ()
                 // individual processors. MPI
                 // provides such a call, but it's
                 // even simpler to use the respective
-                // function of the Trilinos
+                // function of the MPI
                 // communicator object since that
                 // will do the right thing even if we
                 // work without MPI and on a single
@@ -1086,7 +1071,7 @@ double BoussinesqFlowProblem::get_maximal_velocity () const
     endc = stokes_dof_handler.end();
   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() ==
-        Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+        Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
      {
        fe_values.reinit (cell);
        fe_values[velocities].get_function_values (stokes_solution,
@@ -1098,7 +1083,12 @@ double BoussinesqFlowProblem::get_maximal_velocity () const
      }

   double max_velocity = 0.;
-  trilinos_communicator.MaxAll(&max_local_velocity, &max_velocity, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  MPI_Allreduce (&max_local_velocity, &max_velocity, 1, MPI_DOUBLE,
+                 MPI_MAX, MPI_COMM_WORLD);
+#else
+  max_velocity = max_local_velocity;
+#endif

   return max_velocity;
 }
@@ -1141,7 +1131,7 @@ BoussinesqFlowProblem::get_extrapolated_temperature_range () const
     endc = temperature_dof_handler.end();
   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() ==
-        Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+        Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
      {
        fe_values.reinit (cell);
        fe_values.get_function_values (old_temperature_solution,
@@ -1163,9 +1153,15 @@ BoussinesqFlowProblem::get_extrapolated_temperature_range () const
      }

   double min_temperature, max_temperature;
-
-  trilinos_communicator.MaxAll(&max_local_temperature, &max_temperature, 1);
-  trilinos_communicator.MinAll(&min_local_temperature, &min_temperature, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  MPI_Allreduce (&max_local_temperature, &max_temperature, 1, MPI_DOUBLE,
+                 MPI_MAX, MPI_COMM_WORLD);
+  MPI_Allreduce (&min_local_temperature, &min_temperature, 1, MPI_DOUBLE,
+                 MPI_MIN, MPI_COMM_WORLD);
+#else
+  min_temperature = min_local_temperature;
+  max_temperature = max_local_temperature;
+#endif

   return std::make_pair(min_temperature, max_temperature);
 }
@@ -1179,7 +1175,7 @@ BoussinesqFlowProblem::get_extrapolated_temperature_range () const
     endc = temperature_dof_handler.end();
   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() ==
-        Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+        Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
      {
        fe_values.reinit (cell);
        fe_values.get_function_values (old_temperature_solution,
@@ -1197,9 +1193,15 @@ BoussinesqFlowProblem::get_extrapolated_temperature_range () const
      }

   double min_temperature, max_temperature;
-
-  trilinos_communicator.MaxAll(&max_local_temperature, &max_temperature, 1);
-  trilinos_communicator.MinAll(&min_local_temperature, &min_temperature, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  MPI_Allreduce (&max_local_temperature, &max_temperature, 1, MPI_DOUBLE,
+                 MPI_MAX, MPI_COMM_WORLD);
+  MPI_Allreduce (&min_local_temperature, &min_temperature, 1, MPI_DOUBLE,
+                 MPI_MIN, MPI_COMM_WORLD);
+#else
+  min_temperature = min_local_temperature;
+  max_temperature = max_local_temperature;
+#endif

   return std::make_pair(min_temperature, max_temperature);
 }
@@ -1394,7 +1396,7 @@ void BoussinesqFlowProblem::project_temperature_field ()

   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() ==
-        Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+        Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
      {
        cell->get_dof_indices (local_dof_indices);
        fe_values.reinit(cell);
@@ -1520,11 +1522,22 @@ void BoussinesqFlowProblem::project_temperature_field ()
                 // sp variable once the matrix
                 // has been given the sparsity structure.
 template
-void BoussinesqFlowProblem::setup_stokes_matrix ()
+void BoussinesqFlowProblem::
+  setup_stokes_matrix (const IndexSet &velocity_partitioning,
+                       const IndexSet &pressure_partitioning)
 {
   stokes_matrix.clear ();

-  TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner);
+  TrilinosWrappers::BlockSparsityPattern sp (2,2);
+  sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning,
+                        MPI_COMM_WORLD);
+  sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning,
+                        MPI_COMM_WORLD);
+  sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning,
+                        MPI_COMM_WORLD);
+  sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning,
+                        MPI_COMM_WORLD);
+  sp.collect_sizes();

   Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
@@ -1537,8 +1550,8 @@ void BoussinesqFlowProblem::setup_stokes_matrix ()
   DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
                                    stokes_constraints, false,
-                                   Utilities::Trilinos::
-                                   get_this_mpi_process(trilinos_communicator));
+                                   Utilities::System::
+                                   get_this_mpi_process(MPI_COMM_WORLD));
   sp.compress();

   stokes_matrix.reinit (sp);
@@ -1547,14 +1560,25 @@
 template
-void BoussinesqFlowProblem::setup_stokes_preconditioner ()
+void BoussinesqFlowProblem::
+  setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
+                               const IndexSet &pressure_partitioning)
 {
   Amg_preconditioner.reset ();
   Mp_preconditioner.reset ();

   stokes_preconditioner_matrix.clear ();

-  TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner);
+  TrilinosWrappers::BlockSparsityPattern sp (2,2);
+  sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning,
+                        MPI_COMM_WORLD);
+  sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning,
+                        MPI_COMM_WORLD);
+  sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning,
+                        MPI_COMM_WORLD);
+  sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning,
+                        MPI_COMM_WORLD);
+  sp.collect_sizes();

   Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
   for (unsigned int c=0; c::setup_stokes_preconditioner ()
   DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
                                    stokes_constraints, false,
-                                   Utilities::Trilinos::
-                                   get_this_mpi_process(trilinos_communicator));
+                                   Utilities::System::
+                                   get_this_mpi_process(MPI_COMM_WORLD));
   sp.compress();

   stokes_preconditioner_matrix.reinit (sp);
@@ -1575,18 +1599,19 @@ void BoussinesqFlowProblem::setup_stokes_preconditioner ()
 template
-void BoussinesqFlowProblem::setup_temperature_matrices ()
+void BoussinesqFlowProblem::
+  setup_temperature_matrices (const IndexSet &temperature_partitioner)
 {
   T_preconditioner.reset ();
   temperature_mass_matrix.clear ();
   temperature_stiffness_matrix.clear ();
   temperature_matrix.clear ();

-  TrilinosWrappers::SparsityPattern sp (temperature_partitioner);
+  TrilinosWrappers::SparsityPattern sp (temperature_partitioner, MPI_COMM_WORLD);
   DoFTools::make_sparsity_pattern (temperature_dof_handler, sp,
                                    temperature_constraints, false,
-                                   Utilities::Trilinos::
-                                   get_this_mpi_process(trilinos_communicator));
+                                   Utilities::System::
+                                   get_this_mpi_process(MPI_COMM_WORLD));
   sp.compress();

   temperature_matrix.reinit (sp);
@@ -1636,7 +1661,7 @@ void BoussinesqFlowProblem::setup_temperature_matrices ()
                 //
                 // After this, we have to set up the
                 // various partitioners (of type
-                // Epetra_Map, see the
+                // IndexSet, see the
                 // introduction) that describe which parts
                 // of each matrix or vector will be stored
                 // where, then call the functions that
@@ -1670,9 +1695,9 @@ void BoussinesqFlowProblem::setup_temperature_matrices ()
                 // std::cout but would then
                 // have had to guard every access to that
                 // stream by something like if
-                // (Utilities:: Trilinos::
+                // (Utilities:: System::
                 // get_this_mpi_process
-                // (trilinos_communicator) == 0),
+                // (MPI_COMM_WORLD) == 0),
                 // hardly a pretty solution.
 template
 void BoussinesqFlowProblem::setup_dofs ()
@@ -1681,8 +1706,8 @@ void BoussinesqFlowProblem::setup_dofs ()
   std::vector stokes_sub_blocks (dim+1,0);
   stokes_sub_blocks[dim] = 1;

-  GridTools::partition_triangulation (Utilities::Trilinos::
-                                      get_n_mpi_processes(trilinos_communicator),
+  GridTools::partition_triangulation (Utilities::System::
+                                      get_n_mpi_processes(MPI_COMM_WORLD),
                                       triangulation);

   {
@@ -1747,60 +1772,112 @@ void BoussinesqFlowProblem::setup_dofs ()
             << std::endl
             << std::endl;

-  stokes_partitioner.clear();
+  IndexSet velocity_partitioning (n_u);
+  IndexSet pressure_partitioning (n_p);
+  IndexSet temperature_partitioning (n_T);
   {
-    std::vector local_dofs (dim+1);
-    DoFTools::
-      count_dofs_with_subdomain_association (stokes_dof_handler,
-                                             Utilities::Trilinos::
-                                             get_this_mpi_process(trilinos_communicator),
-                                             local_dofs);
-    const unsigned int
-      n_local_velocities = std::accumulate (&local_dofs[0],
-                                            &local_dofs[dim],
-                                            0),
-      n_local_pressures  = local_dofs[dim];
-
-    stokes_partitioner.push_back (Epetra_Map(n_u, n_local_velocities,
-                                             0, trilinos_communicator));
-    stokes_partitioner.push_back (Epetra_Map(n_p, n_local_pressures,
-                                             0, trilinos_communicator));
+    const unsigned int my_id = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+    std::pair
+      range_u (deal_II_numbers::invalid_unsigned_int,
+               deal_II_numbers::invalid_unsigned_int),
+      range_p = range_u,
+      range_T = range_u;
+    std::vector subdomain_association (stokes_dof_handler.n_dofs());
+    DoFTools::get_subdomain_association (stokes_dof_handler, subdomain_association);
+    unsigned int i;
+    for (i=0; i tasks;
     tasks += Threads::new_task (&BoussinesqFlowProblem::setup_stokes_matrix,
-                                *this);
+                                *this,
+                                velocity_partitioning, pressure_partitioning);
     tasks += Threads::new_task (&BoussinesqFlowProblem::setup_stokes_preconditioner,
-                                *this);
+                                *this,
+                                velocity_partitioning, pressure_partitioning);
     tasks += Threads::new_task (&BoussinesqFlowProblem::setup_temperature_matrices,
-                                *this);
+                                *this,
+                                temperature_partitioning);
     tasks.join_all ();
   }
   else
   {
-    setup_stokes_matrix ();
-    setup_stokes_preconditioner ();
-    setup_temperature_matrices ();
+    setup_stokes_matrix (velocity_partitioning, pressure_partitioning);
+    setup_stokes_preconditioner (velocity_partitioning, pressure_partitioning);
+    setup_temperature_matrices (temperature_partitioning);
   }

-  stokes_solution.reinit (stokes_partitioner);
-  old_stokes_solution.reinit (stokes_partitioner);
-  stokes_rhs.reinit (stokes_partitioner);
+  stokes_rhs.reinit (2);
+  stokes_rhs.block(0).reinit (velocity_partitioning, MPI_COMM_WORLD);
+  stokes_rhs.block(1).reinit (pressure_partitioning, MPI_COMM_WORLD);
+  stokes_rhs.collect_sizes();
+  stokes_solution.reinit (stokes_rhs);
+  old_stokes_solution.reinit (stokes_solution);

-  temperature_solution.reinit (temperature_partitioner);
-  old_temperature_solution.reinit (temperature_partitioner);
-  old_old_temperature_solution.reinit (temperature_partitioner);
-  temperature_rhs.reinit (temperature_partitioner);
+  temperature_rhs.reinit (temperature_partitioning, MPI_COMM_WORLD);
+  temperature_solution.reinit (temperature_rhs);
+  old_temperature_solution.reinit (temperature_solution);
+  old_old_temperature_solution.reinit (temperature_solution);

   computing_timer.exit_section();
 }
@@ -1978,10 +2055,10 @@ BoussinesqFlowProblem::assemble_stokes_preconditioner ()
   WorkStream::
   run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         stokes_dof_handler.begin_active()),
        SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         stokes_dof_handler.end()),
        std_cxx1x::bind (&BoussinesqFlowProblem::
                         local_assemble_stokes_preconditioner,
@@ -2186,10 +2263,10 @@ void BoussinesqFlowProblem::assemble_stokes_system ()
   WorkStream::
   run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         stokes_dof_handler.begin_active()),
        SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         stokes_dof_handler.end()),
        std_cxx1x::bind (&BoussinesqFlowProblem::
                         local_assemble_stokes_system,
@@ -2313,10 +2390,10 @@ void BoussinesqFlowProblem::assemble_temperature_matrix ()
   WorkStream::
   run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         temperature_dof_handler.begin_active()),
        SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         temperature_dof_handler.end()),
        std_cxx1x::bind (&BoussinesqFlowProblem::
                         local_assemble_temperature_matrix,
@@ -2584,10 +2661,10 @@ void BoussinesqFlowProblem::assemble_temperature_system (const double maxim
   WorkStream::
   run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         temperature_dof_handler.begin_active()),
        SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                        (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                        (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                         temperature_dof_handler.end()),
        std_cxx1x::bind (&BoussinesqFlowProblem::
                         local_assemble_temperature_rhs,
@@ -2670,7 +2747,7 @@ void BoussinesqFlowProblem::solve ()
     preconditioner (stokes_matrix, *Mp_preconditioner, *Amg_preconditioner);

   TrilinosWrappers::MPI::BlockVector
-    distributed_stokes_solution (stokes_partitioner);
+    distributed_stokes_solution (stokes_rhs);
   distributed_stokes_solution = stokes_solution;

   const unsigned int
@@ -2739,7 +2816,7 @@ void BoussinesqFlowProblem::solve ()
   SolverCG cg (solver_control);

   TrilinosWrappers::MPI::Vector
-    distributed_temperature_solution (temperature_partitioner);
+    distributed_temperature_solution (temperature_rhs);
   distributed_temperature_solution = temperature_solution;

   cg.solve (temperature_matrix, distributed_temperature_solution,
@@ -2847,7 +2924,7 @@ void BoussinesqFlowProblem::output_results ()
   computing_timer.enter_section ("Postprocessing");

-  if (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator) == 0)
+  if (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD) == 0)
    {
      const FESystem joint_fe (stokes_fe, 1,
@@ -3083,14 +3160,18 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level)
                                      std::vector(),
                                      0,
                                      0,
-                                     Utilities::Trilinos::get_this_mpi_process(trilinos_communicator));
+                                     Utilities::System::get_this_mpi_process(MPI_COMM_WORLD));

   Vector x_local_estimated_error_per_cell (triangulation.n_active_cells());
   x_local_estimated_error_per_cell = local_estimated_error_per_cell;

   Vector estimated_error_per_cell (triangulation.n_active_cells());
-  trilinos_communicator.MaxAll (&x_local_estimated_error_per_cell(0),
-                                &estimated_error_per_cell(0),
-                                triangulation.n_active_cells());
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  MPI_Allreduce (&x_local_estimated_error_per_cell(0),
+                 &estimated_error_per_cell(0),
+                 triangulation.n_active_cells(), MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+#else
+  estimated_error_per_cell = x_local_estimated_error_per_cell;
+#endif

   GridRefinement::refine_and_coarsen_fixed_fraction (triangulation,
                                                      estimated_error_per_cell,
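
The two idioms the patch relies on can be seen in isolation in the sketch below. It is not
part of the patch: the helper names make_owned_index_set() and global_max() are made up for
illustration, and the include path is the one used by the 2009 source tree this patch targets
(current deal.II releases ship the header as deal.II/base/index_set.h). The only real APIs used
are IndexSet with add_range()/compress() and MPI_Allreduce, exactly as they appear in
setup_dofs() and get_maximal_velocity() above; like the patch, the sketch assumes each process
owns a single contiguous block of degrees of freedom.

// Standalone sketch, not part of step-32. Helper names are hypothetical.
#include <base/index_set.h>   // 2009 tree; current releases: <deal.II/base/index_set.h>

#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
#  include <mpi.h>
#endif

#include <vector>

// Collect the contiguous range of degrees of freedom owned by process `my_id`
// into an IndexSet, mirroring what setup_dofs() does with the result of
// DoFTools::get_subdomain_association().
dealii::IndexSet
make_owned_index_set (const std::vector<unsigned int> &subdomain_association,
                      const unsigned int               my_id)
{
  dealii::IndexSet owned (subdomain_association.size());

  // Find the first and one-past-the-last index associated with my_id,
  // assuming (as the patch does) one contiguous block per process.
  unsigned int first = 0;
  while (first < subdomain_association.size() &&
         subdomain_association[first] != my_id)
    ++first;

  unsigned int last = first;
  while (last < subdomain_association.size() &&
         subdomain_association[last] == my_id)
    ++last;

  if (first != last)
    owned.add_range (first, last);   // half-open range [first, last)
  owned.compress ();
  return owned;
}

// The reduction pattern used throughout the patch: MPI_Allreduce when MPI is
// available, otherwise fall back to the local value.
double global_max (double local_value)
{
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
  double result = 0.;
  MPI_Allreduce (&local_value, &result, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
  return result;
#else
  return local_value;
#endif
}

An IndexSet built this way is what the new setup_stokes_matrix(), setup_stokes_preconditioner()
and setup_temperature_matrices() take in place of the former Epetra_Map objects, together with
an explicit MPI_COMM_WORLD passed to the TrilinosWrappers reinit() calls.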