#include <iostream>
#include <sstream>
- // This is the only include file that is
- // new: We use Trilinos for defining the
- // %parallel partitioning of the matrices
- // and vectors, and as explained in the
- // introduction, an <code>Epetra_Map</code>
- // is the Trilinos data structure for the
- // definition of which part of a
- // distributed vector is stored locally:
-#include <Epetra_Map.h>
+ // This is the only include file that is new:
+ // We use an IndexSet to describe the
+ // %parallel partitioning of vectors and
+ // matrices.
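+                 // To give an idea of what this class
+                 // does: an IndexSet is created with the
+                 // global size of a vector (or of one of
+                 // its blocks), the locally stored index
+                 // range is added, and the set is then
+                 // compressed. A sketch (the names
+                 // <code>n_dofs</code>,
+                 // <code>local_begin</code> and
+                 // <code>local_end</code> are just
+                 // placeholders here; the real index sets
+                 // are built in <code>setup_dofs</code>
+                 // below) looks like this:
+                 // @code
+                 //   IndexSet local_dofs (n_dofs);
+                 //   local_dofs.add_range (local_begin, local_end);
+                 //   local_dofs.compress ();
+                 // @endcode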
+#include <base/index_set.h>
// Next, we import all deal.II
// for each of those two steps for all
// the four assembly routines that we use
// in this program.
- //
- // Moreover, we include an MPI communicator
- // and an Epetra_Map (see the introduction)
- // that are needed for communication and
- // data exchange if the Trilinos matrices
- // and vectors are distributed over several
- // processors. Finally, the
- // <code>pcout</code> (for <i>%parallel
- // <code>std::cout</code></i>) object is
- // used to simplify writing output: each
- // MPI process can use this to generate
- // output as usual, but since each of these
- // processes will produce the same output
- // it will just be replicated many times
- // over; with the ConditionalOStream class,
- // only the output generated by one task
- // will actually be printed to screen,
- // whereas the output by all the other
- // threads will simply be forgotten.
+ //
+ // The <code>pcout</code> (for <i>%parallel
+ // <code>std::cout</code></i>) object is used
+ // to simplify writing output: each MPI
+ // process can use this to generate output as
+ // usual, but since each of these processes
+                 // will produce the same output, it will
+                 // just be replicated many times over;
+                 // with the ConditionalOStream class, only
+                 // the output generated by one MPI process
+                 // will actually be printed to screen,
+                 // whereas the output by all the other
+                 // processes will simply be discarded.
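+                 // Using it is then as simple as writing
+                 // to <code>std::cout</code>; for
+                 // instance (the message shown here is
+                 // only an illustration):
+                 // @code
+                 //   pcout << "Number of active cells: "
+                 //         << triangulation.n_active_cells()
+                 //         << std::endl;
+                 // @endcode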
//
// In a bit of naming confusion, you will
// notice below that some of the variables
const double cell_diameter) const;
- const Epetra_Comm &trilinos_communicator;
-
ConditionalOStream pcout;
Triangulation<dim> triangulation;
DoFHandler<dim> stokes_dof_handler;
ConstraintMatrix stokes_constraints;
- std::vector<Epetra_Map> stokes_partitioner;
TrilinosWrappers::BlockSparseMatrix stokes_matrix;
TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
DoFHandler<dim> temperature_dof_handler;
ConstraintMatrix temperature_constraints;
- Epetra_Map temperature_partitioner;
TrilinosWrappers::SparseMatrix temperature_mass_matrix;
TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
TrilinosWrappers::SparseMatrix temperature_matrix;
TimerOutput computing_timer;
- void setup_stokes_matrix ();
- void setup_stokes_preconditioner ();
- void setup_temperature_matrices ();
+ void setup_stokes_matrix (const IndexSet &velocity_partitioning,
+ const IndexSet &pressure_partitioning);
+ void setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
+ const IndexSet &pressure_partitioning);
+ void setup_temperature_matrices (const IndexSet &temperature_partitioning);
void
local_assemble_stokes_preconditioner (const typename DoFHandler<dim>::active_cell_iterator &cell,
template <int dim>
BoussinesqFlowProblem<dim>::BoussinesqFlowProblem ()
:
- trilinos_communicator (Utilities::Trilinos::comm_world()),
pcout (std::cout,
- (Utilities::Trilinos::
- get_this_mpi_process(trilinos_communicator)
+ (Utilities::System::
+ get_this_mpi_process(MPI_COMM_WORLD)
== 0)),
triangulation (Triangulation<dim>::maximum_smoothing),
temperature_fe (temperature_degree),
temperature_dof_handler (triangulation),
- temperature_partitioner (0, 0, trilinos_communicator),
-
time_step (0),
old_time_step (0),
timestep_number (0),
// individual processors. MPI
// provides such a call, but it's
-                                       // even simpler to use the respective
-                                       // function of the Trilinos
-                                       // communicator object since that
+                                       // up to us to guard it with a
+                                       // preprocessor check so that the code
// will do the right thing even if we
// work without MPI and on a single
endc = stokes_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+ Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values[velocities].get_function_values (stokes_solution,
}
double max_velocity = 0.;
- trilinos_communicator.MaxAll(&max_local_velocity, &max_velocity, 1);
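+                                   // Compute the global maximum over all
+                                   // processors. If deal.II was configured
+                                   // without MPI, the local maximum already
+                                   // is the global one.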
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ MPI_Allreduce (&max_local_velocity, &max_velocity, 1, MPI_DOUBLE,
+ MPI_MAX, MPI_COMM_WORLD);
+#else
+ max_velocity = max_local_velocity;
+#endif
return max_velocity;
}
endc = temperature_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+ Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values.get_function_values (old_temperature_solution,
}
double min_temperature, max_temperature;
-
- trilinos_communicator.MaxAll(&max_local_temperature, &max_temperature, 1);
- trilinos_communicator.MinAll(&min_local_temperature, &min_temperature, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ MPI_Allreduce (&max_local_temperature, &max_temperature, 1, MPI_DOUBLE,
+ MPI_MAX, MPI_COMM_WORLD);
+ MPI_Allreduce (&min_local_temperature, &min_temperature, 1, MPI_DOUBLE,
+ MPI_MIN, MPI_COMM_WORLD);
+#else
+ min_temperature = min_local_temperature;
+ max_temperature = max_local_temperature;
+#endif
return std::make_pair(min_temperature, max_temperature);
}
endc = temperature_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+ Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values.get_function_values (old_temperature_solution,
}
double min_temperature, max_temperature;
-
- trilinos_communicator.MaxAll(&max_local_temperature, &max_temperature, 1);
- trilinos_communicator.MinAll(&min_local_temperature, &min_temperature, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ MPI_Allreduce (&max_local_temperature, &max_temperature, 1, MPI_DOUBLE,
+ MPI_MAX, MPI_COMM_WORLD);
+ MPI_Allreduce (&min_local_temperature, &min_temperature, 1, MPI_DOUBLE,
+ MPI_MIN, MPI_COMM_WORLD);
+#else
+ min_temperature = min_local_temperature;
+ max_temperature = max_local_temperature;
+#endif
return std::make_pair(min_temperature, max_temperature);
}
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+ Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
{
cell->get_dof_indices (local_dof_indices);
fe_values.reinit(cell);
// <code>sp</code> variable once the matrix
// has been given the sparsity structure.
template <int dim>
-void BoussinesqFlowProblem<dim>::setup_stokes_matrix ()
+void BoussinesqFlowProblem<dim>::
+ setup_stokes_matrix (const IndexSet &velocity_partitioning,
+ const IndexSet &pressure_partitioning)
{
stokes_matrix.clear ();
- TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner);
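+                                   // The sparsity pattern is now built
+                                   // block by block: each of the 2x2
+                                   // blocks is distributed according to
+                                   // the index sets that describe which
+                                   // velocity and pressure rows the
+                                   // present processor stores.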
+ TrilinosWrappers::BlockSparsityPattern sp (2,2);
+ sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning,
+ MPI_COMM_WORLD);
+ sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning,
+ MPI_COMM_WORLD);
+ sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning,
+ MPI_COMM_WORLD);
+ sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning,
+ MPI_COMM_WORLD);
+ sp.collect_sizes();
Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
stokes_constraints, false,
- Utilities::Trilinos::
- get_this_mpi_process(trilinos_communicator));
+ Utilities::System::
+ get_this_mpi_process(MPI_COMM_WORLD));
sp.compress();
stokes_matrix.reinit (sp);
template <int dim>
-void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner ()
+void BoussinesqFlowProblem<dim>::
+ setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
+ const IndexSet &pressure_partitioning)
{
Amg_preconditioner.reset ();
Mp_preconditioner.reset ();
stokes_preconditioner_matrix.clear ();
- TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner);
+ TrilinosWrappers::BlockSparsityPattern sp (2,2);
+ sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning,
+ MPI_COMM_WORLD);
+ sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning,
+ MPI_COMM_WORLD);
+ sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning,
+ MPI_COMM_WORLD);
+ sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning,
+ MPI_COMM_WORLD);
+ sp.collect_sizes();
Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
for (unsigned int c=0; c<dim+1; ++c)
DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
stokes_constraints, false,
- Utilities::Trilinos::
- get_this_mpi_process(trilinos_communicator));
+ Utilities::System::
+ get_this_mpi_process(MPI_COMM_WORLD));
sp.compress();
stokes_preconditioner_matrix.reinit (sp);
template <int dim>
-void BoussinesqFlowProblem<dim>::setup_temperature_matrices ()
+void BoussinesqFlowProblem<dim>::
+ setup_temperature_matrices (const IndexSet &temperature_partitioner)
{
T_preconditioner.reset ();
temperature_mass_matrix.clear ();
temperature_stiffness_matrix.clear ();
temperature_matrix.clear ();
- TrilinosWrappers::SparsityPattern sp (temperature_partitioner);
+ TrilinosWrappers::SparsityPattern sp (temperature_partitioner, MPI_COMM_WORLD);
DoFTools::make_sparsity_pattern (temperature_dof_handler, sp,
temperature_constraints, false,
- Utilities::Trilinos::
- get_this_mpi_process(trilinos_communicator));
+ Utilities::System::
+ get_this_mpi_process(MPI_COMM_WORLD));
sp.compress();
temperature_matrix.reinit (sp);
//
// After this, we have to set up the
// various partitioners (of type
- // <code>Epetra_Map</code>, see the
+ // <code>IndexSet</code>, see the
// introduction) that describe which parts
// of each matrix or vector will be stored
// where, then call the functions that
// <code>std::cout</code> but would then
// have had to guard every access to that
// stream by something like <code>if
- // (Utilities:: Trilinos::
+ // (Utilities:: System::
// get_this_mpi_process
- // (trilinos_communicator) == 0)</code>,
+ // (MPI_COMM_WORLD) == 0)</code>,
// hardly a pretty solution.
template <int dim>
void BoussinesqFlowProblem<dim>::setup_dofs ()
std::vector<unsigned int> stokes_sub_blocks (dim+1,0);
stokes_sub_blocks[dim] = 1;
- GridTools::partition_triangulation (Utilities::Trilinos::
- get_n_mpi_processes(trilinos_communicator),
+ GridTools::partition_triangulation (Utilities::System::
+ get_n_mpi_processes(MPI_COMM_WORLD),
triangulation);
{
<< std::endl
<< std::endl;
- stokes_partitioner.clear();
+ IndexSet velocity_partitioning (n_u);
+ IndexSet pressure_partitioning (n_p);
+ IndexSet temperature_partitioning (n_T);
{
- std::vector<unsigned int> local_dofs (dim+1);
- DoFTools::
- count_dofs_with_subdomain_association (stokes_dof_handler,
- Utilities::Trilinos::
- get_this_mpi_process(trilinos_communicator),
- local_dofs);
- const unsigned int
- n_local_velocities = std::accumulate (&local_dofs[0],
- &local_dofs[dim],
- 0),
- n_local_pressures = local_dofs[dim];
-
- stokes_partitioner.push_back (Epetra_Map(n_u, n_local_velocities,
- 0, trilinos_communicator));
- stokes_partitioner.push_back (Epetra_Map(n_p, n_local_pressures,
- 0, trilinos_communicator));
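+                                   // Rather than asking Trilinos for
+                                   // Epetra_Map objects, we now determine
+                                   // the locally owned index range of each
+                                   // block ourselves: we query the
+                                   // subdomain each degree of freedom is
+                                   // associated with and record where the
+                                   // indices belonging to the present
+                                   // processor start and end for the
+                                   // velocities, the pressures and the
+                                   // temperatures. Note that this assumes
+                                   // that the locally owned indices of
+                                   // each block form a single contiguous
+                                   // range.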
+ const unsigned int my_id = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ std::pair<unsigned int, unsigned int>
+ range_u (deal_II_numbers::invalid_unsigned_int,
+ deal_II_numbers::invalid_unsigned_int),
+ range_p = range_u,
+ range_T = range_u;
+ std::vector<unsigned int> subdomain_association (stokes_dof_handler.n_dofs());
+ DoFTools::get_subdomain_association (stokes_dof_handler, subdomain_association);
+ unsigned int i;
+ for (i=0; i<n_u; ++i)
+ if (subdomain_association[i] == my_id)
+ {
+ range_u.first = i;
+ break;
+ }
+ Assert (range_u.first != deal_II_numbers::invalid_unsigned_int,
+ ExcMessage ("Could not find an appropriate parallel partition"));
+ for (i=range_u.first; i<n_u; ++i)
+ if (subdomain_association[i] != my_id)
+ {
+ range_u.second = i;
+ break;
+ }
+ if (i == n_u)
+ range_u.second = i;
+ for (i=n_u; i<n_u+n_p; ++i)
+ if (subdomain_association[i] == my_id)
+ {
+ range_p.first = i-n_u;
+ break;
+ }
+ Assert (range_p.first != deal_II_numbers::invalid_unsigned_int,
+ ExcMessage ("Could not find an appropriate parallel partition"));
+ for (i=range_p.first+n_u; i<n_u+n_p; ++i)
+ if (subdomain_association[i] != my_id)
+ {
+ range_p.second = i-n_u;
+ break;
+ }
+ if (i == n_u+n_p)
+ range_p.second = i-n_u;
+
+ subdomain_association.resize(temperature_dof_handler.n_dofs());
+ DoFTools::get_subdomain_association (temperature_dof_handler,
+ subdomain_association);
+ for (i=0; i<n_T; ++i)
+ if (subdomain_association[i] == my_id)
+ {
+ range_T.first = i;
+ break;
+ }
+ Assert (range_T.first != deal_II_numbers::invalid_unsigned_int,
+ ExcMessage ("Could not find an appropriate parallel partition"));
+ for (i=range_T.first; i<n_T; ++i)
+ if (subdomain_association[i] != my_id)
+ {
+ range_T.second = i;
+ break;
+ }
+ if (i == n_T)
+ range_T.second = i;
+
+ velocity_partitioning.add_range (range_u.first, range_u.second);
+ velocity_partitioning.compress();
+ pressure_partitioning.add_range (range_p.first, range_p.second);
+ pressure_partitioning.compress();
+ temperature_partitioning.add_range (range_T.first, range_T.second);
+ temperature_partitioning.compress();
}
- temperature_partitioner
- = Epetra_Map (n_T,
- DoFTools::count_dofs_with_subdomain_association
- (temperature_dof_handler,
- Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
- 0,
- trilinos_communicator);
-
if (Utilities::System::job_supports_mpi() == false)
{
Threads::TaskGroup<> tasks;
tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_stokes_matrix,
- *this);
+ *this,
+ velocity_partitioning, pressure_partitioning);
tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_stokes_preconditioner,
- *this);
+ *this,
+ velocity_partitioning, pressure_partitioning);
tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_temperature_matrices,
- *this);
+ *this,
+ temperature_partitioning);
tasks.join_all ();
}
else
{
- setup_stokes_matrix ();
- setup_stokes_preconditioner ();
- setup_temperature_matrices ();
+ setup_stokes_matrix (velocity_partitioning, pressure_partitioning);
+ setup_stokes_preconditioner (velocity_partitioning, pressure_partitioning);
+ setup_temperature_matrices (temperature_partitioning);
}
- stokes_solution.reinit (stokes_partitioner);
- old_stokes_solution.reinit (stokes_partitioner);
- stokes_rhs.reinit (stokes_partitioner);
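+                                   // The distributed vectors are set up
+                                   // along the same lines: the right hand
+                                   // side block vector gets one block per
+                                   // index set, and the solution vectors
+                                   // then simply copy its %parallel
+                                   // layout.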
+ stokes_rhs.reinit (2);
+ stokes_rhs.block(0).reinit (velocity_partitioning, MPI_COMM_WORLD);
+ stokes_rhs.block(1).reinit (pressure_partitioning, MPI_COMM_WORLD);
+ stokes_rhs.collect_sizes();
+ stokes_solution.reinit (stokes_rhs);
+ old_stokes_solution.reinit (stokes_solution);
- temperature_solution.reinit (temperature_partitioner);
- old_temperature_solution.reinit (temperature_partitioner);
- old_old_temperature_solution.reinit (temperature_partitioner);
- temperature_rhs.reinit (temperature_partitioner);
+ temperature_rhs.reinit (temperature_partitioning, MPI_COMM_WORLD);
+ temperature_solution.reinit (temperature_rhs);
+ old_temperature_solution.reinit (temperature_solution);
+ old_old_temperature_solution.reinit (temperature_solution);
computing_timer.exit_section();
}
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_stokes_preconditioner,
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_stokes_system,
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_temperature_matrix,
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+ (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_temperature_rhs,
preconditioner (stokes_matrix, *Mp_preconditioner, *Amg_preconditioner);
TrilinosWrappers::MPI::BlockVector
- distributed_stokes_solution (stokes_partitioner);
+ distributed_stokes_solution (stokes_rhs);
distributed_stokes_solution = stokes_solution;
const unsigned int
SolverCG<TrilinosWrappers::MPI::Vector> cg (solver_control);
TrilinosWrappers::MPI::Vector
- distributed_temperature_solution (temperature_partitioner);
+ distributed_temperature_solution (temperature_rhs);
distributed_temperature_solution = temperature_solution;
cg.solve (temperature_matrix, distributed_temperature_solution,
computing_timer.enter_section ("Postprocessing");
- if (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator) == 0)
+ if (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD) == 0)
{
const FESystem<dim> joint_fe (stokes_fe, 1,
std::vector<bool>(),
0,
0,
- Utilities::Trilinos::get_this_mpi_process(trilinos_communicator));
+ Utilities::System::get_this_mpi_process(MPI_COMM_WORLD));
Vector<double> x_local_estimated_error_per_cell (triangulation.n_active_cells());
x_local_estimated_error_per_cell = local_estimated_error_per_cell;
Vector<double> estimated_error_per_cell (triangulation.n_active_cells());
- trilinos_communicator.MaxAll (&x_local_estimated_error_per_cell(0),
- &estimated_error_per_cell(0),
- triangulation.n_active_cells());
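+                                   // Each processor has only computed the
+                                   // error indicators for its own cells;
+                                   // all other entries are zero. Taking
+                                   // the element-wise maximum over all
+                                   // processors therefore gives everyone
+                                   // the complete set of indicators.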
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ MPI_Allreduce (&x_local_estimated_error_per_cell(0),
+ &estimated_error_per_cell(0),
+ triangulation.n_active_cells(), MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+#else
+ estimated_error_per_cell = x_local_estimated_error_per_cell;
+#endif
GridRefinement::refine_and_coarsen_fixed_fraction (triangulation,
estimated_error_per_cell,