https://gitweb.dealii.org/ - dealii-svn.git/commitdiff
Use IndexSet instead of Epetra_Map. This avoids using Trilinos-specific structures...
author    kronbichler <kronbichler@0785d39b-7218-0410-832d-ea1e28bc413d>
          Wed, 18 Nov 2009 02:14:36 +0000 (02:14 +0000)
committer kronbichler <kronbichler@0785d39b-7218-0410-832d-ea1e28bc413d>
          Wed, 18 Nov 2009 02:14:36 +0000 (02:14 +0000)
git-svn-id: https://svn.dealii.org/trunk@20127 0785d39b-7218-0410-832d-ea1e28bc413d
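In outline, the commit trades the Trilinos-specific Epetra_Map for deal.II's library-neutral IndexSet when describing which vector and matrix rows each processor stores. A minimal before/after sketch (sizes and range bounds are hypothetical placeholders):

    // Before: an Epetra_Map bundles global size, local element count,
    // index base, and communicator into one Trilinos-specific object.
    Epetra_Map map (n_global_dofs, n_local_dofs, 0, trilinos_communicator);

    // After: an IndexSet only records which indices this process owns;
    // add_range takes a half-open [begin, end) interval.
    IndexSet locally_owned (n_global_dofs);
    locally_owned.add_range (local_begin, local_end);
    locally_owned.compress ();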

deal.II/examples/step-32/step-32.cc

index 0d55dfc80987e7ccf56bd40a01c9149fc24dbc46..6b17b88e667fe7a150398ab885f46a0cdd8ab65e 100644 (file)
 #include <iostream>
 #include <sstream>
 
-                                // This is the only include file that is
-                                // new: We use Trilinos for defining the
-                                // %parallel partitioning of the matrices
-                                // and vectors, and as explained in the
-                                // introduction, an <code>Epetra_Map</code>
-                                // is the Trilinos data structure for the
-                                // definition of which part of a
-                                // distributed vector is stored locally:
-#include <Epetra_Map.h>
+                                // This is the only include file that is new:
+                                // We use an IndexSet to describe the
+                                // %parallel partitioning of vectors and
+                                // matrices.
+#include <base/index_set.h>
 
 
                                 // Next, we import all deal.II
@@ -735,25 +731,19 @@ namespace Assembly
                                 // for each of those two steps for all
                                 // the four assembly routines that we use
                                 // in this program.
-                                //
-                                // Moreover, we include an MPI communicator
-                                // and an Epetra_Map (see the introduction)
-                                // that are needed for communication and
-                                // data exchange if the Trilinos matrices
-                                // and vectors are distributed over several
-                                // processors. Finally, the
-                                // <code>pcout</code> (for <i>%parallel
-                                // <code>std::cout</code></i>) object is
-                                // used to simplify writing output: each
-                                // MPI process can use this to generate
-                                // output as usual, but since each of these
-                                // processes will produce the same output
-                                // it will just be replicated many times
-                                // over; with the ConditionalOStream class,
-                                // only the output generated by one task
-                                // will actually be printed to screen,
-                                // whereas the output by all the other
-                                // threads will simply be forgotten.
+                                //
+                                // The <code>pcout</code> (for <i>%parallel
+                                // <code>std::cout</code></i>) object is used
+                                // to simplify writing output: each MPI
+                                // process can use this to generate output as
+                                // usual, but since each of these processes
+                                // will produce the same output it will just
+                                // be replicated many times over; with the
+                                // ConditionalOStream class, only the output
+                                // generated by one process will actually be
+                                // printed to the screen, whereas the output
+                                // from all the other processes will simply
+                                // be discarded.
                                 //
                                 // In a bit of naming confusion, you will
                                 // notice below that some of the variables
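A usage sketch of the pcout mechanism described above, mirroring the constructor call that appears later in this patch:

    // Every process executes the print statement, but only rank 0 actually
    // writes to the screen; all other processes discard the output.
    ConditionalOStream pcout (std::cout,
                              Utilities::System::
                              get_this_mpi_process(MPI_COMM_WORLD) == 0);
    pcout << "Number of active cells: "
          << triangulation.n_active_cells() << std::endl;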
@@ -842,8 +832,6 @@ class BoussinesqFlowProblem
                      const double                        cell_diameter) const;
 
 
-    const Epetra_Comm                  &trilinos_communicator;
-
     ConditionalOStream                  pcout;
 
     Triangulation<dim>                  triangulation;
@@ -854,7 +842,6 @@ class BoussinesqFlowProblem
     DoFHandler<dim>                     stokes_dof_handler;
     ConstraintMatrix                    stokes_constraints;
 
-    std::vector<Epetra_Map>             stokes_partitioner;
     TrilinosWrappers::BlockSparseMatrix stokes_matrix;
     TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
 
@@ -868,7 +855,6 @@ class BoussinesqFlowProblem
     DoFHandler<dim>                     temperature_dof_handler;
     ConstraintMatrix                    temperature_constraints;
 
-    Epetra_Map                          temperature_partitioner;
     TrilinosWrappers::SparseMatrix      temperature_mass_matrix;
     TrilinosWrappers::SparseMatrix      temperature_stiffness_matrix;
     TrilinosWrappers::SparseMatrix      temperature_matrix;
@@ -894,9 +880,11 @@ class BoussinesqFlowProblem
 
     TimerOutput computing_timer;
 
-    void setup_stokes_matrix ();
-    void setup_stokes_preconditioner ();
-    void setup_temperature_matrices ();
+    void setup_stokes_matrix (const IndexSet &velocity_partitioning,
+                             const IndexSet &pressure_partitioning);
+    void setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
+                                     const IndexSet &pressure_partitioning);
+    void setup_temperature_matrices (const IndexSet &temperature_partitioning);
 
     void
     local_assemble_stokes_preconditioner (const typename DoFHandler<dim>::active_cell_iterator &cell,
@@ -986,10 +974,9 @@ class BoussinesqFlowProblem
 template <int dim>
 BoussinesqFlowProblem<dim>::BoussinesqFlowProblem ()
                 :
-                trilinos_communicator (Utilities::Trilinos::comm_world()),
                pcout (std::cout,
-                      (Utilities::Trilinos::
-                       get_this_mpi_process(trilinos_communicator)
+                      (Utilities::System::
+                       get_this_mpi_process(MPI_COMM_WORLD)
                        == 0)),
 
                triangulation (Triangulation<dim>::maximum_smoothing),
@@ -1003,8 +990,6 @@ BoussinesqFlowProblem<dim>::BoussinesqFlowProblem ()
                temperature_fe (temperature_degree),
                 temperature_dof_handler (triangulation),
 
-               temperature_partitioner (0, 0, trilinos_communicator),
-
                 time_step (0),
                old_time_step (0),
                timestep_number (0),
@@ -1056,7 +1041,7 @@ BoussinesqFlowProblem<dim>::BoussinesqFlowProblem ()
                                 // individual processors. MPI
                                 // provides such a call, but it's
                                 // even simpler to use the respective
-                                // function of the Trilinos
+                                // function of the MPI
                                 // communicator object since that
                                 // will do the right thing even if we
                                 // work without MPI and on a single
@@ -1086,7 +1071,7 @@ double BoussinesqFlowProblem<dim>::get_maximal_velocity () const
     endc = stokes_dof_handler.end();
   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() ==
-       Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+       Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
       {
        fe_values.reinit (cell);
        fe_values[velocities].get_function_values (stokes_solution,
@@ -1098,7 +1083,12 @@ double BoussinesqFlowProblem<dim>::get_maximal_velocity () const
       }
 
   double max_velocity = 0.;
-  trilinos_communicator.MaxAll(&max_local_velocity, &max_velocity, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  MPI_Allreduce (&max_local_velocity, &max_velocity, 1, MPI_DOUBLE,
+                MPI_MAX, MPI_COMM_WORLD);
+#else
+  max_velocity = max_local_velocity;
+#endif
 
   return max_velocity;
 }
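The #ifdef-guarded reduction introduced here reappears below for the temperature range and for the refinement indicators. A sketch of a small helper that would centralize the pattern (hypothetical, not part of this commit):

    // Reduce a local maximum over all processes when MPI is available;
    // without MPI the local value is already the global one.
    double global_max (double local_value)
    {
    #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
      double result;
      MPI_Allreduce (&local_value, &result, 1, MPI_DOUBLE,
                     MPI_MAX, MPI_COMM_WORLD);
      return result;
    #else
      return local_value;
    #endif
    }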
@@ -1141,7 +1131,7 @@ BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range () const
        endc = temperature_dof_handler.end();
       for (; cell!=endc; ++cell)
        if (cell->subdomain_id() ==
-           Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+           Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
          {
            fe_values.reinit (cell);
            fe_values.get_function_values (old_temperature_solution,
@@ -1163,9 +1153,15 @@ BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range () const
          }
 
       double min_temperature, max_temperature;
-
-      trilinos_communicator.MaxAll(&max_local_temperature, &max_temperature, 1);
-      trilinos_communicator.MinAll(&min_local_temperature, &min_temperature, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      MPI_Allreduce (&max_local_temperature, &max_temperature, 1, MPI_DOUBLE,
+                    MPI_MAX, MPI_COMM_WORLD);
+      MPI_Allreduce (&min_local_temperature, &min_temperature, 1, MPI_DOUBLE,
+                    MPI_MIN, MPI_COMM_WORLD);
+#else
+      min_temperature = min_local_temperature;
+      max_temperature = max_local_temperature;
+#endif
 
       return std::make_pair(min_temperature, max_temperature);
     }
@@ -1179,7 +1175,7 @@ BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range () const
        endc = temperature_dof_handler.end();
       for (; cell!=endc; ++cell)
        if (cell->subdomain_id() ==
-           Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+           Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
          {
            fe_values.reinit (cell);
            fe_values.get_function_values (old_temperature_solution,
@@ -1197,9 +1193,15 @@ BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range () const
          }
 
       double min_temperature, max_temperature;
-
-      trilinos_communicator.MaxAll(&max_local_temperature, &max_temperature, 1);
-      trilinos_communicator.MinAll(&min_local_temperature, &min_temperature, 1);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      MPI_Allreduce (&max_local_temperature, &max_temperature, 1, MPI_DOUBLE,
+                    MPI_MAX, MPI_COMM_WORLD);
+      MPI_Allreduce (&min_local_temperature, &min_temperature, 1, MPI_DOUBLE,
+                    MPI_MIN, MPI_COMM_WORLD);
+#else
+      min_temperature = min_local_temperature;
+      max_temperature = max_local_temperature;
+#endif
 
       return std::make_pair(min_temperature, max_temperature);
     }
@@ -1394,7 +1396,7 @@ void BoussinesqFlowProblem<dim>::project_temperature_field ()
 
   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() ==
-       Utilities::Trilinos::get_this_mpi_process(trilinos_communicator))
+       Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
       {
        cell->get_dof_indices (local_dof_indices);
        fe_values.reinit(cell);
@@ -1520,11 +1522,22 @@ void BoussinesqFlowProblem<dim>::project_temperature_field ()
                                 // <code>sp</code> variable once the matrix
                                 // has been given the sparsity structure.
 template <int dim>
-void BoussinesqFlowProblem<dim>::setup_stokes_matrix ()
+void BoussinesqFlowProblem<dim>::
+  setup_stokes_matrix (const IndexSet &velocity_partitioning,
+                      const IndexSet &pressure_partitioning)
 {
   stokes_matrix.clear ();
 
-  TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner);
+  TrilinosWrappers::BlockSparsityPattern sp (2,2);
+  sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.collect_sizes();
 
   Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
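The same 2x2 block initialization is repeated verbatim in setup_stokes_preconditioner below. A sketch of how it could be factored out (hypothetical helper, not part of this commit; assumes the pattern was constructed with two block rows and columns):

    void initialize_block_pattern (TrilinosWrappers::BlockSparsityPattern &sp,
                                   const IndexSet &velocity_partitioning,
                                   const IndexSet &pressure_partitioning)
    {
      // Velocity indices partition block row/column 0, pressure block 1.
      const IndexSet *sets[2] = { &velocity_partitioning,
                                  &pressure_partitioning };
      for (unsigned int i=0; i<2; ++i)
        for (unsigned int j=0; j<2; ++j)
          sp.block(i,j).reinit (*sets[i], *sets[j], MPI_COMM_WORLD);
      sp.collect_sizes();
    }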
 
@@ -1537,8 +1550,8 @@ void BoussinesqFlowProblem<dim>::setup_stokes_matrix ()
 
   DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
                                   stokes_constraints, false,
-                                  Utilities::Trilinos::
-                                  get_this_mpi_process(trilinos_communicator));
+                                  Utilities::System::
+                                  get_this_mpi_process(MPI_COMM_WORLD));
   sp.compress();
 
   stokes_matrix.reinit (sp);
@@ -1547,14 +1560,25 @@ void BoussinesqFlowProblem<dim>::setup_stokes_matrix ()
 
 
 template <int dim>
-void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner ()
+void BoussinesqFlowProblem<dim>::
+  setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
+                              const IndexSet &pressure_partitioning)
 {
   Amg_preconditioner.reset ();
   Mp_preconditioner.reset ();
 
   stokes_preconditioner_matrix.clear ();
 
-  TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner);
+  TrilinosWrappers::BlockSparsityPattern sp (2,2);
+  sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning, 
+                       MPI_COMM_WORLD);
+  sp.collect_sizes();
 
   Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
   for (unsigned int c=0; c<dim+1; ++c)
@@ -1566,8 +1590,8 @@ void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner ()
 
   DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
                                   stokes_constraints, false,
-                                  Utilities::Trilinos::
-                                  get_this_mpi_process(trilinos_communicator));
+                                  Utilities::System::
+                                  get_this_mpi_process(MPI_COMM_WORLD));
   sp.compress();
 
   stokes_preconditioner_matrix.reinit (sp);
@@ -1575,18 +1599,19 @@ void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner ()
 
 
 template <int dim>
-void BoussinesqFlowProblem<dim>::setup_temperature_matrices ()
+void BoussinesqFlowProblem<dim>::
+  setup_temperature_matrices (const IndexSet &temperature_partitioning)
 {
   T_preconditioner.reset ();
   temperature_mass_matrix.clear ();
   temperature_stiffness_matrix.clear ();
   temperature_matrix.clear ();
 
-  TrilinosWrappers::SparsityPattern sp (temperature_partitioner);
+  TrilinosWrappers::SparsityPattern sp (temperature_partitioning, MPI_COMM_WORLD);
   DoFTools::make_sparsity_pattern (temperature_dof_handler, sp,
                                   temperature_constraints, false,
-                                  Utilities::Trilinos::
-                                  get_this_mpi_process(trilinos_communicator));
+                                  Utilities::System::
+                                  get_this_mpi_process(MPI_COMM_WORLD));
   sp.compress();
 
   temperature_matrix.reinit (sp);
@@ -1636,7 +1661,7 @@ void BoussinesqFlowProblem<dim>::setup_temperature_matrices ()
                                 //
                                 // After this, we have to set up the
                                 // various partitioners (of type
-                                // <code>Epetra_Map</code>, see the
+                                // <code>IndexSet</code>, see the
                                 // introduction) that describe which parts
                                 // of each matrix or vector will be stored
                                 // where, then call the functions that
@@ -1670,9 +1695,9 @@ void BoussinesqFlowProblem<dim>::setup_temperature_matrices ()
                                 // <code>std::cout</code> but would then
                                 // have had to guard every access to that
                                 // stream by something like <code>if
-                                // (Utilities:: Trilinos::
+                                // (Utilities:: System::
                                 // get_this_mpi_process
-                                // (trilinos_communicator) == 0)</code>,
+                                // (MPI_COMM_WORLD) == 0)</code>,
                                 // hardly a pretty solution.
 template <int dim>
 void BoussinesqFlowProblem<dim>::setup_dofs ()
@@ -1681,8 +1706,8 @@ void BoussinesqFlowProblem<dim>::setup_dofs ()
   std::vector<unsigned int> stokes_sub_blocks (dim+1,0);
   stokes_sub_blocks[dim] = 1;
 
-  GridTools::partition_triangulation (Utilities::Trilinos::
-                                     get_n_mpi_processes(trilinos_communicator),
+  GridTools::partition_triangulation (Utilities::System::
+                                     get_n_mpi_processes(MPI_COMM_WORLD),
                                      triangulation);
 
   {
@@ -1747,60 +1772,112 @@ void BoussinesqFlowProblem<dim>::setup_dofs ()
        << std::endl
        << std::endl;
 
-  stokes_partitioner.clear();
+  IndexSet velocity_partitioning (n_u);
+  IndexSet pressure_partitioning (n_p);
+  IndexSet temperature_partitioning (n_T);
   {
-    std::vector<unsigned int> local_dofs (dim+1);
-    DoFTools::
-      count_dofs_with_subdomain_association (stokes_dof_handler,
-                                            Utilities::Trilinos::
-                                            get_this_mpi_process(trilinos_communicator),
-                                            local_dofs);
-    const unsigned int
-      n_local_velocities = std::accumulate (&local_dofs[0],
-                                           &local_dofs[dim],
-                                           0),
-      n_local_pressures  = local_dofs[dim];
-
-    stokes_partitioner.push_back (Epetra_Map(n_u, n_local_velocities,
-                                            0, trilinos_communicator));
-    stokes_partitioner.push_back (Epetra_Map(n_p, n_local_pressures,
-                                            0, trilinos_communicator));
+    const unsigned int my_id = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+    std::pair<unsigned int, unsigned int> 
+      range_u (deal_II_numbers::invalid_unsigned_int,
+              deal_II_numbers::invalid_unsigned_int),
+      range_p = range_u,
+      range_T = range_u;
+    std::vector<unsigned int> subdomain_association (stokes_dof_handler.n_dofs());
+    DoFTools::get_subdomain_association (stokes_dof_handler, subdomain_association);
+    unsigned int i;
+    for (i=0; i<n_u; ++i)
+      if (subdomain_association[i] == my_id)
+       {
+         range_u.first = i;
+         break;
+       }
+    Assert (range_u.first != deal_II_numbers::invalid_unsigned_int,
+           ExcMessage ("Could not find an appropriate parallel partition"));
+    for (i=range_u.first; i<n_u; ++i)
+      if (subdomain_association[i] != my_id)
+       {
+         range_u.second = i;
+         break;
+       }
+    if (i == n_u)
+      range_u.second = i;
+    for (i=n_u; i<n_u+n_p; ++i)
+      if (subdomain_association[i] == my_id)
+       {
+         range_p.first = i-n_u;
+         break;
+       }
+    Assert (range_p.first != deal_II_numbers::invalid_unsigned_int,
+           ExcMessage ("Could not find an appropriate parallel partition"));
+    for (i=range_p.first+n_u; i<n_u+n_p; ++i)
+      if (subdomain_association[i] != my_id)
+       {
+         range_p.second = i-n_u;
+         break;
+       }
+    if (i == n_u+n_p)
+      range_p.second = i-n_u;
+
+    subdomain_association.resize(temperature_dof_handler.n_dofs());
+    DoFTools::get_subdomain_association (temperature_dof_handler, 
+                                        subdomain_association);
+    for (i=0; i<n_T; ++i)
+      if (subdomain_association[i] == my_id)
+       {
+         range_T.first = i;
+         break;
+       }
+    Assert (range_T.first != deal_II_numbers::invalid_unsigned_int,
+           ExcMessage ("Could not find an appropriate parallel partition"));
+    for (i=range_T.first; i<n_T; ++i)
+      if (subdomain_association[i] != my_id)
+       {
+         range_T.second = i;
+         break;
+       }
+    if (i == n_T)
+      range_T.second = i;
+
+    velocity_partitioning.add_range (range_u.first, range_u.second);
+    velocity_partitioning.compress();
+    pressure_partitioning.add_range (range_p.first, range_p.second);
+    pressure_partitioning.compress();
+    temperature_partitioning.add_range (range_T.first, range_T.second);
+    temperature_partitioning.compress();
   }
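    // A sketch (assuming IndexSet::add_index is available): the range search
    // above relies on each processor's DoFs forming one contiguous block;
    // adding the owned indices one at a time would also cover the
    // non-contiguous case.
    for (unsigned int i=0; i<n_u; ++i)
      if (subdomain_association[i] == my_id)
        velocity_partitioning.add_index (i);
    velocity_partitioning.compress ();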
 
-  temperature_partitioner
-    = Epetra_Map (n_T,
-                 DoFTools::count_dofs_with_subdomain_association
-                 (temperature_dof_handler,
-                  Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
-                 0,
-                 trilinos_communicator);
-
   if (Utilities::System::job_supports_mpi() == false)
     {
       Threads::TaskGroup<> tasks;
       tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_stokes_matrix,
-                                 *this);
+                                 *this,
+                                 velocity_partitioning, pressure_partitioning);
       tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_stokes_preconditioner,
-                                 *this);
+                                 *this,
+                                 velocity_partitioning, pressure_partitioning);
       tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_temperature_matrices,
-                                 *this);
+                                 *this,
+                                 temperature_partitioning);
       tasks.join_all ();
     }
   else
     {
-      setup_stokes_matrix ();
-      setup_stokes_preconditioner ();
-      setup_temperature_matrices ();
+      setup_stokes_matrix (velocity_partitioning, pressure_partitioning);
+      setup_stokes_preconditioner (velocity_partitioning, pressure_partitioning);
+      setup_temperature_matrices (temperature_partitioning);
     }
 
-  stokes_solution.reinit (stokes_partitioner);
-  old_stokes_solution.reinit (stokes_partitioner);
-  stokes_rhs.reinit (stokes_partitioner);
+  stokes_rhs.reinit (2);
+  stokes_rhs.block(0).reinit (velocity_partitioning, MPI_COMM_WORLD);
+  stokes_rhs.block(1).reinit (pressure_partitioning, MPI_COMM_WORLD);
+  stokes_rhs.collect_sizes();
+  stokes_solution.reinit (stokes_rhs);
+  old_stokes_solution.reinit (stokes_solution);
 
-  temperature_solution.reinit (temperature_partitioner);
-  old_temperature_solution.reinit (temperature_partitioner);
-  old_old_temperature_solution.reinit (temperature_partitioner);
-  temperature_rhs.reinit (temperature_partitioner);
+  temperature_rhs.reinit (temperature_partitioning, MPI_COMM_WORLD);
+  temperature_solution.reinit (temperature_rhs);
+  old_temperature_solution.reinit (temperature_solution);
+  old_old_temperature_solution.reinit (temperature_solution);
 
   computing_timer.exit_section();
 }
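As the last few lines of this hunk show, a Trilinos vector can take its parallel layout either from an IndexSet plus a communicator or from an already initialized vector. A minimal sketch using the same calls:

    TrilinosWrappers::MPI::Vector rhs;
    rhs.reinit (temperature_partitioning, MPI_COMM_WORLD); // layout from an IndexSet
    TrilinosWrappers::MPI::Vector solution;
    solution.reinit (rhs);  // adopt the size and partition of rhs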
@@ -1978,10 +2055,10 @@ BoussinesqFlowProblem<dim>::assemble_stokes_preconditioner ()
 
   WorkStream::
     run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          stokes_dof_handler.begin_active()),
         SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          stokes_dof_handler.end()),
         std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
                          local_assemble_stokes_preconditioner,
@@ -2186,10 +2263,10 @@ void BoussinesqFlowProblem<dim>::assemble_stokes_system ()
 
   WorkStream::
     run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          stokes_dof_handler.begin_active()),
         SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          stokes_dof_handler.end()),
         std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
                          local_assemble_stokes_system,
@@ -2313,10 +2390,10 @@ void BoussinesqFlowProblem<dim>::assemble_temperature_matrix ()
 
   WorkStream::
     run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          temperature_dof_handler.begin_active()),
         SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          temperature_dof_handler.end()),
         std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
                          local_assemble_temperature_matrix,
@@ -2584,10 +2661,10 @@ void BoussinesqFlowProblem<dim>::assemble_temperature_system (const double maxim
 
   WorkStream::
     run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          temperature_dof_handler.begin_active()),
         SubdomainFilter (IteratorFilters::SubdomainEqualTo
-                         (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)),
+                         (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
                          temperature_dof_handler.end()),
         std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
                          local_assemble_temperature_rhs,
@@ -2670,7 +2747,7 @@ void BoussinesqFlowProblem<dim>::solve ()
       preconditioner (stokes_matrix, *Mp_preconditioner, *Amg_preconditioner);
 
     TrilinosWrappers::MPI::BlockVector
-      distributed_stokes_solution (stokes_partitioner);
+      distributed_stokes_solution (stokes_rhs);
     distributed_stokes_solution = stokes_solution;
 
     const unsigned int
@@ -2739,7 +2816,7 @@ void BoussinesqFlowProblem<dim>::solve ()
     SolverCG<TrilinosWrappers::MPI::Vector>   cg (solver_control);
 
     TrilinosWrappers::MPI::Vector
-      distributed_temperature_solution (temperature_partitioner);
+      distributed_temperature_solution (temperature_rhs);
     distributed_temperature_solution = temperature_solution;
 
     cg.solve (temperature_matrix, distributed_temperature_solution,
@@ -2847,7 +2924,7 @@ void BoussinesqFlowProblem<dim>::output_results ()
 
   computing_timer.enter_section ("Postprocessing");
 
-  if (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator) == 0)
+  if (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD) == 0)
     {
 
       const FESystem<dim> joint_fe (stokes_fe, 1,
@@ -3083,14 +3160,18 @@ void BoussinesqFlowProblem<dim>::refine_mesh (const unsigned int max_grid_level)
                                      std::vector<bool>(),
                                      0,
                                      0,
-                                     Utilities::Trilinos::get_this_mpi_process(trilinos_communicator));
+                                     Utilities::System::get_this_mpi_process(MPI_COMM_WORLD));
 
   Vector<double> x_local_estimated_error_per_cell (triangulation.n_active_cells());
   x_local_estimated_error_per_cell = local_estimated_error_per_cell;
   Vector<double> estimated_error_per_cell (triangulation.n_active_cells());
-  trilinos_communicator.MaxAll (&x_local_estimated_error_per_cell(0),
-                               &estimated_error_per_cell(0),
-                               triangulation.n_active_cells());
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  MPI_Allreduce (&x_local_estimated_error_per_cell(0),
+                &estimated_error_per_cell(0),
+                triangulation.n_active_cells(), MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+#else
+  estimated_error_per_cell = x_local_estimated_error_per_cell;
+#endif
 
   GridRefinement::refine_and_coarsen_fixed_fraction (triangulation,
                                                     estimated_error_per_cell,
