https://gitweb.dealii.org/ - dealii.git/commitdiff
Can use a smarter process for creating an IndexSet. Use this in step-32. Provide...
author    Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Thu, 19 Nov 2009 20:48:41 +0000 (20:48 +0000)
committer Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Thu, 19 Nov 2009 20:48:41 +0000 (20:48 +0000)
git-svn-id: https://svn.dealii.org/trunk@20140 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/base/include/base/index_set.h
deal.II/base/source/index_set.cc
deal.II/examples/step-32/step-32.cc
deal.II/lac/include/lac/block_sparsity_pattern.h
deal.II/lac/include/lac/trilinos_block_sparse_matrix.h
deal.II/lac/include/lac/trilinos_block_vector.h
deal.II/lac/include/lac/trilinos_vector.h
deal.II/lac/source/block_sparsity_pattern.cc
deal.II/lac/source/trilinos_block_sparse_matrix.cc
deal.II/lac/source/trilinos_block_vector.cc
deal.II/lac/source/trilinos_vector.cc

diff --git a/deal.II/base/include/base/index_set.h b/deal.II/base/include/base/index_set.h
index cb2571825f36a380c525f5253af3b58a406dfe0c..4ef0d35edcc6a4fa3d866422db59fb409e00bbb0 100644
@@ -195,6 +195,17 @@ class IndexSet
                                      */
     IndexSet operator & (const IndexSet &is) const;
 
+                                    /**
+                                     * This command takes an interval
+                                     * <tt>[begin, end)</tt> and returns
+                                     * the intersection of the current
+                                     * index set with the interval, shifted
+                                     * to the range <tt>[0,
+                                     * end-begin)</tt>.
+                                     */
+    IndexSet get_view (const unsigned int begin,
+                      const unsigned int end) const;
+
 #ifdef DEAL_II_USE_TRILINOS
                                     /**
                                      * Given an MPI communicator,
@@ -622,73 +633,6 @@ IndexSet::operator != (const IndexSet &is) const
 
 
 
-inline
-IndexSet
-IndexSet::operator & (const IndexSet &is) const
-{
-  Assert (size() == is.size(),
-         ExcDimensionMismatch (size(), is.size()));
-
-  compress ();
-  is.compress ();
-
-  std::vector<Range>::const_iterator r1 = ranges.begin(),
-                                    r2 = is.ranges.begin();
-  IndexSet result (size());
-
-  while ((r1 != ranges.end())
-        &&
-        (r2 != is.ranges.end()))
-    {
-                                      // if r1 and r2 do not overlap
-                                      // at all, then move the
-                                      // pointer that sits to the
-                                      // left of the other up by one
-      if (r1->end <= r2->begin)
-       ++r1;
-      else if (r2->end <= r1->begin)
-       ++r2;
-      else
-       {
-                                          // the ranges must overlap
-                                          // somehow
-         Assert (((r1->begin <= r2->begin) &&
-                  (r1->end > r2->begin))
-                 ||
-                 ((r2->begin <= r1->begin) &&
-                  (r2->end > r1->begin)),
-                 ExcInternalError());
-
-                                          // add the overlapping
-                                          // range to the result
-         result.add_range (std::max (r1->begin,
-                                     r2->begin),
-                           std::min (r1->end,
-                                     r2->end));
-
-                                          // now move that iterator
-                                          // that ends earlier one
-                                          // up. note that it has to
-                                          // be this one because a
-                                          // subsequent range may
-                                          // still have a chance of
-                                          // overlapping with the
-                                          // range that ends later
-         if (r1->end <= r2->end)
-           ++r1;
-         else
-           ++r2;
-       }
-    }
-
-  result.compress ();
-  return result;
-}
-
-
-
-
-
 DEAL_II_NAMESPACE_CLOSE
 
 #endif
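
For illustration, a minimal sketch of what the new get_view() call returns (hypothetical
index values, not part of the committed code; the include path assumes the source layout
listed above):

    #include <base/index_set.h>

    // a set over [0,100) with two contiguous ranges
    IndexSet is (100);
    is.add_range (10, 30);                  // indices 10..29
    is.add_range (40, 60);                  // indices 40..59

    // intersect with the window [20,50) and shift the result to [0,30)
    IndexSet view = is.get_view (20, 50);
    // view contains {0..9} (from 20..29) and {20..29} (from 40..49),
    // i.e. view.size() == 30 and view.n_elements() == 20
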
diff --git a/deal.II/base/source/index_set.cc b/deal.II/base/source/index_set.cc
index 9fc707ebab1c7e247af4e08d930483641a3104ab..db090467dee4145b8cf166b2e6a670454647952b 100644
@@ -92,6 +92,99 @@ IndexSet::compress () const
 
 
 
+IndexSet
+IndexSet::operator & (const IndexSet &is) const
+{
+  Assert (size() == is.size(),
+         ExcDimensionMismatch (size(), is.size()));
+
+  compress ();
+  is.compress ();
+
+  std::vector<Range>::const_iterator r1 = ranges.begin(),
+                                    r2 = is.ranges.begin();
+  IndexSet result (size());
+
+  while ((r1 != ranges.end())
+        &&
+        (r2 != is.ranges.end()))
+    {
+                                      // if r1 and r2 do not overlap
+                                      // at all, then move the
+                                      // pointer that sits to the
+                                      // left of the other up by one
+      if (r1->end <= r2->begin)
+       ++r1;
+      else if (r2->end <= r1->begin)
+       ++r2;
+      else
+       {
+                                          // the ranges must overlap
+                                          // somehow
+         Assert (((r1->begin <= r2->begin) &&
+                  (r1->end > r2->begin))
+                 ||
+                 ((r2->begin <= r1->begin) &&
+                  (r2->end > r1->begin)),
+                 ExcInternalError());
+
+                                          // add the overlapping
+                                          // range to the result
+         result.add_range (std::max (r1->begin,
+                                     r2->begin),
+                           std::min (r1->end,
+                                     r2->end));
+
+                                          // now move that iterator
+                                          // that ends earlier one
+                                          // up. note that it has to
+                                          // be this one because a
+                                          // subsequent range may
+                                          // still have a chance of
+                                          // overlapping with the
+                                          // range that ends later
+         if (r1->end <= r2->end)
+           ++r1;
+         else
+           ++r2;
+       }
+    }
+
+  result.compress ();
+  return result;
+}
+
+
+
+IndexSet
+IndexSet::get_view (const unsigned int begin,
+                   const unsigned int end) const
+{
+  Assert (begin <= end,
+         ExcMessage ("End index needs to be larger or equal to begin index!"));
+  Assert (end <= size(),
+         ExcMessage ("Given range exceeds index set dimension"));
+
+  IndexSet result (end-begin);
+  std::vector<Range>::const_iterator r1 = ranges.begin();
+
+  while (r1 != ranges.end())
+    {
+      if (r1->end > begin || r1->begin < end)
+       {
+         result.add_range (std::max(r1->begin, begin)-begin,
+                           std::min(r1->end, end)-begin);
+
+       }
+      ++r1;
+    }
+
+  result.compress();
+  return result;
+}
+
+
+
 #ifdef DEAL_II_USE_TRILINOS
 
 Epetra_Map
@@ -120,7 +213,7 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
        for (unsigned int j=i->begin; j<i->end; ++j)
          indices.push_back (j);
       Assert (indices.size() == n_elements(), ExcInternalError());
-  
+
       return Epetra_Map (-1,
                         n_elements(),
                         &indices[0],
@@ -133,7 +226,7 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
     }
 }
 
-  
+
 #endif
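
Correspondingly, a small sketch of what the relocated operator& computes (hypothetical
values; both sets must have the same size, as checked by the assertion above):

    IndexSet a (50), b (50);
    a.add_range ( 0, 20);                   // {0..19}
    b.add_range (10, 30);                   // {10..29}

    IndexSet c = a & b;                     // overlap of the two range sets
    // c contains {10..19}, i.e. c.n_elements() == 10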
 
 
diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc
index 6b17b88e667fe7a150398ab885f46a0cdd8ab65e..348a92736385afb7f3e1fde14fd9abe32d45dda3 100644
@@ -112,8 +112,8 @@ namespace EquationData
            (1 - expansion_coefficient * (temperature -
                                          reference_temperature)));
   }
-  
-  
+
+
   template <int dim>
   Tensor<1,dim> gravity_vector (const Point<dim> &p)
   {
@@ -731,7 +731,7 @@ namespace Assembly
                                 // for each of those two steps for all
                                 // the four assembly routines that we use
                                 // in this program.
-                                // 
+                                //
                                 // The <code>pcout</code> (for <i>%parallel
                                 // <code>std::cout</code></i>) object is used
                                 // to simplify writing output: each MPI
@@ -880,10 +880,8 @@ class BoussinesqFlowProblem
 
     TimerOutput computing_timer;
 
-    void setup_stokes_matrix (const IndexSet &velocity_partitioning,
-                             const IndexSet &pressure_partitioning);
-    void setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
-                                     const IndexSet &pressure_partitioning);
+    void setup_stokes_matrix (const std::vector<IndexSet> &stokes_partitioning);
+    void setup_stokes_preconditioner (const std::vector<IndexSet> &stokes_partitioning);
     void setup_temperature_matrices (const IndexSet &temperature_partitioning);
 
     void
@@ -1523,21 +1521,12 @@ void BoussinesqFlowProblem<dim>::project_temperature_field ()
                                 // has been given the sparsity structure.
 template <int dim>
 void BoussinesqFlowProblem<dim>::
-  setup_stokes_matrix (const IndexSet &velocity_partitioning,
-                      const IndexSet &pressure_partitioning)
+  setup_stokes_matrix (const std::vector<IndexSet> &stokes_partitioning)
 {
   stokes_matrix.clear ();
 
-  TrilinosWrappers::BlockSparsityPattern sp (2,2);
-  sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.collect_sizes();
+  TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioning,
+                                            MPI_COMM_WORLD);
 
   Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
 
@@ -1561,24 +1550,15 @@ void BoussinesqFlowProblem<dim>::
 
 template <int dim>
 void BoussinesqFlowProblem<dim>::
-  setup_stokes_preconditioner (const IndexSet &velocity_partitioning,
-                              const IndexSet &pressure_partitioning)
+  setup_stokes_preconditioner (const std::vector<IndexSet> &stokes_partitioning)
 {
   Amg_preconditioner.reset ();
   Mp_preconditioner.reset ();
 
   stokes_preconditioner_matrix.clear ();
 
-  TrilinosWrappers::BlockSparsityPattern sp (2,2);
-  sp.block(0,0).reinit (velocity_partitioning, velocity_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.block(0,1).reinit (velocity_partitioning, pressure_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.block(1,0).reinit (pressure_partitioning, velocity_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.block(1,1).reinit (pressure_partitioning, pressure_partitioning, 
-                       MPI_COMM_WORLD);
-  sp.collect_sizes();
+  TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioning,
+                                            MPI_COMM_WORLD);
 
   Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
   for (unsigned int c=0; c<dim+1; ++c)
@@ -1607,7 +1587,8 @@ void BoussinesqFlowProblem<dim>::
   temperature_stiffness_matrix.clear ();
   temperature_matrix.clear ();
 
-  TrilinosWrappers::SparsityPattern sp (temperature_partitioner, MPI_COMM_WORLD);
+  TrilinosWrappers::SparsityPattern sp (temperature_partitioner,
+                                       MPI_COMM_WORLD);
   DoFTools::make_sparsity_pattern (temperature_dof_handler, sp,
                                   temperature_constraints, false,
                                   Utilities::System::
@@ -1772,78 +1753,20 @@ void BoussinesqFlowProblem<dim>::setup_dofs ()
        << std::endl
        << std::endl;
 
-  IndexSet velocity_partitioning (n_u);
-  IndexSet pressure_partitioning (n_p);
+  std::vector<IndexSet> stokes_partitioning;
   IndexSet temperature_partitioning (n_T);
   {
-    const unsigned int my_id = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
-    std::pair<unsigned int, unsigned int> 
-      range_u (deal_II_numbers::invalid_unsigned_int,
-              deal_II_numbers::invalid_unsigned_int),
-      range_p = range_u,
-      range_T = range_u;
-    std::vector<unsigned int> subdomain_association (stokes_dof_handler.n_dofs());
-    DoFTools::get_subdomain_association (stokes_dof_handler, subdomain_association);
-    unsigned int i;
-    for (i=0; i<n_u; ++i)
-      if (subdomain_association[i] == my_id)
-       {
-         range_u.first = i;
-         break;
-       }
-    Assert (range_u.first != deal_II_numbers::invalid_unsigned_int,
-           ExcMessage ("Could not find an appropriate parallel partition"));
-    for (i=range_u.first; i<n_u; ++i)
-      if (subdomain_association[i] != my_id)
-       {
-         range_u.second = i;
-         break;
-       }
-    if (i == n_u)
-      range_u.second = i;
-    for (i=n_u; i<n_u+n_p; ++i)
-      if (subdomain_association[i] == my_id)
-       {
-         range_p.first = i-n_u;
-         break;
-       }
-    Assert (range_p.first != deal_II_numbers::invalid_unsigned_int,
-           ExcMessage ("Could not find an appropriate parallel partition"));
-    for (i=range_p.first+n_u; i<n_u+n_p; ++i)
-      if (subdomain_association[i] != my_id)
-       {
-         range_p.second = i-n_u;
-         break;
-       }
-    if (i == n_u+n_p)
-      range_p.second = i-n_u;
-
-    subdomain_association.resize(temperature_dof_handler.n_dofs());
-    DoFTools::get_subdomain_association (temperature_dof_handler, 
-                                        subdomain_association);
-    for (i=0; i<n_T; ++i)
-      if (subdomain_association[i] == my_id)
-       {
-         range_T.first = i;
-         break;
-       }
-    Assert (range_T.first != deal_II_numbers::invalid_unsigned_int,
-           ExcMessage ("Could not find an appropriate parallel partition"));
-    for (i=range_T.first; i<n_T; ++i)
-      if (subdomain_association[i] != my_id)
-       {
-         range_T.second = i;
-         break;
-       }
-    if (i == n_T)
-      range_T.second = i;
-
-    velocity_partitioning.add_range (range_u.first, range_u.second);
-    velocity_partitioning.compress();
-    pressure_partitioning.add_range (range_p.first, range_p.second);
-    pressure_partitioning.compress();
-    temperature_partitioning.add_range (range_T.first, range_T.second);
-    temperature_partitioning.compress();
+    const unsigned int my_id =
+      Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+    IndexSet stokes_index_set =
+      DoFTools::dof_indices_with_subdomain_association(stokes_dof_handler,
+                                                      my_id);
+    stokes_partitioning.push_back(stokes_index_set.get_view(0,n_u));
+    stokes_partitioning.push_back(stokes_index_set.get_view(n_u,n_u+n_p));
+
+    temperature_partitioning =
+      DoFTools::dof_indices_with_subdomain_association(temperature_dof_handler,
+                                                      my_id);
   }
 
   if (Utilities::System::job_supports_mpi() == false)
@@ -1851,10 +1774,10 @@ void BoussinesqFlowProblem<dim>::setup_dofs ()
       Threads::TaskGroup<> tasks;
       tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_stokes_matrix,
                                  *this,
-                                 velocity_partitioning, pressure_partitioning);
+                                 stokes_partitioning);
       tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_stokes_preconditioner,
                                  *this,
-                                 velocity_partitioning, pressure_partitioning);
+                                 stokes_partitioning);
       tasks += Threads::new_task (&BoussinesqFlowProblem<dim>::setup_temperature_matrices,
                                  *this,
                                  temperature_partitioning);
@@ -1862,15 +1785,12 @@ void BoussinesqFlowProblem<dim>::setup_dofs ()
     }
   else
     {
-      setup_stokes_matrix (velocity_partitioning, pressure_partitioning);
-      setup_stokes_preconditioner (velocity_partitioning, pressure_partitioning);
+      setup_stokes_matrix (stokes_partitioning);
+      setup_stokes_preconditioner (stokes_partitioning);
       setup_temperature_matrices (temperature_partitioning);
     }
 
-  stokes_rhs.reinit (2);
-  stokes_rhs.block(0).reinit (velocity_partitioning, MPI_COMM_WORLD);
-  stokes_rhs.block(1).reinit (pressure_partitioning, MPI_COMM_WORLD);
-  stokes_rhs.collect_sizes();
+  stokes_rhs.reinit (stokes_partitioning, MPI_COMM_WORLD);
   stokes_solution.reinit (stokes_rhs);
   old_stokes_solution.reinit (stokes_solution);
 
@@ -2759,7 +2679,7 @@ void BoussinesqFlowProblem<dim>::solve ()
       if (stokes_constraints.is_constrained (i))
        distributed_stokes_solution(i) = 0;
 
-    SolverControl solver_control (stokes_matrix.m(), 1e-18*stokes_rhs.l2_norm());
+    SolverControl solver_control (stokes_matrix.m(), 1e-22*stokes_rhs.l2_norm());
     SolverBicgstab<TrilinosWrappers::MPI::BlockVector>
       bicgstab (solver_control, false);
 
@@ -2945,7 +2865,7 @@ void BoussinesqFlowProblem<dim>::output_results ()
        for (unsigned int i=0; i<stokes_solution.block(1).size(); ++i)
          minimal_pressure = std::min<double> (stokes_solution.block(1)(i),
                                               minimal_pressure);
-       
+
        std::vector<unsigned int> local_joint_dof_indices (joint_fe.dofs_per_cell);
        std::vector<unsigned int> local_stokes_dof_indices (stokes_fe.dofs_per_cell);
        std::vector<unsigned int> local_temperature_dof_indices (temperature_fe.dofs_per_cell);
@@ -2983,13 +2903,13 @@ void BoussinesqFlowProblem<dim>::output_results ()
                           *
                           100);
                    }
-                 else 
+                 else
                    {
                      Assert (stokes_fe.system_to_component_index(index_in_stokes_fe).first
                              ==
                              dim,
                              ExcInternalError());
-                     
+
                      joint_solution(local_joint_dof_indices[i])
                        = ((stokes_solution(local_stokes_dof_indices
                                            [joint_fe.system_to_base_index(i).second])
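
In condensed form, the partitioning pattern that setup_dofs() now uses (a sketch only:
dof_handler, n_u and n_p stand for the Stokes DoFHandler and its velocity/pressure
block sizes):

    const unsigned int my_id =
      Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);

    // all degrees of freedom owned by this process, over the combined (u,p) system
    IndexSet locally_owned =
      DoFTools::dof_indices_with_subdomain_association (dof_handler, my_id);

    // split into one IndexSet per block, each shifted to start at zero
    std::vector<IndexSet> partitioning;
    partitioning.push_back (locally_owned.get_view (0, n_u));           // velocity
    partitioning.push_back (locally_owned.get_view (n_u, n_u + n_p));   // pressure

    // the vector of IndexSets is then handed directly to the Trilinos wrappers
    TrilinosWrappers::BlockSparsityPattern sp  (partitioning, MPI_COMM_WORLD);
    TrilinosWrappers::MPI::BlockVector     rhs (partitioning, MPI_COMM_WORLD);
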
diff --git a/deal.II/lac/include/lac/block_sparsity_pattern.h b/deal.II/lac/include/lac/block_sparsity_pattern.h
index af999c3f30fb33f8fc1ed27a866c6c5c92c6f729..ba0b48e7c728140e61a0282b9610f8a9116da9a1 100644
@@ -92,7 +92,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * SparsityPattern class.
                                      */
     static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
-    
+
                                     /**
                                      * Initialize the matrix empty,
                                      * that is with no memory
@@ -115,7 +115,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      */
     BlockSparsityPatternBase (const unsigned int n_block_rows,
                              const unsigned int n_block_columns);
-    
+
                                     /**
                                      * Copy constructor. This
                                      * constructor is only allowed to
@@ -133,7 +133,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * Destructor.
                                      */
     ~BlockSparsityPatternBase ();
-    
+
                                     /**
                                      * Resize the matrix, by setting
                                      * the number of block rows and
@@ -186,7 +186,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * the sub-objects.
                                      */
     void collect_sizes ();
-    
+
                                     /**
                                      * Access the block with the
                                      * given coordinates.
@@ -194,8 +194,8 @@ class BlockSparsityPatternBase : public Subscriptor
     SparsityPatternBase &
     block (const unsigned int row,
           const unsigned int column);
-    
-    
+
+
                                     /**
                                      * Access the block with the
                                      * given coordinates. Version for
@@ -203,7 +203,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      */
     const SparsityPatternBase &
     block (const unsigned int row,
-          const unsigned int column) const;    
+          const unsigned int column) const;
 
                                     /**
                                      * Grant access to the object
@@ -222,7 +222,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      */
     const BlockIndices &
     get_column_indices () const;
-    
+
                                     /**
                                      * This function compresses the
                                      * sparsity structures that this
@@ -237,13 +237,13 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * column.
                                      */
     unsigned int n_block_rows () const;
-    
+
                                     /**
                                      * Return the number of blocks in a
                                      * row.
                                      */
     unsigned int n_block_cols () const;
-  
+
                                     /**
                                      * Return whether the object is
                                      * empty. It is empty if no
@@ -297,7 +297,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * blocks.
                                      */
     template <typename ForwardIterator>
-    void add_entries (const unsigned int row, 
+    void add_entries (const unsigned int row,
                      ForwardIterator    begin,
                      ForwardIterator    end,
                      const bool         indices_are_sorted = false);
@@ -335,7 +335,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * row.
                                      */
     unsigned int row_length (const unsigned int row) const;
-    
+
                                     /**
                                      * Return the number of nonzero
                                      * elements of this
@@ -408,7 +408,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      */
     DeclException0 (ExcInvalidConstructorCall);
                                     //@}
-    
+
   protected:
 
                                     /**
@@ -420,7 +420,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * Number of block columns.
                                      */
     unsigned int columns;
-    
+
                                     /**
                                      * Array of sparsity patterns.
                                      */
@@ -449,7 +449,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                        * individual blocks when doing a
                                        * collective add or set.
                                        */
-    std::vector<unsigned int> counter_within_block; 
+    std::vector<unsigned int> counter_within_block;
 
                                        /**
                                        * Temporary vector for column
@@ -457,7 +457,7 @@ class BlockSparsityPatternBase : public Subscriptor
                                        * local to global data on each
                                        * sparse matrix.
                                        */
-    std::vector<std::vector<unsigned int> > block_column_indices; 
+    std::vector<std::vector<unsigned int> > block_column_indices;
 
                                     /**
                                      * Make the block sparse matrix a
@@ -484,7 +484,7 @@ class BlockSparsityPatternBase : public Subscriptor
 class BlockSparsityPattern : public BlockSparsityPatternBase<SparsityPattern>
 {
   public:
-    
+
                                     /**
                                      * Initialize the matrix empty,
                                      * that is with no memory
@@ -514,7 +514,7 @@ class BlockSparsityPattern : public BlockSparsityPatternBase<SparsityPattern>
                                      */
     void reinit (const unsigned int n_block_rows,
                 const unsigned int n_block_columns);
-    
+
                                     /**
                                      * Initialize the pattern with
                                      * two BlockIndices for the block
@@ -526,7 +526,7 @@ class BlockSparsityPattern : public BlockSparsityPatternBase<SparsityPattern>
     void reinit (const BlockIndices& row_indices,
                 const BlockIndices& col_indices,
                 const std::vector<std::vector<unsigned int> >& row_lengths);
-    
+
 
                                     /**
                                      * Return whether the structure
@@ -534,7 +534,7 @@ class BlockSparsityPattern : public BlockSparsityPatternBase<SparsityPattern>
                                      * i.e. whether all sub-matrices
                                      * are compressed.
                                      */
-    bool is_compressed () const;    
+    bool is_compressed () const;
 
                                     /**
                                      * Determine an estimate for the
@@ -641,7 +641,7 @@ class BlockSparsityPattern : public BlockSparsityPatternBase<SparsityPattern>
 class BlockCompressedSparsityPattern : public BlockSparsityPatternBase<CompressedSparsityPattern>
 {
   public:
-    
+
                                     /**
                                      * Initialize the matrix empty,
                                      * that is with no memory
@@ -678,7 +678,7 @@ class BlockCompressedSparsityPattern : public BlockSparsityPatternBase<Compresse
                                      */
     BlockCompressedSparsityPattern (const std::vector<unsigned int>& row_block_sizes,
                                    const std::vector<unsigned int>& col_block_sizes);
-    
+
                                     /**
                                      * Initialize the pattern with
                                      * two BlockIndices for the block
@@ -687,7 +687,7 @@ class BlockCompressedSparsityPattern : public BlockSparsityPatternBase<Compresse
                                      */
     BlockCompressedSparsityPattern (const BlockIndices& row_indices,
                                    const BlockIndices& col_indices);
-    
+
                                     /**
                                      * Resize the matrix to a tensor
                                      * product of matrices with
@@ -717,7 +717,7 @@ class BlockCompressedSparsityPattern : public BlockSparsityPatternBase<Compresse
                                      * same block structure afterwards.
                                      */
     void reinit (const BlockIndices& row_indices, const BlockIndices& col_indices);
-    
+
                                     /**
                                      * Allow the use of the reinit
                                      * functions of the base class as
@@ -758,7 +758,7 @@ typedef BlockCompressedSparsityPattern CompressedBlockSparsityPattern;
 class BlockCompressedSetSparsityPattern : public BlockSparsityPatternBase<CompressedSetSparsityPattern>
 {
   public:
-    
+
                                     /**
                                      * Initialize the matrix empty,
                                      * that is with no memory
@@ -795,7 +795,7 @@ class BlockCompressedSetSparsityPattern : public BlockSparsityPatternBase<Compre
                                      */
     BlockCompressedSetSparsityPattern (const std::vector<unsigned int>& row_block_sizes,
                                       const std::vector<unsigned int>& col_block_sizes);
-    
+
                                     /**
                                      * Initialize the pattern with
                                      * two BlockIndices for the block
@@ -804,7 +804,7 @@ class BlockCompressedSetSparsityPattern : public BlockSparsityPatternBase<Compre
                                      */
     BlockCompressedSetSparsityPattern (const BlockIndices& row_indices,
                                       const BlockIndices& col_indices);
-    
+
                                     /**
                                      * Resize the matrix to a tensor
                                      * product of matrices with
@@ -834,7 +834,7 @@ class BlockCompressedSetSparsityPattern : public BlockSparsityPatternBase<Compre
                                      * same block structure afterwards.
                                      */
     void reinit (const BlockIndices& row_indices, const BlockIndices& col_indices);
-    
+
                                     /**
                                      * Allow the use of the reinit
                                      * functions of the base class as
@@ -955,11 +955,11 @@ class BlockCompressedSimpleSparsityPattern : public BlockSparsityPatternBase<Com
  *
  * This class is used in @ref step_32 "step-32".
  *
- * @author Martin Kronbichler, 2008
+ * @author Martin Kronbichler, 2008, 2009
  */
 namespace TrilinosWrappers
 {
-  class BlockSparsityPattern : 
+  class BlockSparsityPattern :
     public dealii::BlockSparsityPatternBase<SparsityPattern>
   {
     public:
@@ -1015,7 +1015,24 @@ namespace TrilinosWrappers
                                      * mapping vector and then entering the
                                      * index values.
                                      */
-      BlockSparsityPattern (const std::vector<Epetra_Map>& input_maps);
+      BlockSparsityPattern (const std::vector<Epetra_Map>& parallel_partitioning);
+
+                                    /**
+                                     * Initialize the pattern with an array
+                                     * of index sets that specifies both
+                                     * rows and columns of the matrix (so
+                                     * the final matrix will be a square
+                                     * matrix), where the IndexSet
+                                     * specifies the parallel distribution
+                                     * of the degrees of freedom on the
+                                     * individual block.  This function is
+                                     * equivalent to calling the second
+                                     * constructor with the length of the
+                                     * mapping vector and then entering the
+                                     * index values.
+                                     */
+      BlockSparsityPattern (const std::vector<IndexSet>& parallel_partitioning,
+                           const MPI_Comm &communicator = MPI_COMM_WORLD);
 
                                     /**
                                      * Resize the matrix to a tensor
@@ -1042,7 +1059,17 @@ namespace TrilinosWrappers
                                      * specifications in the array of
                                      * Epetra_Maps.
                                      */
-      void reinit (const std::vector<Epetra_Map>& input_maps);
+      void reinit (const std::vector<Epetra_Map>& parallel_partitioning);
+
+                                    /**
+                                     * Resize the matrix to a square tensor
+                                     * product of matrices with parallel
+                                     * distribution according to the
+                                     * specifications in the array of
+                                     * Epetra_Maps.
+                                     */
+      void reinit (const std::vector<IndexSet>& parallel_partitioning,
+                  const MPI_Comm             & communicator = MPI_COMM_WORLD);
 
 
                                     /**
@@ -1197,7 +1224,7 @@ BlockSparsityPatternBase<SparsityPatternBase>::add_entries (const unsigned int r
                                   // where we should start reading out
                                   // data. Now let's write the data into
                                   // the individual blocks!
-  const std::pair<unsigned int,unsigned int> 
+  const std::pair<unsigned int,unsigned int>
     row_index = this->row_indices.global_to_local (row);
   for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
     {
@@ -1241,7 +1268,7 @@ row_length (const unsigned int row) const
     row_index = row_indices.global_to_local (row);
 
   unsigned int c = 0;
-  
+
   for (unsigned int b=0; b<rows; ++b)
     c += sub_objects[row_index.first][b]->row_length (row_index.second);
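
A short usage sketch of the IndexSet-based interface declared above (partitioning stands
for one locally owned IndexSet per block, built for example with IndexSet::get_view() as
in step-32):

    std::vector<IndexSet> partitioning;     // one IndexSet per block row/column
    // ... fill partitioning ...

    // construct a square block pattern with one block per IndexSet
    TrilinosWrappers::BlockSparsityPattern sp (partitioning, MPI_COMM_WORLD);

    // or resize an existing pattern to the same layout
    sp.reinit (partitioning, MPI_COMM_WORLD);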
 
diff --git a/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h b/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h
index b0fa7930b891e5ba63fee3375e36b44275d316b7..105309562b1e4bfe4021f74064726d024e4cb9d8 100644
@@ -41,7 +41,7 @@ template <typename number> class BlockSparseMatrix;
 
 namespace TrilinosWrappers
 {
-  
+
 /*! @addtogroup TrilinosWrappers
  *@{
  */
@@ -75,7 +75,7 @@ namespace TrilinosWrappers
                                         * access to its own typedefs.
                                         */
       typedef BlockMatrixBase<SparseMatrix> BaseClass;
-    
+
                                        /**
                                         * Typedef the type of the underlying
                                         * matrix.
@@ -181,7 +181,7 @@ namespace TrilinosWrappers
                                        /**
                                         * Resize the matrix, by using an
                                        * array of Epetra maps to determine
-                                       * the distribution of the 
+                                       * the %parallel distribution of the
                                        * individual matrices. This function
                                        * assumes that a quadratic block
                                        * matrix is generated.
@@ -190,6 +190,19 @@ namespace TrilinosWrappers
       void reinit (const std::vector<Epetra_Map> &input_maps,
                   const BlockSparsityType       &block_sparsity_pattern);
 
+                                       /**
+                                        * Resize the matrix, by using an
+                                       * array of index sets to determine
+                                       * the %parallel distribution of the
+                                       * individual matrices. This function
+                                       * assumes that a quadratic block
+                                       * matrix is generated.
+                                        */
+      template <typename BlockSparsityType>
+      void reinit (const std::vector<IndexSet> &input_maps,
+                  const BlockSparsityType     &block_sparsity_pattern,
+                  const MPI_Comm              &communicator = MPI_COMM_WORLD);
+
                                        /**
                                         * Resize the matrix and initialize it
                                         * by the given sparsity pattern. Since
@@ -204,10 +217,10 @@ namespace TrilinosWrappers
                                         * This function initializes the
                                        * Trilinos matrix using the deal.II
                                        * sparse matrix and the entries stored
-                                       * therein. It uses a threshold 
-                                       * to copy only elements whose 
-                                       * modulus is larger than the 
-                                       * threshold (so zeros in the 
+                                       * therein. It uses a threshold
+                                       * to copy only elements whose
+                                       * modulus is larger than the
+                                       * threshold (so zeros in the
                                        * deal.II matrix can be filtered
                                        * away).
                                         */
@@ -235,14 +248,14 @@ namespace TrilinosWrappers
 
                                       /**
                                        * This function calls the compress()
-                                       * command of all matrices after 
-                                       * the assembly is 
+                                       * command of all matrices after
+                                       * the assembly is
                                        * completed. Note that all MPI
-                                       * processes need to call this 
-                                       * command (whereas the individual 
+                                       * processes need to call this
+                                       * command (whereas the individual
                                        * assembly routines will most probably
                                        * only be called on each processor
-                                       * individually) before any 
+                                       * individually) before any
                                        * can complete it.
                                        */
       void compress ();
@@ -288,7 +301,7 @@ namespace TrilinosWrappers
                                        /**
                                         * Return the number of nonzero
                                         * elements of this
-                                        * matrix. 
+                                        * matrix.
                                         */
       unsigned int n_nonzero_elements () const;
 
@@ -603,11 +616,11 @@ namespace TrilinosWrappers
                                         * protected.
                                         */
       using BlockMatrixBase<SparseMatrix>::clear;
-      
+
                                       /** @addtogroup Exceptions
                                        * @{
                                        */
-      
+
                                        /**
                                         * Exception
                                         */
@@ -657,7 +670,7 @@ namespace TrilinosWrappers
   {
     BaseClass::vmult_block_block (dst, src);
   }
-  
+
 
 
   inline
@@ -677,7 +690,7 @@ namespace TrilinosWrappers
   {
     BaseClass::vmult_block_nonblock (dst, src);
   }
-  
+
 
 
   inline
@@ -697,7 +710,7 @@ namespace TrilinosWrappers
   {
     BaseClass::vmult_nonblock_block (dst, src);
   }
-  
+
 
 
   inline
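
A sketch of the new reinit() variant declared above (sp and partitioning as in the
previous examples):

    TrilinosWrappers::BlockSparseMatrix matrix;
    matrix.reinit (partitioning, sp, MPI_COMM_WORLD);   // one matrix block per IndexSet
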
diff --git a/deal.II/lac/include/lac/trilinos_block_vector.h b/deal.II/lac/include/lac/trilinos_block_vector.h
index 6836bf2f034200eb6966fc0a2876b51996bd6f20..be5006675d437785e2c33a953bc84403e5cfa024 100644
@@ -62,7 +62,7 @@ namespace TrilinosWrappers
  *
  * @ingroup Vectors
  * @ingroup TrilinosWrappers
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
  */
     class BlockVector : public BlockVectorBase<Vector>
     {
@@ -72,7 +72,7 @@ namespace TrilinosWrappers
                                        * access to its own typedefs.
                                        */
         typedef BlockVectorBase<Vector> BaseClass;
-    
+
                                        /**
                                        * Typedef the type of the underlying
                                        * vector.
@@ -97,17 +97,31 @@ namespace TrilinosWrappers
                                        * empty vector without any blocks.
                                        */
         BlockVector ();
-        
+
                                        /**
                                        * Constructor. Generate a block
                                        * vector with as many blocks as
-                                       * there are entries in Input_Maps.
-                                       * Each Epetra_Map already knows
-                                       * the distribution of data among
-                                       * the MPI processes.
+                                       * there are entries in @p
+                                       * partitioning. Each Epetra_Map
+                                       * contains the layout of the
+                                       * distribution of data among the MPI
+                                       * processes.
+                                       */
+        BlockVector (const std::vector<Epetra_Map> &parallel_partitioning);
+
+                                       /**
+                                       * Constructor. Generate a block
+                                       * vector with as many blocks as
+                                       * there are entries in
+                                       * @p partitioning.  Each IndexSet
+                                       * together with the MPI communicator
+                                       * contains the layout of the
+                                       * distribution of data among the MPI
+                                       * processes.
                                        */
-        BlockVector (const std::vector<Epetra_Map> &InputMaps);
-    
+        BlockVector (const std::vector<IndexSet> &parallel_partitioning,
+                    const MPI_Comm              &communicator = MPI_COMM_WORLD);
+
                                        /**
                                        * Copy-Constructor. Set all the
                                        * properties of the parallel vector
@@ -115,7 +129,7 @@ namespace TrilinosWrappers
                                        * copy the elements.
                                        */
         BlockVector (const BlockVector  &V);
-    
+
                                        /**
                                        * Creates a block vector
                                        * consisting of
@@ -127,7 +141,7 @@ namespace TrilinosWrappers
                                        * reinit of the blocks.
                                        */
         BlockVector (const unsigned int num_blocks);
-    
+
                                        /**
                                        * Destructor. Clears memory
                                        */
@@ -178,24 +192,40 @@ namespace TrilinosWrappers
                                        * deal.II vector.
                                        */
         template <typename Number>
-       BlockVector & 
+       BlockVector &
          operator = (const ::dealii::BlockVector<Number> &V);
 
                                          /**
                                           * Reinitialize the BlockVector to
-                                          * contain as many blocks as there 
+                                          * contain as many blocks as there
                                          * are Epetra_Maps given in the input
                                          * argument, according to the
-                                         * parallel distribution of the 
+                                         * parallel distribution of the
                                          * individual components described
                                          * in the maps.
                                           *
                                           * If <tt>fast==false</tt>, the vector
                                           * is filled with zeros.
                                           */
-        void reinit (const std::vector<Epetra_Map> &input_maps,
+        void reinit (const std::vector<Epetra_Map> &parallel_partitioning,
                     const bool                     fast = false);
 
+                                         /**
+                                          * Reinitialize the BlockVector to
+                                          * contain as many blocks as there
+                                         * are index sets given in the input
+                                         * argument, according to the
+                                         * parallel distribution of the
+                                         * individual components described
+                                         * in the maps.
+                                          *
+                                          * If <tt>fast==false</tt>, the vector
+                                          * is filled with zeros.
+                                          */
+        void reinit (const std::vector<IndexSet> &parallel_partitioning,
+                    const MPI_Comm              &communicator = MPI_COMM_WORLD,
+                    const bool                   fast = false);
+
                                          /**
                                           * Change the dimension to that
                                           * of the vector <tt>V</tt>. The same
@@ -231,9 +261,9 @@ namespace TrilinosWrappers
                                          * blocks will get initialized with
                                          * zero size, so it is assumed that
                                          * the user resizes the
-                                         * individual blocks by herself 
+                                         * individual blocks by herself
                                          * in an appropriate way, and
-                                         * calls <tt>collect_sizes</tt> 
+                                         * calls <tt>collect_sizes</tt>
                                          * afterwards.
                                           */
         void reinit (const unsigned int num_blocks);
@@ -354,9 +384,18 @@ namespace TrilinosWrappers
 
 
     inline
-    BlockVector::BlockVector (const std::vector<Epetra_Map> &InputMaps)
+    BlockVector::BlockVector (const std::vector<Epetra_Map> &parallel_partitioning)
     {
-      reinit (InputMaps);
+      reinit (parallel_partitioning, false);
+    }
+
+
+
+    inline
+    BlockVector::BlockVector (const std::vector<IndexSet> &parallel_partitioning,
+                             const MPI_Comm              &communicator)
+    {
+      reinit (parallel_partitioning, communicator, false);
     }
 
 
@@ -376,7 +415,7 @@ namespace TrilinosWrappers
     {
       this->components.resize (v.n_blocks());
       this->block_indices = v.block_indices;
-    
+
       for (unsigned int i=0; i<this->n_blocks(); ++i)
         this->components[i] = v.components[i];
     }
@@ -427,17 +466,17 @@ namespace TrilinosWrappers
     {
       Assert (n_blocks() == v.n_blocks(),
              ExcDimensionMismatch(n_blocks(),v.n_blocks()));
-      
+
       for (unsigned int row=0; row<n_blocks(); ++row)
        block(row).swap (v.block(row));
     }
-    
+
 
 
 /**
  * Global function which overloads the default implementation
  * of the C++ standard library which uses a temporary object. The
- * function simply exchanges the data of the two vectors. 
+ * function simply exchanges the data of the two vectors.
  *
  * @relates TrilinosWrappers::MPI::BlockVector
  * @author Martin Kronbichler, Wolfgang Bangerth, 2008
@@ -480,7 +519,7 @@ namespace TrilinosWrappers
                                        * access to its own typedefs.
                                        */
       typedef BlockVectorBase<Vector> BaseClass;
-    
+
                                        /**
                                        * Typedef the type of the underlying
                                        * vector.
@@ -505,32 +544,44 @@ namespace TrilinosWrappers
                                        * empty vector without any blocks.
                                        */
       BlockVector ();
-        
+
                                        /**
                                        * Constructor. Generate a block
                                        * vector with as many blocks as
                                        * there are entries in Input_Maps.
-                                       * Each Epetra_Map already knows
-                                       * the distribution of data among
-                                       * the MPI processes.
+                                       * For this non-distributed vector,
+                                       * the %parallel partitioning is not
+                                       * used, just the global size of the
+                                       * partitioner.
                                        */
-      BlockVector (const std::vector<Epetra_Map> &InputMaps);
+      BlockVector (const std::vector<Epetra_Map> &partitioner);
+
+                                       /**
+                                       * Constructor. Generate a block
+                                       * vector with as many blocks as
+                                       * there are entries in Input_Maps.
+                                       * For this non-distributed vector,
+                                       * the %parallel partitioning is not
+                                       * used, just the global size of the
+                                       * partitioner.
+                                       */
+      BlockVector (const std::vector<IndexSet> &partitioner,
+                  const MPI_Comm              &communicator = MPI_COMM_WORLD);
 
                                        /**
                                        * Copy-Constructor. Set all the
-                                       * properties of the parallel
+                                       * properties of the non-%parallel
                                        * vector to those of the given
-                                       * argument and copy the
+                                       * %parallel vector and import the
                                        * elements.
                                        */
       BlockVector (const MPI::BlockVector &V);
-    
+
                                        /**
                                        * Copy-Constructor. Set all the
-                                       * properties of the parallel
-                                       * vector to those of the given
-                                       * argument and copy the
-                                       * elements.
+                                       * properties of the vector to those
+                                       * of the given input vector and copy
+                                       * the elements.
                                        */
       BlockVector (const BlockVector  &V);
 
@@ -629,24 +680,55 @@ namespace TrilinosWrappers
                                        * deal.II vector.
                                        */
       template <typename Number>
-      BlockVector & 
+      BlockVector &
        operator = (const ::dealii::BlockVector<Number> &V);
 
                                          /**
                                           * Reinitialize the BlockVector to
-                                          * contain as many blocks as there 
-                                         * are Epetra_Maps given in the input
-                                         * argument, according to the
-                                         * parallel distribution of the 
-                                         * individual components described
-                                         * in the maps.
+                                          * contain as many blocks as there
+                                          * are Epetra_Maps given in the
+                                          * input argument, according to the
+                                          * global size of the individual
+                                          * components described in the
+                                          * maps. Note that the resulting
+                                          * vector will be stored completely
+                                          * on each process. The Epetra_Map
+                                          * is useful when data exchange
+                                          * with a distributed vector based
+                                          * on the same Epetra_map is
+                                          * intended. In that case, the same
+                                          * communicator is used for data
+                                          * exchange.
                                           *
                                           * If <tt>fast==false</tt>, the vector
                                           * is filled with zeros.
                                           */
-      void reinit (const std::vector<Epetra_Map> &input_maps,
+      void reinit (const std::vector<Epetra_Map> &partitioning,
                   const bool                     fast = false);
 
+                                         /**
+                                          * Reinitialize the BlockVector to
+                                          * contain as many blocks as there
+                                          * are index sets given in the
+                                          * input argument, according to the
+                                          * global size of the individual
+                                          * components described in the
+                                          * index set, and using a given MPI
+                                          * communicator. The MPI
+                                          * communicator is useful when data
+                                          * exchange with a distributed
+                                          * vector based on the same
+                                          * initialization is intended. In
+                                          * that case, the same communicator
+                                          * is used for data exchange.
+                                          *
+                                          * If <tt>fast==false</tt>, the vector
+                                          * is filled with zeros.
+                                          */
+      void reinit (const std::vector<IndexSet> &partitioning,
+                  const MPI_Comm              &communicator = MPI_COMM_WORLD,
+                  const bool                   fast = false);
+
                                          /**
                                           * Reinitialize the BlockVector to
                                           * contain as many blocks as there
@@ -661,7 +743,7 @@ namespace TrilinosWrappers
                                           */
       void reinit (const std::vector<unsigned int> &N,
                   const bool                       fast=false);
-      
+
                                          /**
                                           * Reinit the function
                                           * according to a distributed
@@ -706,9 +788,9 @@ namespace TrilinosWrappers
                                          * blocks will get initialized with
                                          * zero size, so it is assumed that
                                          * the user resizes the
-                                         * individual blocks by herself 
+                                         * individual blocks by herself
                                          * in an appropriate way, and
-                                         * calls <tt>collect_sizes</tt> 
+                                         * calls <tt>collect_sizes</tt>
                                          * afterwards.
                                           */
       void reinit (const unsigned int num_blocks);
@@ -759,7 +841,7 @@ namespace TrilinosWrappers
                                        * Exception
                                        */
       DeclException2 (ExcNonLocalizedMap,
-                     int, int, 
+                     int, int,
                      << "For the generation of a localized vector the map has "
                      << "to assign all elements to all vectors! "
                      << "local_size = global_size is a necessary condition, but"
@@ -780,16 +862,18 @@ namespace TrilinosWrappers
 
 
   inline
-  BlockVector::BlockVector (const std::vector<Epetra_Map> &InputMaps)
+  BlockVector::BlockVector (const std::vector<Epetra_Map> &partitioning)
   {
-    for (unsigned int i=0; i<InputMaps.size(); ++i)
-      {
-       Assert (InputMaps[i].NumGlobalElements() == InputMaps[i].NumMyElements(),
-               ExcNonLocalizedMap(InputMaps[i].NumGlobalElements(),
-                                  InputMaps[i].NumMyElements()));
-      }
+    reinit (partitioning);
+  }
+
 
-    reinit (InputMaps);
+
+  inline
+  BlockVector::BlockVector (const std::vector<IndexSet> &partitioning,
+                           const MPI_Comm              &communicator)
+  {
+    reinit (partitioning, communicator);
   }
 
 
@@ -854,7 +938,7 @@ namespace TrilinosWrappers
   {
     this->components.resize (v.n_blocks());
     this->block_indices = v.block_indices;
-    
+
     for (unsigned int i=0; i<this->n_blocks(); ++i)
       this->components[i] = v.components[i];
   }
@@ -866,7 +950,7 @@ namespace TrilinosWrappers
   {
     Assert (n_blocks() == v.n_blocks(),
            ExcDimensionMismatch(n_blocks(),v.n_blocks()));
-      
+
     for (unsigned int row=0; row<n_blocks(); ++row)
       block(row).swap (v.block(row));
   }
@@ -891,12 +975,12 @@ namespace TrilinosWrappers
 
     return *this;
   }
-  
+
 
 /**
  * Global function which overloads the default implementation
  * of the C++ standard library which uses a temporary object. The
- * function simply exchanges the data of the two vectors. 
+ * function simply exchanges the data of the two vectors.
  *
  * @relates TrilinosWrappers::BlockVector
  * @author Martin Kronbichler, 2008
index 968f77ee24c9e4c9f5d9cf1d88bdf213b4fab1d1..5bff7053ebfc0d5fb3940e3c4b62ee840a85c9f9 100644 (file)
@@ -513,7 +513,7 @@ namespace TrilinosWrappers
     {
       if (size() != v.size())
        {
-         *vector = std::auto_ptr<Epetra_FEVector> 
+         *vector = std::auto_ptr<Epetra_FEVector>
          (new Epetra_FEVector(Epetra_Map (v.size(), 0,
 #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
                                           Epetra_MpiComm(MPI_COMM_SELF)
@@ -578,7 +578,21 @@ namespace TrilinosWrappers
                                        * map will be generated
                                        * internally.
                                         */
-      Vector (const Epetra_Map &InputMap);
+      Vector (const Epetra_Map &partitioning);
+
+                                       /**
+                                       * This constructor takes an index
+                                       * set that determines the size of
+                                       * the vector. If the index set is
+                                       * not localized, i.e., if there are
+                                       * some elements that are not present
+                                       * on all processes, only the global
+                                       * size of the index set will be
+                                       * taken and a localized version will
+                                       * be generated internally.
+                                        */
+      Vector (const IndexSet &partitioning,
+             const MPI_Comm &communicator = MPI_COMM_WORLD);
 
                                        /**
                                        * This constructor takes a
@@ -608,18 +622,41 @@ namespace TrilinosWrappers
 
                                        /**
                                        * Initialization with an
-                                       * Epetra_Map. Similar to the
-                                       * call in the other class
-                                       * MPI::Vector, with the only
-                                       * difference that now a copy on
-                                       * all processes is
-                                       * generated. The variable
-                                       * <tt>fast</tt> determines
-                                       * whether the vector should be
-                                       * filled with zero or left
-                                       * untouched.
+                                       * Epetra_Map. Similar to the call in
+                                       * the other class MPI::Vector, with
+                                       * the difference that now a copy on
+                                       * all processes is generated. This
+                                       * initialization function is
+                                       * appropriate when the data in the
+                                       * localized vector should be
+                                       * imported from a distributed vector
+                                       * that has been initialized with the
+                                       * same communicator. The variable
+                                       * <tt>fast</tt> determines whether
+                                       * the vector should be filled with
+                                       * zero or left untouched.
                                        */
       void reinit (const Epetra_Map &input_map,
+                  const bool        fast = false);
+
+                                       /**
+                                       * Initialization with an
+                                       * IndexSet. Similar to the call in
+                                       * the other class MPI::Vector, with
+                                       * the difference that now a copy on
+                                       * all processes is generated. This
+                                       * initialization function is
+                                       * appropriate in case the data in
+                                       * the localized vector should be
+                                       * imported from a distributed vector
+                                       * that has been initialized with the
+                                       * same communicator. The variable
+                                       * <tt>fast</tt> determines whether
+                                       * the vector should be filled with
+                                       * zero or left untouched.
+                                       */
+      void reinit (const IndexSet   &input_map,
+                  const MPI_Comm   &communicator = MPI_COMM_WORLD,
                   const bool        fast = false);
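
A short sketch of the IndexSet-based initialization of the localized vector (size invented; assumes the usual headers and an initialized MPI environment). Only the global size of the index set enters, and the result is replicated on every process:

  IndexSet all_elements (100);
  all_elements.add_range (0, 100);

  TrilinosWrappers::Vector localized (all_elements, MPI_COMM_WORLD);
  localized.reinit (all_elements, MPI_COMM_WORLD, /*fast=*/true); // same size: entries left untouched
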
 
                                        /**
@@ -721,7 +758,7 @@ namespace TrilinosWrappers
       {
        vector.release();
 
-       Epetra_LocalMap map ((int)v.size(), 0, 
+       Epetra_LocalMap map ((int)v.size(), 0,
                             Utilities::Trilinos::comm_self());
        vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
       }
index 4ab9c469e701d9f2701132be26b31c05d4838b35..72a4b26575365b690f89dde879e236cc268fa56c 100644 (file)
@@ -113,7 +113,7 @@ operator = (const BlockSparsityPatternBase<SparsityPatternBase> &bsp)
 
   return *this;
 }
-  
+
 
 
 template <class SparsityPatternBase>
@@ -138,8 +138,8 @@ BlockSparsityPatternBase<SparsityPatternBase>::collect_sizes ()
                                   // finally initialize the row
                                   // indices with this array
   row_indices.reinit (row_sizes);
-  
-  
+
+
                                   // then do the same with the columns
   for (unsigned int c=0; c<columns; ++c)
     col_sizes[c] = sub_objects[0][c]->n_cols();
@@ -340,7 +340,7 @@ BlockSparsityPattern::reinit(
       {
        const unsigned int start = rows.local_to_global(i, 0);
        const unsigned int length = rows.block_size(i);
-       
+
        VectorSlice<const std::vector<unsigned int> >
          block_rows(row_lengths[j], start, length);
        block(i,j).reinit(rows.block_size(i),
@@ -349,10 +349,10 @@ BlockSparsityPattern::reinit(
       }
   this->collect_sizes();
   Assert (this->row_indices == rows, ExcInternalError());
-  Assert (this->column_indices == cols, ExcInternalError());  
+  Assert (this->column_indices == cols, ExcInternalError());
 }
 
-    
+
 bool
 BlockSparsityPattern::is_compressed () const
 {
@@ -376,7 +376,7 @@ BlockSparsityPattern::memory_consumption () const
   for (unsigned int r=0; r<rows; ++r)
     for (unsigned int c=0; c<columns; ++c)
       mem += MemoryConsumption::memory_consumption (*sub_objects[r][c]);
-  
+
   return mem;
 }
 
@@ -477,7 +477,7 @@ BlockCompressedSparsityPattern::reinit (
   for (unsigned int i=0;i<row_block_sizes.size();++i)
     for (unsigned int j=0;j<col_block_sizes.size();++j)
       this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
-  this->collect_sizes();  
+  this->collect_sizes();
 }
 
 
@@ -493,7 +493,7 @@ BlockCompressedSparsityPattern::reinit (
     for (unsigned int j=0;j<col_indices.size();++j)
       this->block(i,j).reinit(row_indices.block_size(i),
                              col_indices.block_size(j));
-  this->collect_sizes();  
+  this->collect_sizes();
 }
 
 
@@ -540,7 +540,7 @@ BlockCompressedSetSparsityPattern::reinit (
   for (unsigned int i=0;i<row_block_sizes.size();++i)
     for (unsigned int j=0;j<col_block_sizes.size();++j)
       this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
-  this->collect_sizes();  
+  this->collect_sizes();
 }
 
 
@@ -556,7 +556,7 @@ BlockCompressedSetSparsityPattern::reinit (
     for (unsigned int j=0;j<col_indices.size();++j)
       this->block(i,j).reinit(row_indices.block_size(i),
                              col_indices.block_size(j));
-  this->collect_sizes();  
+  this->collect_sizes();
 }
 
 
@@ -578,10 +578,10 @@ BlockCompressedSimpleSparsityPattern (const unsigned int n_rows,
 
 BlockCompressedSimpleSparsityPattern::
 BlockCompressedSimpleSparsityPattern (const std::vector<unsigned int>& row_indices,
-                               const std::vector<unsigned int>& col_indices)
+                                     const std::vector<unsigned int>& col_indices)
                :
                BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(row_indices.size(),
-                                                                   col_indices.size())
+                                                                         col_indices.size())
 {
   for (unsigned int i=0;i<row_indices.size();++i)
     for (unsigned int j=0;j<col_indices.size();++j)
@@ -596,11 +596,12 @@ BlockCompressedSimpleSparsityPattern::reinit (
   const std::vector< unsigned int > &row_block_sizes,
   const std::vector< unsigned int > &col_block_sizes)
 {
-  BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::reinit(row_block_sizes.size(), col_block_sizes.size());
+  BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::
+    reinit(row_block_sizes.size(), col_block_sizes.size());
   for (unsigned int i=0;i<row_block_sizes.size();++i)
     for (unsigned int j=0;j<col_block_sizes.size();++j)
       this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
-  this->collect_sizes();  
+  this->collect_sizes();
 }
 
 
@@ -640,14 +641,34 @@ namespace TrilinosWrappers
 
 
   BlockSparsityPattern::
-  BlockSparsityPattern (const std::vector<Epetra_Map>& input_maps)
+  BlockSparsityPattern (const std::vector<Epetra_Map>& parallel_partitioning)
+               :
+               BlockSparsityPatternBase<SparsityPattern>
+                 (parallel_partitioning.size(),
+                  parallel_partitioning.size())
+  {
+    for (unsigned int i=0;i<parallel_partitioning.size();++i)
+      for (unsigned int j=0;j<parallel_partitioning.size();++j)
+       this->block(i,j).reinit(parallel_partitioning[i],
+                               parallel_partitioning[j]);
+    this->collect_sizes();
+  }
+
+
+
+  BlockSparsityPattern::
+  BlockSparsityPattern (const std::vector<IndexSet>& parallel_partitioning,
+                       const MPI_Comm             & communicator)
                :
-               BlockSparsityPatternBase<SparsityPattern>(input_maps.size(),
-                                                         input_maps.size())
+               BlockSparsityPatternBase<SparsityPattern>
+                 (parallel_partitioning.size(),
+                  parallel_partitioning.size())
   {
-    for (unsigned int i=0;i<input_maps.size();++i)
-      for (unsigned int j=0;j<input_maps.size();++j)
-       this->block(i,j).reinit(input_maps[i], input_maps[j]);
+    for (unsigned int i=0;i<parallel_partitioning.size();++i)
+      for (unsigned int j=0;j<parallel_partitioning.size();++j)
+       this->block(i,j).reinit(parallel_partitioning[i],
+                               parallel_partitioning[j],
+                               communicator);
     this->collect_sizes();
   }
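
A usage sketch for the new IndexSet-based constructor (a hypothetical 2x2 block layout written for a single process; in a parallel run each process would add only its locally owned rows to the index sets):

  std::vector<IndexSet> partitioning (2);
  partitioning[0] = IndexSet (100);  partitioning[0].add_range (0, 100);
  partitioning[1] = IndexSet (40);   partitioning[1].add_range (0, 40);

  TrilinosWrappers::BlockSparsityPattern pattern (partitioning, MPI_COMM_WORLD);
  // ... add couplings through pattern.block(i,j), then compress the pattern ...
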
 
@@ -657,24 +678,44 @@ namespace TrilinosWrappers
   BlockSparsityPattern::reinit (const std::vector<unsigned int> &row_block_sizes,
                                const std::vector<unsigned int> &col_block_sizes)
   {
-    dealii::BlockSparsityPatternBase<SparsityPattern>::reinit(row_block_sizes.size(), col_block_sizes.size());
+    dealii::BlockSparsityPatternBase<SparsityPattern>::
+      reinit(row_block_sizes.size(), col_block_sizes.size());
     for (unsigned int i=0;i<row_block_sizes.size();++i)
       for (unsigned int j=0;j<col_block_sizes.size();++j)
        this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
-    this->collect_sizes();  
+    this->collect_sizes();
+  }
+
+
+
+  void
+  BlockSparsityPattern::reinit (const std::vector<Epetra_Map> &parallel_partitioning)
+  {
+    dealii::BlockSparsityPatternBase<SparsityPattern>::
+      reinit(parallel_partitioning.size(),
+            parallel_partitioning.size());
+    for (unsigned int i=0;i<parallel_partitioning.size();++i)
+      for (unsigned int j=0;j<parallel_partitioning.size();++j)
+       this->block(i,j).reinit(parallel_partitioning[i],
+                               parallel_partitioning[j]);
+    this->collect_sizes();
   }
 
 
 
   void
-  BlockSparsityPattern::reinit (const std::vector<Epetra_Map> &input_maps)
+  BlockSparsityPattern::reinit (const std::vector<IndexSet> &parallel_partitioning,
+                               const MPI_Comm &communicator)
   {
-    dealii::BlockSparsityPatternBase<SparsityPattern>::reinit(input_maps.size(), 
-                                                             input_maps.size());
-    for (unsigned int i=0;i<input_maps.size();++i)
-      for (unsigned int j=0;j<input_maps.size();++j)
-       this->block(i,j).reinit(input_maps[i],input_maps[j]);
-    this->collect_sizes();  
+    dealii::BlockSparsityPatternBase<SparsityPattern>::
+      reinit(parallel_partitioning.size(),
+            parallel_partitioning.size());
+    for (unsigned int i=0;i<parallel_partitioning.size();++i)
+      for (unsigned int j=0;j<parallel_partitioning.size();++j)
+       this->block(i,j).reinit(parallel_partitioning[i],
+                               parallel_partitioning[j],
+                               communicator);
+    this->collect_sizes();
   }
 
 }
index 890816ed48d405075a105165bfb69783ba157003..8b683ddb7ef41c00ce42eddc122c6138270b2e5f 100644 (file)
@@ -90,20 +90,20 @@ namespace TrilinosWrappers
 
 
 
-  template <typename BlockSparsityType>  
+  template <typename BlockSparsityType>
   void
   BlockSparseMatrix::
-  reinit (const std::vector<Epetra_Map> &input_maps,
+  reinit (const std::vector<Epetra_Map> &parallel_partitioning,
          const BlockSparsityType       &block_sparsity_pattern)
   {
-    Assert (input_maps.size() == block_sparsity_pattern.n_block_rows(),
-           ExcDimensionMismatch (input_maps.size(),
+    Assert (parallel_partitioning.size() == block_sparsity_pattern.n_block_rows(),
+           ExcDimensionMismatch (parallel_partitioning.size(),
                                  block_sparsity_pattern.n_block_rows()));
-    Assert (input_maps.size() == block_sparsity_pattern.n_block_cols(),
-           ExcDimensionMismatch (input_maps.size(),
+    Assert (parallel_partitioning.size() == block_sparsity_pattern.n_block_cols(),
+           ExcDimensionMismatch (parallel_partitioning.size(),
                                  block_sparsity_pattern.n_block_cols()));
-    
-    const unsigned int n_block_rows = input_maps.size();
+
+    const unsigned int n_block_rows = parallel_partitioning.size();
 
     Assert (n_block_rows == block_sparsity_pattern.n_block_rows(),
            ExcDimensionMismatch (n_block_rows,
@@ -112,7 +112,7 @@ namespace TrilinosWrappers
            ExcDimensionMismatch (n_block_rows,
                                  block_sparsity_pattern.n_block_cols()));
 
-    
+
                                     // Call the other basic reinit function, ...
     reinit (block_sparsity_pattern.n_block_rows(),
            block_sparsity_pattern.n_block_cols());
@@ -120,13 +120,14 @@ namespace TrilinosWrappers
                                     // ... set the correct sizes, ...
     this->row_block_indices    = block_sparsity_pattern.get_row_indices();
     this->column_block_indices = block_sparsity_pattern.get_column_indices();
-       
+
                                     // ... and then assign the correct
                                     // data to the blocks.
     for (unsigned int r=0; r<this->n_block_rows(); ++r)
       for (unsigned int c=0; c<this->n_block_cols(); ++c)
         {
-         this->sub_objects[r][c]->reinit (input_maps[r], input_maps[c],
+         this->sub_objects[r][c]->reinit (parallel_partitioning[r],
+                                          parallel_partitioning[c],
                                           block_sparsity_pattern.block(r,c));
         }
   }
@@ -136,42 +137,44 @@ namespace TrilinosWrappers
   template <typename BlockSparsityType>
   void
   BlockSparseMatrix::
-  reinit (const BlockSparsityType &block_sparsity_pattern)
+  reinit (const std::vector<IndexSet> &parallel_partitioning,
+         const BlockSparsityType     &block_sparsity_pattern,
+         const MPI_Comm              &communicator)
   {
-    Assert (block_sparsity_pattern.n_block_rows() ==
-           block_sparsity_pattern.n_block_cols(),
-           ExcDimensionMismatch (block_sparsity_pattern.n_block_rows(),
-                                 block_sparsity_pattern.n_block_cols()));
-    Assert (block_sparsity_pattern.n_rows() ==
-           block_sparsity_pattern.n_cols(),
-           ExcDimensionMismatch (block_sparsity_pattern.n_rows(),
-                                 block_sparsity_pattern.n_cols()));
-    
-                                    // produce a dummy local map and pass it
-                                    // off to the other function
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-    Epetra_MpiComm    trilinos_communicator (MPI_COMM_SELF);
-#else
-    Epetra_SerialComm trilinos_communicator;
-#endif
+    std::vector<Epetra_Map> epetra_maps;
+    for (unsigned int i=0; i<block_sparsity_pattern.n_block_rows(); ++i)
+      epetra_maps.push_back
+       (parallel_partitioning[i].make_trilinos_map(communicator, false));
 
-    std::vector<Epetra_Map> input_maps;
+    reinit (epetra_maps, block_sparsity_pattern);
+
+  }
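
A sketch of how this overload might be called (names and sizes invented; the sparsity pattern is assumed to have been filled and compressed beforehand, and the same IndexSet partitioning is used for rows and columns):

  std::vector<IndexSet> partitioning (2);
  partitioning[0] = IndexSet (100);  partitioning[0].add_range (0, 100);
  partitioning[1] = IndexSet (40);   partitioning[1].add_range (0, 40);

  TrilinosWrappers::BlockSparsityPattern pattern (partitioning, MPI_COMM_WORLD);
  // ... fill and compress the pattern ...

  TrilinosWrappers::BlockSparseMatrix matrix;
  matrix.reinit (partitioning, pattern, MPI_COMM_WORLD);
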
+
+
+
+  template <typename BlockSparsityType>
+  void
+  BlockSparseMatrix::
+  reinit (const BlockSparsityType &block_sparsity_pattern)
+  {
+    std::vector<Epetra_Map> parallel_partitioning;
     for (unsigned int i=0; i<block_sparsity_pattern.n_block_rows(); ++i)
-      input_maps.push_back (Epetra_Map(block_sparsity_pattern.block(i,0).n_rows(),
-                                      0,
-                                      trilinos_communicator));
+      parallel_partitioning.push_back
+       (Epetra_Map(block_sparsity_pattern.block(i,0).n_rows(),
+                   0,
+                   Utilities::Trilinos::comm_self()));
 
-    reinit (input_maps, block_sparsity_pattern);
+    reinit (parallel_partitioning, block_sparsity_pattern);
   }
 
 
 
-  template <>  
+  template <>
   void
   BlockSparseMatrix::
   reinit (const BlockSparsityPattern    &block_sparsity_pattern)
   {
-  
+
                                     // Call the other basic reinit function, ...
     reinit (block_sparsity_pattern.n_block_rows(),
            block_sparsity_pattern.n_block_cols());
@@ -179,7 +182,7 @@ namespace TrilinosWrappers
                                     // ... set the correct sizes, ...
     this->row_block_indices    = block_sparsity_pattern.get_row_indices();
     this->column_block_indices = block_sparsity_pattern.get_column_indices();
-       
+
                                     // ... and then assign the correct
                                     // data to the blocks.
     for (unsigned int r=0; r<this->n_block_rows(); ++r)
@@ -193,12 +196,12 @@ namespace TrilinosWrappers
 
   void
   BlockSparseMatrix::
-  reinit (const std::vector<Epetra_Map>             &input_maps,
+  reinit (const std::vector<Epetra_Map>             &parallel_partitioning,
          const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
          const double                               drop_tolerance)
   {
-    const unsigned int n_block_rows = input_maps.size();
-    
+    const unsigned int n_block_rows = parallel_partitioning.size();
+
     Assert (n_block_rows == dealii_block_sparse_matrix.n_block_rows(),
            ExcDimensionMismatch (n_block_rows,
                                  dealii_block_sparse_matrix.n_block_rows()));
@@ -208,13 +211,14 @@ namespace TrilinosWrappers
 
                                     // Call the other basic reinit function ...
     reinit (n_block_rows, n_block_rows);
-       
+
                                     // ... and then assign the correct
                                     // data to the blocks.
     for (unsigned int r=0; r<this->n_block_rows(); ++r)
       for (unsigned int c=0; c<this->n_block_cols(); ++c)
         {
-          this->sub_objects[r][c]->reinit(input_maps[r],input_maps[c],
+          this->sub_objects[r][c]->reinit(parallel_partitioning[r],
+                                         parallel_partitioning[c],
                                          dealii_block_sparse_matrix.block(r,c),
                                          drop_tolerance);
         }
@@ -237,7 +241,7 @@ namespace TrilinosWrappers
            dealii_block_sparse_matrix.n(),
            ExcDimensionMismatch (dealii_block_sparse_matrix.m(),
                                  dealii_block_sparse_matrix.n()));
-    
+
                                     // produce a dummy local map and pass it
                                     // off to the other function
 #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
@@ -246,13 +250,13 @@ namespace TrilinosWrappers
     Epetra_SerialComm trilinos_communicator;
 #endif
 
-    std::vector<Epetra_Map> input_maps;
+    std::vector<Epetra_Map> parallel_partitioning;
     for (unsigned int i=0; i<dealii_block_sparse_matrix.n_block_rows(); ++i)
-      input_maps.push_back (Epetra_Map(dealii_block_sparse_matrix.block(i,0).m(),
+      parallel_partitioning.push_back (Epetra_Map(dealii_block_sparse_matrix.block(i,0).m(),
                                       0,
                                       trilinos_communicator));
 
-    reinit (input_maps, dealii_block_sparse_matrix, drop_tolerance);
+    reinit (parallel_partitioning, dealii_block_sparse_matrix, drop_tolerance);
   }
 
 
index b25e713ad8773335046aea19444e6776d7c6540b..b4c4d038a88fad40af3049902394eacc74f27776 100644 (file)
@@ -44,7 +44,7 @@ namespace TrilinosWrappers
        this->components[i] = v.block(i);
 
       collect_sizes();
-       
+
       return *this;
     }
 
@@ -93,6 +93,31 @@ namespace TrilinosWrappers
 
 
 
+    void
+    BlockVector::reinit (const std::vector<IndexSet> &parallel_partitioning,
+                        const MPI_Comm              &communicator,
+                        const bool                   fast)
+    {
+      const unsigned int no_blocks = parallel_partitioning.size();
+      std::vector<unsigned int> block_sizes (no_blocks);
+
+      for (unsigned int i=0; i<no_blocks; ++i)
+       {
+         block_sizes[i] = parallel_partitioning[i].size();
+       }
+
+      this->block_indices.reinit (block_sizes);
+      if (components.size() != n_blocks())
+        components.resize(n_blocks());
+
+      for (unsigned int i=0; i<n_blocks(); ++i)
+        components[i].reinit(parallel_partitioning[i], communicator, fast);
+
+      collect_sizes();
+    }
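
A sketch of the distributed counterpart (ranges invented and hard-coded; in practice each process inserts exactly the rows it owns into each block's index set):

    std::vector<IndexSet> owned (2);
    owned[0] = IndexSet (1000);  owned[0].add_range (0, 500);  // rows of block 0 owned here
    owned[1] = IndexSet (200);   owned[1].add_range (0, 100);  // rows of block 1 owned here

    TrilinosWrappers::MPI::BlockVector v;
    v.reinit (owned, MPI_COMM_WORLD, /*fast=*/false);          // zero-initialized
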
+
+
+
     void
     BlockVector::reinit (const BlockVector& v,
                         const bool fast)
@@ -100,10 +125,10 @@ namespace TrilinosWrappers
       block_indices = v.get_block_indices();
       if (components.size() != n_blocks())
         components.resize(n_blocks());
-  
+
       for (unsigned int i=0;i<n_blocks();++i)
         components[i].reinit(v.block(i), fast, false);
-      
+
       collect_sizes();
     }
 
@@ -116,7 +141,7 @@ namespace TrilinosWrappers
       this->block_indices.reinit (block_sizes);
       if (this->components.size() != this->n_blocks())
         this->components.resize(this->n_blocks());
-  
+
       for (unsigned int i=0;i<this->n_blocks();++i)
         components[i].clear();
 
@@ -126,7 +151,7 @@ namespace TrilinosWrappers
 
 
     void
-    BlockVector::import_nonlocal_data_for_fe 
+    BlockVector::import_nonlocal_data_for_fe
       (const TrilinosWrappers::BlockSparseMatrix &m,
        const BlockVector                         &v)
     {
@@ -195,6 +220,30 @@ namespace TrilinosWrappers
 
 
 
+  void
+  BlockVector::reinit (const std::vector<IndexSet> &partitioning,
+                      const MPI_Comm              &communicator,
+                      const bool                   fast)
+  {
+    unsigned int no_blocks = partitioning.size();
+    std::vector<unsigned int> block_sizes (no_blocks);
+
+    for (unsigned int i=0; i<no_blocks; ++i)
+      block_sizes[i] = partitioning[i].size();
+
+
+    this->block_indices.reinit (block_sizes);
+    if (components.size() != n_blocks())
+      components.resize(n_blocks());
+
+    for (unsigned int i=0; i<n_blocks(); ++i)
+      components[i].reinit(partitioning[i], communicator, fast);
+
+    collect_sizes();
+  }
+
+
+
   void
   BlockVector::reinit (const std::vector<unsigned int> &block_sizes,
                       const bool                       fast)
@@ -206,9 +255,9 @@ namespace TrilinosWrappers
     for (unsigned int i=0; i<n_blocks(); ++i)
       components[i].reinit(block_sizes[i], fast);
 
-    collect_sizes();      
+    collect_sizes();
   }
-    
+
 
 
   void
@@ -217,7 +266,7 @@ namespace TrilinosWrappers
     block_indices = v.get_block_indices();
     if (components.size() != n_blocks())
       components.resize(n_blocks());
-  
+
     for (unsigned int i=0;i<n_blocks();++i)
       components[i] = v.block(i);
   }
@@ -231,7 +280,7 @@ namespace TrilinosWrappers
     block_indices.reinit (block_sizes);
     if (components.size() != n_blocks())
       components.resize(n_blocks());
-  
+
     for (unsigned int i=0;i<n_blocks();++i)
       block(i).clear();
 
@@ -247,10 +296,10 @@ namespace TrilinosWrappers
     block_indices = v.get_block_indices();
     if (components.size() != n_blocks())
       components.resize(n_blocks());
-  
+
     for (unsigned int i=0;i<n_blocks();++i)
       components[i].reinit(v.block(i), fast);
-      
+
     collect_sizes();
   }
 
@@ -284,7 +333,7 @@ namespace TrilinosWrappers
 
     return *this;
   }
+
 }
 
 
index 01b05c724b23d2eed7ad35f3dd804ddf7425603e..5d56402d89df67610d69e2e68dbc57dc244d5bc0 100644 (file)
@@ -32,27 +32,27 @@ namespace TrilinosWrappers
     Vector::Vector ()
     {
       last_action = Zero;
-      vector = std::auto_ptr<Epetra_FEVector> 
+      vector = std::auto_ptr<Epetra_FEVector>
        (new Epetra_FEVector(Epetra_Map(0,0,0,Utilities::Trilinos::comm_self())));
     }
 
 
-  
-    Vector::Vector (const Epetra_Map &input_map)
+
+    Vector::Vector (const Epetra_Map &parallel_partitioning)
     {
-      reinit (input_map);
+      reinit (parallel_partitioning);
     }
 
 
-  
-    Vector::Vector (const IndexSet &parallel_partitioner,
+
+    Vector::Vector (const IndexSet &parallel_partitioning,
                    const MPI_Comm &communicator)
     {
-      reinit (parallel_partitioner, communicator);
+      reinit (parallel_partitioning, communicator);
     }
-  
 
-  
+
+
     Vector::Vector (const Vector &v)
                     :
                     VectorBase()
@@ -73,7 +73,7 @@ namespace TrilinosWrappers
                                         v.vector->Map().NumGlobalElements()));
 
       last_action = Zero;
-      
+
       if (input_map.SameAs(v.vector->Map()) == true)
        vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(*v.vector));
       else
@@ -96,7 +96,7 @@ namespace TrilinosWrappers
                                         v.vector->Map().NumGlobalElements()));
 
       last_action = Zero;
-      
+
       vector = std::auto_ptr<Epetra_FEVector>
        (new Epetra_FEVector(parallel_partitioner.make_trilinos_map(communicator,
                                                                    true)));
@@ -107,7 +107,7 @@ namespace TrilinosWrappers
 
     Vector::~Vector ()
     {}
-    
+
 
 
     void
@@ -121,10 +121,10 @@ namespace TrilinosWrappers
          const int ierr = vector->PutScalar(0.);
          Assert (ierr == 0, ExcTrilinosError(ierr));
        }
-  
+
       last_action = Zero;
     }
-    
+
 
 
     void
@@ -157,7 +157,7 @@ namespace TrilinosWrappers
            {
              vector.reset();
 
-             vector = std::auto_ptr<Epetra_FEVector> 
+             vector = std::auto_ptr<Epetra_FEVector>
                (new Epetra_FEVector(v.vector->Map()));
              last_action = Zero;
            }
@@ -169,11 +169,11 @@ namespace TrilinosWrappers
                                               // and parallel
                                               // distribution
              int ierr;
-             ierr = vector->GlobalAssemble (last_action);      
-             AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+             ierr = vector->GlobalAssemble (last_action);
+             Assert (ierr == 0, ExcTrilinosError(ierr));
 
              ierr = vector->PutScalar(0.0);
-             AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+             Assert (ierr == 0, ExcTrilinosError(ierr));
 
              last_action = Zero;
            }
@@ -221,7 +221,7 @@ namespace TrilinosWrappers
                                // input vector has a 1-to-1 map (need to
                                // import data). The third case means that we
                                // have to rebuild the calling vector.
-      if (size() == v.size() && 
+      if (size() == v.size() &&
          local_range() == v.local_range())
        {
          Assert (vector->Map().SameAs(v.vector->Map()) == true,
@@ -230,7 +230,7 @@ namespace TrilinosWrappers
                              " seems to be the same. Check vector setup!"));
 
          const int ierr = vector->Update(1.0, *v.vector, 0.0);
-         AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+         Assert (ierr == 0, ExcTrilinosError(ierr));
 
          last_action = Zero;
        }
@@ -245,7 +245,7 @@ namespace TrilinosWrappers
       else
        {
          vector.reset();
-         vector = std::auto_ptr<Epetra_FEVector> 
+         vector = std::auto_ptr<Epetra_FEVector>
                            (new Epetra_FEVector(*v.vector));
          last_action = Zero;
        }
@@ -323,19 +323,35 @@ namespace TrilinosWrappers
   Vector::Vector (const Epetra_Map &input_map)
   {
     last_action = Zero;
-    Epetra_LocalMap map (input_map.NumGlobalElements(), 
-                        input_map.IndexBase(), 
+    Epetra_LocalMap map (input_map.NumGlobalElements(),
+                        input_map.IndexBase(),
                         input_map.Comm());
     vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
   }
 
 
 
+  Vector::Vector (const IndexSet &partitioning,
+                 const MPI_Comm &communicator)
+  {
+    last_action = Zero;
+    Epetra_LocalMap map (partitioning.size(),
+                        0,
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                        Epetra_MpiComm(communicator));
+#else
+                         Epetra_SerialComm());
+#endif
+    vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
+  }
+
+
+
   Vector::Vector (const VectorBase &v)
   {
     last_action = Zero;
-    Epetra_LocalMap map (v.vector->Map().NumGlobalElements(), 
-                        v.vector->Map().IndexBase(), 
+    Epetra_LocalMap map (v.vector->Map().NumGlobalElements(),
+                        v.vector->Map().IndexBase(),
                         v.vector->Map().Comm());
     vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
 
@@ -367,12 +383,12 @@ namespace TrilinosWrappers
       {
        int ierr;
        ierr = vector->GlobalAssemble(last_action);
-       AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+       Assert (ierr == 0, ExcTrilinosError(ierr));
 
        ierr = vector->PutScalar(0.0);
-       AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+       Assert (ierr == 0, ExcTrilinosError(ierr));
       }
-    
+
     last_action = Zero;
   }
 
@@ -394,16 +410,50 @@ namespace TrilinosWrappers
       {
        int ierr;
        ierr = vector->GlobalAssemble(last_action);
-       AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+       Assert (ierr == 0, ExcTrilinosError(ierr));
 
        ierr = vector->PutScalar(0.0);
-       AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+       Assert (ierr == 0, ExcTrilinosError(ierr));
+      }
+
+    last_action = Zero;
+  }
+
+
+
+  void
+  Vector::reinit (const IndexSet &partitioning,
+                 const MPI_Comm &communicator,
+                  const bool      fast)
+  {
+    if (vector->Map().NumGlobalElements() !=
+       static_cast<int>(partitioning.size()))
+      {
+       vector.reset();
+       Epetra_LocalMap map (partitioning.size(),
+                            0,
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                            Epetra_MpiComm(communicator));
+#else
+                             Epetra_SerialComm());
+#endif
+       vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
+      }
+    else if (fast == false)
+      {
+       int ierr;
+       ierr = vector->GlobalAssemble(last_action);
+       Assert (ierr == 0, ExcTrilinosError(ierr));
+
+       ierr = vector->PutScalar(0.0);
+       Assert (ierr == 0, ExcTrilinosError(ierr));
       }
 
     last_action = Zero;
   }
 
 
+
   void
   Vector::reinit (const VectorBase &v,
                  const bool        fast,
@@ -435,10 +485,10 @@ namespace TrilinosWrappers
                                " seems to be the same. Check vector setup!"));
 
            ierr = vector->GlobalAssemble(last_action);
-           AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+           Assert (ierr == 0, ExcTrilinosError(ierr));
 
            ierr = vector->PutScalar(0.0);
-           AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+           Assert (ierr == 0, ExcTrilinosError(ierr));
          }
        last_action = Zero;
       }
@@ -477,7 +527,7 @@ namespace TrilinosWrappers
     if (size() != v.size())
       {
        vector.reset();
-       Epetra_LocalMap map (v.vector->Map().NumGlobalElements(), 
+       Epetra_LocalMap map (v.vector->Map().NumGlobalElements(),
                             v.vector->Map().IndexBase(),
                             v.vector->Comm());
        vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
@@ -494,18 +544,18 @@ namespace TrilinosWrappers
   {
     if (size() != v.size())
       {
-       Epetra_LocalMap map (v.vector->Map().NumGlobalElements(), 
+       Epetra_LocalMap map (v.vector->Map().NumGlobalElements(),
                             v.vector->Map().IndexBase(),
                             v.vector->Comm());
        vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
       }
 
     const int ierr = vector->Update(1.0, *v.vector, 0.0);
-    AssertThrow (ierr == 0, ExcTrilinosError(ierr));   
+    Assert (ierr == 0, ExcTrilinosError(ierr));
 
     return *this;
   }
-  
+
 }
 
 DEAL_II_NAMESPACE_CLOSE
