https://gitweb.dealii.org/ - dealii.git/commitdiff
Check in a revision of the ConstraintMatrix::distribute patch. I'm not sure it actual...
author    Wolfgang Bangerth <bangerth@math.tamu.edu>
          Thu, 30 May 2013 22:29:18 +0000 (22:29 +0000)
committer Wolfgang Bangerth <bangerth@math.tamu.edu>
          Thu, 30 May 2013 22:29:18 +0000 (22:29 +0000)
git-svn-id: https://svn.dealii.org/trunk@29674 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/include/deal.II/lac/constraint_matrix.h
deal.II/include/deal.II/lac/constraint_matrix.templates.h
deal.II/source/lac/constraint_matrix.cc
deal.II/source/lac/constraint_matrix.inst.in

diff --git a/deal.II/include/deal.II/lac/constraint_matrix.h b/deal.II/include/deal.II/lac/constraint_matrix.h
index 40d824c3d564cdb46f61c03af23290590e7c1f89..ce286f57c6c27ccb6fbb86c42728aca818f3a8e5 100644
@@ -1507,6 +1507,24 @@ public:
                   << " should not be stored by this object, but a constraint "
                   << "is being added.");
 
+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException2 (ExcIncorrectConstraint,
+                 int, int,
+                  << "While distributing the constraint for DoF "
+                  << arg1 << ", it turns out that one of the processors "
+                  << "who own the " << arg2
+                  << " degrees of freedom that x_" << arg1
+                  << " is constrained against does not know about "
+                  << "the constraint on x_" << arg1
+                  << ". Did you not initialize the ConstraintMatrix "
+                  << "with the appropriate locally_relevant set so "
+                  << "that every processor who owns a DoF that constrains "
+                  << "another DoF also knows about this constraint?");
+  
 private:
 
   /**
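
The message of this new exception describes a usage requirement rather than an internal error: every processor must store not only the constraints on its own DoFs but also the constraints involving ghost DoFs that its locally owned DoFs are constrained against. The following is a minimal sketch of the kind of setup the message asks for, assuming a parallel computation with a DoFHandler named dof_handler; the variable names are placeholders and the exact calls may differ between deal.II versions:

    #include <deal.II/dofs/dof_tools.h>
    #include <deal.II/lac/constraint_matrix.h>

    // extract the locally relevant DoFs (locally owned plus ghost DoFs)
    IndexSet locally_relevant_dofs;
    DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);

    // initialize the ConstraintMatrix with this set so that it stores all
    // constraints touching locally relevant DoFs, not just locally owned ones
    ConstraintMatrix constraints;
    constraints.reinit (locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints (dof_handler, constraints);
    constraints.close ();
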
diff --git a/deal.II/include/deal.II/lac/constraint_matrix.templates.h b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
index af2b713a700178fe6e68a36e57c80395d10effbc..a1c17c953826df619dc3bf571d982991a37b3ff4 100644
@@ -24,6 +24,9 @@
 #include <deal.II/lac/block_sparse_matrix.h>
 #include <deal.II/lac/parallel_vector.h>
 #include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
 
 #include <iomanip>
 
@@ -973,27 +976,205 @@ ConstraintMatrix::distribute (const VectorType &condensed,
 }
 
 
+namespace internal
+{
+  namespace
+  {
+    // create an output vector that consists of the input vector's locally owned
+    // elements plus some ghost elements that need to be imported from elsewhere
+    //
+    // this is an operation that is different for all vector types and so we
+    // need a few overloads
+#ifdef DEAL_II_WITH_TRILINOS
+    void
+    import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec,
+                                       const IndexSet       &/*locally_owned_elements*/,
+                                       const IndexSet       &needed_elements,
+                                       TrilinosWrappers::MPI::Vector       &output)
+    {
+      output.reinit (needed_elements,
+                     dynamic_cast<const Epetra_MpiComm *>(&vec.vector_partitioner().Comm())->GetMpiComm());
+      output = vec;
+    }
 
-template<class VectorType>
+
+    void
+    import_vector_with_ghost_elements (const TrilinosWrappers::Vector &vec,
+                                       const IndexSet       &,
+                                       const IndexSet       &,
+                                       TrilinosWrappers::Vector       &output)
+    {
+      output.reinit (vec.size());
+      output = vec;
+    }
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+    void
+    import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec,
+                                       const IndexSet       &locally_owned_elements,
+                                       const IndexSet       &needed_elements,
+                                       PETScWrappers::MPI::Vector       &output)
+    {
+      output.reinit (vec.get_mpi_communicator(), locally_owned_elements, needed_elements);
+      output = vec;
+    }
+
+
+
+    void
+    import_vector_with_ghost_elements (const PETScWrappers::Vector &vec,
+                                       const IndexSet       &,
+                                       const IndexSet       &,
+                                       PETScWrappers::Vector       &output)
+    {
+      output.reinit (vec.size());
+      output = vec;
+    }
+#endif
+
+    template <typename number>
+    void
+    import_vector_with_ghost_elements (const parallel::distributed::Vector<number> &vec,
+                                       const IndexSet       &locally_owned_elements,
+                                       const IndexSet       &needed_elements,
+                                       parallel::distributed::Vector<number>       &output)
+    {
+      output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
+      output = vec;
+    }
+
+
+    template <typename number>
+    void
+    import_vector_with_ghost_elements (const dealii::Vector<number> &vec,
+                                       const IndexSet   &locally_owned_elements,
+                                       const IndexSet   &needed_elements,
+                                       dealii::Vector<number>       &output)
+    {
+      Assert (false, ExcMessage ("We shouldn't even get here!"));
+    }
+
+
+    // for block vectors, simply dispatch to the individual blocks
+    template <class VectorType>
+    void
+    import_vector_with_ghost_elements (const BlockVectorBase<VectorType> &vec,
+                                       const IndexSet   &locally_owned_elements,
+                                       const IndexSet   &needed_elements,
+                                       BlockVectorBase<VectorType>       &output)
+    {
+      types::global_dof_index block_start = 0;
+      for (unsigned int b=0; b<vec.n_blocks(); ++b)
+        {
+          import_vector_with_ghost_elements (vec.block(b),
+                                             locally_owned_elements.get_view (block_start, block_start+vec.block(b).size()),
+                                             needed_elements.get_view (block_start, block_start+vec.block(b).size()),
+                                             output.block(b));
+          block_start += vec.block(b).size();
+        }
+    }
+  }
+}
+
+
+template <class VectorType>
 void
 ConstraintMatrix::distribute (VectorType &vec) const
 {
-  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sorted==true, ExcMatrixIsClosed());
+
+  // if the vector type supports parallel storage and if the vector
+  // actually does store only part of the vector, distributing is
+  // slightly more complicated. We can skip the complicated part if the
+  // local processor stores the *entire* vector and pretend that this is
+  // a sequential vector, but we then need to make sure that the other
+  // processors do not do anything at all (in particular that they do
+  // not call compress(), because the processor that owns everything
+  // does not do so either). This is the first if-case below; the second
+  // is for the complicated case; the last else is for the simple case
+  // (a sequential vector, or a distributed vector where the current
+  // processor stores everything)
+  if ((vec.supports_distributed_data == true)
+      &&
+      (vec.locally_owned_elements().n_elements() == 0))
+    {
+      // do nothing, in particular don't call compress()
+    }
+  else if ((vec.supports_distributed_data == true)
+           &&
+           (vec.locally_owned_elements() != complete_index_set(vec.size())))
+    {
+      // This processor owns only part of the vector. One may think that
+      // every processor should be able to simply communicate those elements
+      // it owns and for which it knows that they act as sources to constrained
+      // DoFs to the owner of these DoFs. This would lead to a scheme where all
+      // we need to do is to add some local elements to (possibly non-local) ones
+      // and then call compress().
+      //
+      // Alas, this scheme does not work, as evidenced by the disaster of bug #51,
+      // see http://code.google.com/p/dealii/issues/detail?id=51 and the
+      // reversion of one attempt that implements this in r29662. Rather, we
+      // need to get a vector that has all the *sources* of the constraints we
+      // own locally, possibly as ghost vector elements, then read from them,
+      // and finally throw away the ghosted vector. We implement this below.
+      const IndexSet vec_owned_elements = vec.locally_owned_elements();
+      IndexSet needed_elements = vec_owned_elements;
+
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      for (constraint_iterator it = lines.begin();
+          it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->line))
+          for (unsigned int i=0; i<it->entries.size(); ++i)
+            needed_elements.add_index(it->entries[i].first);
+
+      VectorType ghosted_vector;
+      internal::import_vector_with_ghost_elements (vec,
+                                                   vec_owned_elements, needed_elements,
+                                                   ghosted_vector);
+
+      for (constraint_iterator it = lines.begin();
+          it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->line))
+          {
+            // add up the contributions to the constrained entry it->line
+            typename VectorType::value_type
+            new_value = it->inhomogeneity;
+            for (unsigned int i=0; i<it->entries.size(); ++i)
+              new_value += (static_cast<typename VectorType::value_type>
+                            (ghosted_vector(it->entries[i].first)) *
+                             it->entries[i].second);
+            Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+            vec(it->line) = new_value;
+          }
 
-  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
-  for (; next_constraint != lines.end(); ++next_constraint)
+      // now compress to communicate to their owners any entries that we
+      // may have written into positions we do not own ourselves
+      //
+      // this shouldn't be strictly necessary but it probably doesn't
+      // hurt either
+      vec.compress (VectorOperation::insert);
+    }
+  else
+    // purely sequential vector (either because the type doesn't
+    // support anything else or because it's completely stored
+    // locally)
     {
-      // fill entry in line
-      // next_constraint.line by adding the
-      // different contributions
-      typename VectorType::value_type
-      new_value = next_constraint->inhomogeneity;
-      for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
-        new_value += (static_cast<typename VectorType::value_type>
-                      (vec(next_constraint->entries[i].first)) *
-                      next_constraint->entries[i].second);
-      Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
-      vec(next_constraint->line) = new_value;
+      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+      for (; next_constraint != lines.end(); ++next_constraint)
+        {
+          // fill entry in line
+          // next_constraint.line by adding the
+          // different contributions
+          typename VectorType::value_type
+          new_value = next_constraint->inhomogeneity;
+          for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+            new_value += (static_cast<typename VectorType::value_type>
+                          (vec(next_constraint->entries[i].first)) *
+                           next_constraint->entries[i].second);
+          Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+          vec(next_constraint->line) = new_value;
+        }
     }
 }
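
With the library-specific specializations removed from constraint_matrix.cc (see below), this single template is now the code path for every vector type, sequential or parallel. The following sketch shows a typical call site after a parallel solve; constraints, solver, system_matrix, solution, system_rhs, preconditioner and locally_relevant_solution are placeholder names and are not part of this patch:

    // solve into a fully distributed (non-ghosted) vector in which every
    // element is owned by exactly one processor
    solver.solve (system_matrix, solution, system_rhs, preconditioner);

    // fill in the constrained entries; internally this imports the ghost
    // values that the locally owned constraints depend on, as implemented
    // above, so 'constraints' must have been built on the locally relevant
    // index set (see ExcIncorrectConstraint in constraint_matrix.h)
    constraints.distribute (solution);

    // optionally copy into a ghosted vector, e.g. for graphical output
    locally_relevant_solution = solution;
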
 
diff --git a/deal.II/source/lac/constraint_matrix.cc b/deal.II/source/lac/constraint_matrix.cc
index 306691df8bed740a5ff5bf3d4f8e9c6029dc80bb..3e7c63c3845a23acfbec934fa9aaec2a4600e48e 100644
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 //    $Id$
 //
-//    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
+//    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -1518,268 +1518,6 @@ void ConstraintMatrix::condense (BlockCompressedSimpleSparsityPattern &sparsity)
 
 
 
-#ifdef DEAL_II_WITH_TRILINOS
-
-// this is a specialization for a parallel (non-block) Trilinos vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  //TODO: not implemented yet, we need to fix LocalRange() first to only
-  //include "owned" indices. For this we need to keep track of the owned
-  //indices, because Trilinos doesn't. Use same constructor interface as in
-  //PETSc with two IndexSets!
-  AssertThrow (vec.vector_partitioner().IsOneToOne(),
-               ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
-
-  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-  ConstraintLine index_comparison;
-  index_comparison.line = vec.local_range().first;
-  const constraint_iterator begin_my_constraints =
-    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-  index_comparison.line = vec.local_range().second;
-  const constraint_iterator end_my_constraints
-    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-  // Here we search all the indices that we need to have read-access to -
-  // the local nodes and all the nodes that the constraints indicate.
-  IndexSet my_indices (vec.size());
-  {
-    const std::pair<unsigned int, unsigned int>
-    local_range = vec.local_range();
-
-    my_indices.add_range (local_range.first, local_range.second);
-
-    std::set<unsigned int> individual_indices;
-    for (constraint_iterator it = begin_my_constraints;
-         it != end_my_constraints; ++it)
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        if ((it->entries[i].first < local_range.first)
-            ||
-            (it->entries[i].first >= local_range.second))
-          individual_indices.insert (it->entries[i].first);
-
-    my_indices.add_indices (individual_indices.begin(),
-                            individual_indices.end());
-  }
-
-#ifdef DEAL_II_WITH_MPI
-  const Epetra_MpiComm *mpi_comm
-    = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
-
-  Assert (mpi_comm != 0, ExcInternalError());
-
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
-  // here we import the data
-  vec_distribute.reinit(vec,false,true);
-
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    {
-      // fill entry in line next_constraint.line by adding the different
-      // contributions
-      double new_value = it->inhomogeneity;
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        new_value += (vec_distribute(it->entries[i].first) *
-                      it->entries[i].second);
-      vec(it->line) = new_value;
-    }
-
-  // some processes might not apply constraints, so we need to explicitly
-  // state, that the others are doing an insert here:
-  vec.compress (::dealii::VectorOperation::insert);
-}
-
-
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  IndexSet my_indices (vec.size());
-  for (unsigned int block=0; block<vec.n_blocks(); ++block)
-    {
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-      ConstraintLine index_comparison;
-      index_comparison.line = vec.block(block).local_range().first
-                              +vec.get_block_indices().block_start(block);
-      const constraint_iterator begin_my_constraints =
-        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-      index_comparison.line = vec.block(block).local_range().second
-                              +vec.get_block_indices().block_start(block);
-
-      const constraint_iterator end_my_constraints
-        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-      // Here we search all the indices that we need to have read-access to
-      // - the local nodes and all the nodes that the constraints indicate.
-      // No caching done yet. would need some more clever data structures
-      // for doing that.
-      const std::pair<unsigned int, unsigned int>
-      local_range = vec.block(block).local_range();
-
-      my_indices.add_range (local_range.first, local_range.second);
-
-      std::set<unsigned int> individual_indices;
-      for (constraint_iterator it = begin_my_constraints;
-           it != end_my_constraints; ++it)
-        for (unsigned int i=0; i<it->entries.size(); ++i)
-          if ((it->entries[i].first < local_range.first)
-              ||
-              (it->entries[i].first >= local_range.second))
-            individual_indices.insert (it->entries[i].first);
-
-      my_indices.add_indices (individual_indices.begin(),
-                              individual_indices.end());
-    }
-
-#ifdef DEAL_II_WITH_MPI
-  const Epetra_MpiComm *mpi_comm
-    = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
-
-  Assert (mpi_comm != 0, ExcInternalError());
-
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
-  // here we import the data
-  vec_distribute.reinit(vec,true);
-
-  for (unsigned int block=0; block<vec.n_blocks(); ++block)
-    {
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-      ConstraintLine index_comparison;
-      index_comparison.line = vec.block(block).local_range().first
-                              +vec.get_block_indices().block_start(block);
-      const constraint_iterator begin_my_constraints =
-        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-      index_comparison.line = vec.block(block).local_range().second
-                              +vec.get_block_indices().block_start(block);
-
-      const constraint_iterator end_my_constraints
-        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-      for (constraint_iterator it = begin_my_constraints;
-           it != end_my_constraints; ++it)
-        {
-          // fill entry in line next_constraint.line by adding the
-          // different contributions
-          double new_value = it->inhomogeneity;
-          for (unsigned int i=0; i<it->entries.size(); ++i)
-            new_value += (vec_distribute(it->entries[i].first) *
-                          it->entries[i].second);
-          vec(it->line) = new_value;
-        }
-      vec.block(block).compress(::dealii::VectorOperation::insert);
-    }
-}
-
-#endif
-
-#ifdef DEAL_II_WITH_PETSC
-
-// this is a specialization for a parallel (non-block) PETSc vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-  Assert (vec.has_ghost_elements() == false,
-          ExcMessage ("This operation can only be performed on vectors without ghost elements."));
-
-  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-  ConstraintLine index_comparison;
-  index_comparison.line = vec.local_range().first;
-  const constraint_iterator begin_my_constraints =
-    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-  index_comparison.line = vec.local_range().second;
-  const constraint_iterator end_my_constraints
-    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-  // all indices we need to read from
-  IndexSet my_indices (vec.size());
-
-  const std::pair<unsigned int, unsigned int>
-  local_range = vec.local_range();
-
-  my_indices.add_range (local_range.first, local_range.second);
-
-  std::set<unsigned int> individual_indices;
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    for (unsigned int i=0; i<it->entries.size(); ++i)
-      if ((it->entries[i].first < local_range.first)
-          ||
-          (it->entries[i].first >= local_range.second))
-        individual_indices.insert (it->entries[i].first);
-
-  my_indices.add_indices (individual_indices.begin(),
-                          individual_indices.end());
-
-  IndexSet local_range_is (vec.size());
-  local_range_is.add_range(local_range.first, local_range.second);
-
-
-  // create a vector and import those indices
-  PETScWrappers::MPI::Vector ghost_vec (vec.get_mpi_communicator(),
-                                        local_range_is,
-                                        my_indices);
-  ghost_vec = vec;
-  ghost_vec.update_ghost_values();
-
-  // finally do the distribution on own constraints
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    {
-      // fill entry in line next_constraint.line by adding the different
-      // contributions
-      PetscScalar new_value = it->inhomogeneity;
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
-                      it->entries[i].second);
-      vec(it->line) = new_value;
-    }
-
-  vec.compress (VectorOperation::insert);
-}
-
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-  AssertThrow (false, ExcNotImplemented());
-}
-
-#endif
-
-
-
 bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
 {
   if (is_constrained(index) == false)
@@ -1921,8 +1659,7 @@ ConstraintMatrix::memory_consumption () const
                                            VectorType                      &, \
                                            const FullMatrix<double>        &) const; \
   template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
-                                                          VectorType       &uncondensed) const;\
-  template void ConstraintMatrix::distribute<VectorType >(VectorType &vec) const
+                                                          VectorType       &uncondensed) const
 
 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
   template void ConstraintMatrix:: \
@@ -1932,11 +1669,6 @@ ConstraintMatrix::memory_consumption () const
                                            const FullMatrix<double>        &) const
 
 
-// TODO: Can PETSc really do all the operations required by the above
-// condense/distribute function etc also on distributed vectors? Trilinos
-// can't do that - we have to rewrite those functions by hand if we want to
-// use them. The key is to use local ranges etc., which still needs to be
-// implemented.
 #ifdef DEAL_II_WITH_PETSC
 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
diff --git a/deal.II/source/lac/constraint_matrix.inst.in b/deal.II/source/lac/constraint_matrix.inst.in
index 400f44cd4f28da04e519f9937dfa24eb273ab2e5..1a69fff4a28f66e0dbec0f362442c3c2c35ae2c7 100644
@@ -5,7 +5,6 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<T<S> > (
       const Vector<double>&, const std::vector<unsigned int> &, T<S> &, const FullMatrix<double>&) const;
     template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
-    template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
     template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
   }
 
@@ -17,7 +16,6 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
       const Vector<double>&, const std::vector<unsigned int> &, parallel::distributed::T<S> &, const FullMatrix<double>&) const;
     template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
-    template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
     template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
   }
 
@@ -29,7 +27,6 @@ for (V: EXTERNAL_SEQUENTIAL_VECTORS)
     template void ConstraintMatrix::distribute_local_to_global<V > (
       const Vector<double>&, const std::vector<unsigned int> &, V&, const FullMatrix<double>&) const;
     template void ConstraintMatrix::distribute<V >(const V&, V&) const;
-    template void ConstraintMatrix::distribute<V >(V&) const;
     template void ConstraintMatrix::set_zero<V >(V&) const;
   }
 
@@ -57,3 +54,9 @@ for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
     template void ConstraintMatrix::condense<S1,BlockVector<S2> >(
         const SparseMatrix<S1>&, const BlockVector<S2>&, SparseMatrix<S1> &, BlockVector<S2>&) const;
   }
+
+
+for (Vec : SERIAL_VECTORS)
+  {
+    template void ConstraintMatrix::distribute<Vec>(Vec &) const;
+  }
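
Each entry Vec of the SERIAL_VECTORS list (which, judging by the per-type instantiations deleted above, has to cover the PETSc, Trilinos and parallel::distributed vector types as well as the plain deal.II vectors) now produces one explicit instantiation of the generic function. Assuming, for illustration, that TrilinosWrappers::MPI::Vector is part of that list, the generated code for it is simply:

    template void ConstraintMatrix::distribute<TrilinosWrappers::MPI::Vector>
      (TrilinosWrappers::MPI::Vector &) const;
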
