https://gitweb.dealii.org/ - dealii.git/commitdiff
Provide a Tmmult operation as well.
author     Martin Kronbichler <kronbichler@lnm.mw.tum.de>
           Fri, 8 May 2009 09:47:09 +0000 (09:47 +0000)
committer  Martin Kronbichler <kronbichler@lnm.mw.tum.de>
           Fri, 8 May 2009 09:47:09 +0000 (09:47 +0000)
git-svn-id: https://svn.dealii.org/trunk@18823 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/lac/include/lac/sparse_matrix.h
deal.II/lac/include/lac/sparse_matrix.templates.h
deal.II/lac/include/lac/trilinos_sparse_matrix.h
deal.II/lac/source/sparse_matrix.inst.in
deal.II/lac/source/trilinos_sparse_matrix.cc

index e795fdaaa80973efe1ecfe0b0f75ceb9ec9ff6ab..424b8f895dd6e5b57d01d3777a5f8a1c66156fa3 100644 (file)
@@ -1266,7 +1266,7 @@ class SparseMatrix : public virtual Subscriptor
 
 //@}
 /**
- * @name Matrix vector multiplications
+ * @name Multiplications
  */
 //@{
                                     /**
@@ -1476,6 +1476,47 @@ class SparseMatrix : public virtual Subscriptor
                const Vector<numberV>       &V = Vector<numberV>(),
                const bool                   rebuild_sparsity_pattern = true) const;
     
+                                    /**
+                                     * Perform the matrix-matrix
+                                     * multiplication with the transpose of
+                                     * <tt>this</tt>, i.e., <tt>C =
+                                     * A<sup>T</sup> * B</tt>, or, if an
+                                     * optional vector argument is given,
+                                     * <tt>C = A<sup>T</sup> * diag(V) *
+                                     * B</tt>, where <tt>diag(V)</tt>
+                                     * defines a diagonal matrix with the
+                                     * vector entries.
+                                     *
+                                     * This function assumes that the
+                                     * calling matrix <tt>A</tt> and
+                                     * <tt>B</tt> have compatible
+                                     * sizes. The size of <tt>C</tt> will
+                                     * be set within this function.
+                                     *
+                                     * The content as well as the sparsity
+                                     * pattern of the matrix C will be
+                                     * changed by this function, so make
+                                     * sure that the sparsity pattern is
+                                     * not used somewhere else in your
+                                     * program. This is an expensive
+                                     * operation, so think twice before you
+                                     * use this function.
+                                     *
+                                     * There is an optional flag
+                                     * <tt>rebuild_sparsity_pattern</tt>
+                                     * that can be used to bypass the
+                                     * creation of a new sparsity pattern
+                                     * and instead uses the sparsity
+                                     * pattern stored in <tt>C</tt>. In
+                                     * that case, make sure that it really
+                                     * fits.
+                                     */
+    template <typename numberB, typename numberC, typename numberV>
+    void Tmmult (SparseMatrix<numberC>       &C,
+                const SparseMatrix<numberB> &B,
+                const Vector<numberV>       &V = Vector<numberV>(),
+                const bool                   rebuild_sparsity_pattern = true) const;
+    
 //@}
 /**
  * @name Matrix norms
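
For reference, a minimal usage sketch of the new deal.II interface (not part of the commit; the wrapper function names are illustrative, and A, B, C are assumed to already carry sparsity patterns). The vector argument is passed explicitly so that all three number template parameters can be deduced:

#include <lac/sparse_matrix.h>
#include <lac/vector.h>

using namespace dealii;

// Weighted product C = A^T * diag(V) * B; V needs one entry per row
// of A. With the default rebuild_sparsity_pattern == true, the
// sparsity pattern attached to C is rebuilt inside Tmmult.
template <typename number>
void weighted_transpose_product (const SparseMatrix<number> &A,
                                 const SparseMatrix<number> &B,
                                 const Vector<number>       &V,
                                 SparseMatrix<number>       &C)
{
  A.Tmmult (C, B, V);
}

// Plain product C = A^T * B: an empty vector switches the diagonal
// weighting off.
template <typename number>
void transpose_product (const SparseMatrix<number> &A,
                        const SparseMatrix<number> &B,
                        SparseMatrix<number>       &C)
{
  A.Tmmult (C, B, Vector<number>());
}
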
index b79a557a4355d4e8a91dcd1b12a930a797d12c80..1b12cd9661df7b6afae9e5551836a3deeb762c43 100644 (file)
@@ -713,140 +713,6 @@ SparseMatrix<number>::Tvmult_add (OutVector& dst,
 
 
 
-template <typename number>
-template <typename numberB, typename numberC, typename numberV>
-void
-SparseMatrix<number>::mmult (SparseMatrix<numberC>       &C,
-                            const SparseMatrix<numberB> &B,
-                            const Vector<numberV>       &V,
-                            const bool                   rebuild_sparsity_C) const
-{
-  const bool use_vector = V.size() == n() ? true : false;
-  Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
-  Assert (cols != 0, ExcNotInitialized());
-  Assert (B.cols != 0, ExcNotInitialized());
-  Assert (C.cols != 0, ExcNotInitialized());
-
-  const SparsityPattern & sp_A = *cols;
-  const SparsityPattern & sp_B = *B.cols;
-
-                                  // clear previous content of C
-  if  (rebuild_sparsity_C == true)
-    {
-                                  // need to change the sparsity pattern of
-                                  // C, so cast away const-ness.
-      SparsityPattern & sp_C = 
-       *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
-      C.clear();
-      sp_C.reinit (0,0,0);
-
-                                  // create a sparsity pattern for the
-                                  // matrix. we will go through all the
-                                  // rows in the matrix A, and for each
-                                  // column in a row we add the whole row
-                                  // of matrix B with that row number. This
-                                  // means that we will insert a lot of
-                                  // entries to each row, which is best
-                                  // handled by the
-                                  // CompressedSimpleSparsityPattern class.
-      {
-       CompressedSimpleSparsityPattern csp (m(), B.n());
-       for (unsigned int i = 0; i < csp.n_rows(); ++i)
-         {
-           const unsigned int * rows = 
-             &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
-           const unsigned int *const end_rows = 
-             &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
-           for (; rows != end_rows; ++rows)
-             {
-               const unsigned int col = *rows;
-               unsigned int * new_cols = const_cast<unsigned int*>
-                 (&sp_B.get_column_numbers()
-                  [sp_B.get_rowstart_indices()[col]]);
-               unsigned int * end_new_cols = const_cast<unsigned int*>
-                 (&sp_B.get_column_numbers()
-                  [sp_B.get_rowstart_indices()[col+1]]);
-
-                                  // if B has a diagonal, need to add that
-                                  // manually. this way, we maintain
-                                  // sortedness.
-               if (sp_B.optimize_diagonal() == true)
-                 {
-                   ++new_cols;
-                   csp.add(i, col);
-                 }
-
-               csp.add_entries (i, new_cols, end_new_cols, true);
-             }
-         }
-       sp_C.copy_from (csp);
-      }
-
-                                  // reinit matrix C from that information
-      C.reinit (sp_C);
-    }
-
-  Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
-  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
-
-                                  // create an array that caches some
-                                  // elements that are going to be written
-                                  // into the new matrix.
-  unsigned int max_n_cols_B = 0;
-  for (unsigned int i=0; i<B.m(); ++i)
-    max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
-  std::vector<numberC> new_entries(max_n_cols_B);
-
-                                  // now compute the actual entries: a
-                                  // matrix-matrix product involves three
-                                  // nested loops. One over the rows of A,
-                                  // for each row we then loop over all the
-                                  // columns, and then we need to multiply
-                                  // each element with all the elements in
-                                  // that row in B.
-  for (unsigned int i=0; i<C.m(); ++i)
-    {
-      const unsigned int * rows = 
-       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
-      const unsigned int *const end_rows = 
-       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
-      for (; rows != end_rows; ++rows)
-       {
-         const double A_val = global_entry 
-           (rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
-         const unsigned int col = *rows;
-         const unsigned int * new_cols = 
-           (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]]);
-
-                                  // special treatment for diagonal
-         if (sp_B.optimize_diagonal())
-           {
-             C.add (i, *new_cols, A_val * 
-                    B.global_entry(new_cols-&sp_B.get_column_numbers()
-                                   [sp_B.get_rowstart_indices()[0]]) * 
-                    (use_vector ? V(col) : 1));
-             ++new_cols;
-           }
-
-                                  // now the innermost loop that goes over
-                                  // all the elements in row 'col' of
-                                  // matrix B. Cache the elements, and then
-                                  // write them into C at once
-         numberC * new_ptr = &new_entries[0];
-         const numberB * B_val_ptr = 
-           &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
-         const numberB * const end_cols = 
-           &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
-                  &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
-         for (; B_val_ptr != end_cols; ++B_val_ptr)
-           *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
-
-         C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
-                false, true);
-       }
-    }
-}
-
 template <typename number>
 template <typename somenumber>
 somenumber
@@ -1074,6 +940,279 @@ threaded_matrix_scalar_product (const Vector<somenumber> &u,
 
 
 
+template <typename number>
+template <typename numberB, typename numberC, typename numberV>
+void
+SparseMatrix<number>::mmult (SparseMatrix<numberC>       &C,
+                            const SparseMatrix<numberB> &B,
+                            const Vector<numberV>       &V,
+                            const bool                   rebuild_sparsity_C) const
+{
+  const bool use_vector = V.size() == n() ? true : false;
+  Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
+  Assert (cols != 0, ExcNotInitialized());
+  Assert (B.cols != 0, ExcNotInitialized());
+  Assert (C.cols != 0, ExcNotInitialized());
+
+  const SparsityPattern & sp_A = *cols;
+  const SparsityPattern & sp_B = *B.cols;
+
+                                  // clear previous content of C
+  if  (rebuild_sparsity_C == true)
+    {
+                                  // need to change the sparsity pattern of
+                                  // C, so cast away const-ness.
+      SparsityPattern & sp_C = 
+       *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
+      C.clear();
+      sp_C.reinit (0,0,0);
+
+                                  // create a sparsity pattern for the
+                                  // matrix. we will go through all the
+                                  // rows in the matrix A, and for each
+                                  // column in a row we add the whole row
+                                  // of matrix B with that row number. This
+                                  // means that we will insert a lot of
+                                  // entries to each row, which is best
+                                  // handled by the
+                                  // CompressedSimpleSparsityPattern class.
+      {
+       CompressedSimpleSparsityPattern csp (m(), B.n());
+       for (unsigned int i = 0; i < csp.n_rows(); ++i)
+         {
+           const unsigned int * rows = 
+             &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+           const unsigned int *const end_rows = 
+             &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+           for (; rows != end_rows; ++rows)
+             {
+               const unsigned int col = *rows;
+               unsigned int * new_cols = const_cast<unsigned int*>
+                 (&sp_B.get_column_numbers()
+                  [sp_B.get_rowstart_indices()[col]]);
+               unsigned int * end_new_cols = const_cast<unsigned int*>
+                 (&sp_B.get_column_numbers()
+                  [sp_B.get_rowstart_indices()[col+1]]);
+
+                                  // if B has a diagonal, need to add that
+                                  // manually. this way, we maintain
+                                  // sortedness.
+               if (sp_B.optimize_diagonal() == true)
+                 {
+                   ++new_cols;
+                   csp.add(i, col);
+                 }
+
+               csp.add_entries (i, new_cols, end_new_cols, true);
+             }
+         }
+       sp_C.copy_from (csp);
+      }
+
+                                  // reinit matrix C from that information
+      C.reinit (sp_C);
+    }
+
+  Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
+  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+                                  // create an array that caches some
+                                  // elements that are going to be written
+                                  // into the new matrix.
+  unsigned int max_n_cols_B = 0;
+  for (unsigned int i=0; i<B.m(); ++i)
+    max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
+  std::vector<numberC> new_entries(max_n_cols_B);
+
+                                  // now compute the actual entries: a
+                                  // matrix-matrix product involves three
+                                  // nested loops. One over the rows of A,
+                                  // for each row we then loop over all the
+                                  // columns, and then we need to multiply
+                                  // each element with all the elements in
+                                  // that row in B.
+  for (unsigned int i=0; i<C.m(); ++i)
+    {
+      const unsigned int * rows = 
+       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+      const unsigned int *const end_rows = 
+       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+      for (; rows != end_rows; ++rows)
+       {
+         const double A_val = global_entry 
+           (rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
+         const unsigned int col = *rows;
+         const unsigned int * new_cols = 
+           (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]]);
+
+                                  // special treatment for diagonal
+         if (sp_B.optimize_diagonal())
+           {
+             C.add (i, *new_cols, A_val * 
+                    B.global_entry(new_cols-&sp_B.get_column_numbers()
+                                   [sp_B.get_rowstart_indices()[0]]) * 
+                    (use_vector ? V(col) : 1));
+             ++new_cols;
+           }
+
+                                  // now the innermost loop that goes over
+                                  // all the elements in row 'col' of
+                                  // matrix B. Cache the elements, and then
+                                  // write them into C at once
+         numberC * new_ptr = &new_entries[0];
+         const numberB * B_val_ptr = 
+           &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+         const numberB * const end_cols = 
+           &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
+                  &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+         for (; B_val_ptr != end_cols; ++B_val_ptr)
+           *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
+
+         C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+                false, true);
+       }
+    }
+}
+
+
+
+
+template <typename number>
+template <typename numberB, typename numberC, typename numberV>
+void
+SparseMatrix<number>::Tmmult (SparseMatrix<numberC>       &C,
+                             const SparseMatrix<numberB> &B,
+                             const Vector<numberV>       &V,
+                             const bool                   rebuild_sparsity_C) const
+{
+  const bool use_vector = V.size() == m() ? true : false;
+  Assert (m() == B.m(), ExcDimensionMismatch(m(), B.m()));
+  Assert (cols != 0, ExcNotInitialized());
+  Assert (B.cols != 0, ExcNotInitialized());
+  Assert (C.cols != 0, ExcNotInitialized());
+
+  const SparsityPattern & sp_A = *cols;
+  const SparsityPattern & sp_B = *B.cols;
+
+                                  // clear previous content of C
+  if  (rebuild_sparsity_C == true)
+    {
+                                  // need to change the sparsity pattern of
+                                  // C, so cast away const-ness.
+      SparsityPattern & sp_C = 
+       *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
+      C.clear();
+      sp_C.reinit (0,0,0);
+
+                                  // create a sparsity pattern for the
+                                  // matrix. we will go through all the
+                                  // rows in the matrix A, and for each
+                                  // column in a row we add the whole row
+                                  // of matrix B with that row number. This
+                                  // means that we will insert a lot of
+                                  // entries to each row, which is best
+                                  // handled by the
+                                  // CompressedSimpleSparsityPattern class.
+      {
+       CompressedSimpleSparsityPattern csp (n(), B.n());
+       for (unsigned int i = 0; i < sp_A.n_rows(); ++i)
+         {
+           const unsigned int * rows = 
+             &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+           const unsigned int *const end_rows = 
+             &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+           unsigned int * new_cols = const_cast<unsigned int*>
+             (&sp_B.get_column_numbers()
+              [sp_B.get_rowstart_indices()[i]]);
+           unsigned int * end_new_cols = const_cast<unsigned int*>
+             (&sp_B.get_column_numbers()
+              [sp_B.get_rowstart_indices()[i+1]]);
+
+           if (sp_B.optimize_diagonal() == true)
+             ++new_cols;
+
+           for (; rows != end_rows; ++rows)
+             {
+               const unsigned int row = *rows;
+
+                                  // if B has a diagonal, need to add that
+                                  // manually. this way, we maintain
+                                  // sortedness.
+               if (sp_B.optimize_diagonal() == true)
+                 csp.add(row, i);
+
+               csp.add_entries (row, new_cols, end_new_cols, true);
+             }
+         }
+       sp_C.copy_from (csp);
+      }
+
+                                  // reinit matrix C from that information
+      C.reinit (sp_C);
+    }
+
+  Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
+  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+                                  // create an array that caches some
+                                  // elements that are going to be written
+                                  // into the new matrix.
+  unsigned int max_n_cols_B = 0;
+  for (unsigned int i=0; i<B.m(); ++i)
+    max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
+  std::vector<numberC> new_entries(max_n_cols_B);
+
+                                  // now compute the actual entries: a
+                                  // matrix-matrix product involves three
+                                  // nested loops. One over the rows of A,
+                                  // for each row we then loop over all the
+                                  // columns, and then we need to multiply
+                                  // each element with all the elements in
+                                  // that row in B.
+  for (unsigned int i=0; i<m(); ++i)
+    {
+      const unsigned int * rows = 
+       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+      const unsigned int *const end_rows = 
+       &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+      const unsigned int * new_cols = 
+       (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i]]);
+      if (sp_B.optimize_diagonal())
+       ++new_cols;
+
+      const numberB * const end_cols = 
+       &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i+1]]-
+              &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+
+      for (; rows != end_rows; ++rows)
+       {
+         const unsigned int row = *rows;
+         const double A_val = global_entry 
+           (rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
+
+                                  // special treatment for diagonal
+         if (sp_B.optimize_diagonal())
+           C.add (row, i, A_val * 
+                  B.global_entry(new_cols-1-&sp_B.get_column_numbers()
+                                 [sp_B.get_rowstart_indices()[0]]) * 
+                  (use_vector ? V(i) : 1));
+
+                                  // now the innermost loop that goes over
+                                  // all the elements in row 'col' of
+                                  // matrix B. Cache the elements, and then
+                                  // write them into C at once
+         numberC * new_ptr = &new_entries[0];
+         const numberB * B_val_ptr = 
+           &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+         for (; B_val_ptr != end_cols; ++B_val_ptr)
+           *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
+
+         C.add (row, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+                false, true);
+       }
+    }
+}
+
 
 
 template <typename number>
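
The comments in the new Tmmult above describe the kernel as a row-by-row scatter: for every nonzero A(i,j), row i of B, scaled by A(i,j) and optionally by V(i), is added to row j of C. A standalone sketch of that formulation, using a toy CSR structure and a dense result for brevity (none of these names are deal.II API):

#include <cstddef>
#include <vector>

struct CSRMatrix
{
  std::size_t              n_rows, n_cols;
  std::vector<std::size_t> row_start;   // size n_rows + 1
  std::vector<std::size_t> col_index;   // size nnz
  std::vector<double>      values;      // size nnz
};

// Dense, row-major result of size n_cols(A) x n_cols(B) holding
// C = A^T * diag(V) * B, accumulated as C(j,:) += A(i,j) * V(i) * B(i,:).
std::vector<double>
transposed_product (const CSRMatrix           &A,
                    const CSRMatrix           &B,
                    const std::vector<double> &V)
{
  std::vector<double> C (A.n_cols * B.n_cols, 0.);
  for (std::size_t i = 0; i < A.n_rows; ++i)
    for (std::size_t a = A.row_start[i]; a < A.row_start[i+1]; ++a)
      {
        const std::size_t j     = A.col_index[a];
        const double      scale = A.values[a] * V[i];
        for (std::size_t b = B.row_start[i]; b < B.row_start[i+1]; ++b)
          C[j * B.n_cols + B.col_index[b]] += scale * B.values[b];
      }
  return C;
}

The production code additionally builds the sparsity pattern of C up front (via CompressedSimpleSparsityPattern), handles the optimize_diagonal storage convention, and writes each scaled row of B into C with a single add() call instead of entry by entry.
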
index d917623decad95d7e1abd57f9eeff06325da7a53..9447da225dd3b0560f9402ca0d9ab9a1587d7583 100755 (executable)
@@ -1298,7 +1298,7 @@ namespace TrilinosWrappers
 
 //@}
 /**
- * @name Matrix vector multiplications
+ * @name Multiplications
  */
 //@{
 
@@ -1592,6 +1592,37 @@ namespace TrilinosWrappers
                const SparseMatrix &B,
                const VectorBase   &V = VectorBase()) const;
 
+
+                                    /**
+                                     * Perform the matrix-matrix
+                                     * multiplication with the transpose of
+                                     * <tt>this</tt>, i.e., <tt>C =
+                                     * A<sup>T</sup> * B</tt>, or, if an
+                                     * optional vector argument is given,
+                                     * <tt>C = A<sup>T</sup> * diag(V) *
+                                     * B</tt>, where <tt>diag(V)</tt>
+                                     * defines a diagonal matrix with the
+                                     * vector entries.
+                                     *
+                                     * This function assumes that the
+                                     * calling matrix <tt>A</tt> and
+                                     * <tt>B</tt> have compatible
+                                     * sizes. The size of <tt>C</tt> will
+                                     * be set within this function.
+                                     *
+                                     * The content as well as the sparsity
+                                     * pattern of the matrix C will be
+                                     * changed by this function, so make
+                                     * sure that the sparsity pattern is
+                                     * not used somewhere else in your
+                                     * program. This is an expensive
+                                     * operation, so think twice before you
+                                     * use this function.
+                                     */
+    void Tmmult (SparseMatrix       &C,
+                const SparseMatrix &B,
+                const VectorBase   &V = VectorBase()) const;
+
 //@}
 /**
  * @name Matrix norms
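
A corresponding usage sketch for the Trilinos wrapper (again illustrative, not part of the commit). Here C is cleared and rebuilt from the product inside Tmmult, and A and B must share their parallel row partitioning:

#include <lac/trilinos_sparse_matrix.h>
#include <lac/trilinos_vector.h>

using namespace dealii;

void transpose_products (const TrilinosWrappers::SparseMatrix &A,
                         const TrilinosWrappers::SparseMatrix &B,
                         const TrilinosWrappers::Vector       &V,
                         TrilinosWrappers::SparseMatrix       &C)
{
  A.Tmmult (C, B);       // C = A^T * B
  A.Tmmult (C, B, V);    // C = A^T * diag(V) * B
}
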
index 60c364f60696b7865e4dcb0933e1bd953c6faf8c..5d19d30fac212b25f8b469b833e823c843514973 100644 (file)
@@ -135,6 +135,9 @@ for (S1, S2, S3, S4: REAL_SCALARS)
     template void SparseMatrix<S1>::
       mmult (SparseMatrix<S2> &, const SparseMatrix<S3> &, const Vector<S4>&,
              const bool) const;
+    template void SparseMatrix<S1>::
+      Tmmult (SparseMatrix<S2> &, const SparseMatrix<S3> &, const Vector<S4>&,
+                     const bool) const;
   }
 
 
index 9ac95cdf040d7c167bf7d8e9d238891b3e52e269..4ac73a823d148c90ad7af1b2887461d2832e49b5 100755 (executable)
@@ -1347,6 +1347,144 @@ namespace TrilinosWrappers
 
 
 
+  void
+  SparseMatrix::Tmmult (SparseMatrix       &C,
+                       const SparseMatrix &B,
+                       const VectorBase   &V) const
+  {
+    const bool use_vector = V.size() == m() ? true : false;
+    Assert (m() == B.m(), ExcDimensionMismatch(m(), B.m()));
+    Assert (this->matrix->RangeMap().SameAs(B.matrix->RangeMap()),
+           ExcMessage ("Parallel partitioning of A and B does not fit."));
+
+    C.clear();
+                                  // use ML built-in method for performing
+                                  // the matrix-matrix product.
+
+                                  // create ML operators on top of the
+                                  // Epetra matrix.
+    ML_Comm* comm;
+    ML_Comm_Create(&comm);
+    ML_Operator *A_nontrans = ML_Operator_Create(comm);
+    ML_Operator *A_ = ML_Operator_Create(comm);
+    ML_Operator *B_ = ML_Operator_Create(comm);
+    ML_Operator *C_ = ML_Operator_Create(comm);
+
+    ML_Operator_WrapEpetraCrsMatrix(&*matrix,A_nontrans,false);
+    ML_Operator_Transpose_byrow(A_nontrans,A_);
+    ML_Operator_WrapEpetraCrsMatrix(&*B.matrix,B_,false);
+
+                                  // Case where we only have two
+                                  // matrices. We do this by hand since we
+                                  // can save some operations compared to
+                                  // just calling ml/src/Operator/ml_rap.c
+                                  // that multiplies three matrices. This
+                                  // code is still similar to the one found
+                                  // in ml/src/Operator/ml_rap.c
+    if (use_vector == false)
+      {
+                                  // import data if necessary
+       ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr;
+       ML_CommInfoOP *getrow_comm; 
+       int max_per_proc;
+       int N_input_vector = B_->invec_leng;
+       getrow_comm = B_->getrow->pre_comm;
+       if ( getrow_comm != NULL)
+         for (int i = 0; i < getrow_comm->N_neighbors; i++)
+           for (int j = 0; j < getrow_comm->neighbors[i].N_send; j++)
+             AssertThrow (getrow_comm->neighbors[i].send_list[j] < N_input_vector,
+                          ExcInternalError());
+
+       ML_create_unique_col_id(N_input_vector, &(B_->getrow->loc_glob_map),
+                               getrow_comm, &max_per_proc, B_->comm);
+       B_->getrow->use_loc_glob_map = ML_YES;
+       if (A_->getrow->pre_comm != NULL) 
+         ML_exchange_rows( B_, &Btmp, A_->getrow->pre_comm);
+       else Btmp = B_;
+
+                                  // perform matrix-matrix product
+       ML_matmat_mult(A_, Btmp , &Ctmp);
+
+                                  // release temporary structures we needed
+                                  // for multiplication
+       ML_free(B_->getrow->loc_glob_map);
+       B_->getrow->loc_glob_map = NULL;
+       B_->getrow->use_loc_glob_map = ML_NO;
+       if (A_->getrow->pre_comm != NULL) {
+         tptr = Btmp;
+         while ( (tptr!= NULL) && (tptr->sub_matrix != B_)) 
+           tptr = tptr->sub_matrix;
+         if (tptr != NULL) tptr->sub_matrix = NULL;
+         ML_RECUR_CSR_MSRdata_Destroy(Btmp);
+         ML_Operator_Destroy(&Btmp);
+       }
+
+                                  // make correct data structures
+       if (A_->getrow->post_comm != NULL) {
+         ML_exchange_rows(Ctmp, &Ctmp2, A_->getrow->post_comm);
+       }
+       else Ctmp2 = Ctmp;
+
+       ML_back_to_csrlocal(Ctmp2, C_, max_per_proc);
+
+       ML_RECUR_CSR_MSRdata_Destroy (Ctmp);
+       ML_Operator_Destroy (&Ctmp);
+
+       if (A_->getrow->post_comm != NULL) {
+         ML_RECUR_CSR_MSRdata_Destroy(Ctmp2);
+         ML_Operator_Destroy (&Ctmp2);
+       }
+      }
+    else
+      {
+                                  // create an Epetra_CrsMatrix with the
+                                  // vector content on the diagonal.
+       Epetra_CrsMatrix Vmat (Copy, this->matrix->RangeMap(), 
+                              B.matrix->RangeMap(), 1, true);
+       Assert (Vmat.DomainMap().SameAs(V.trilinos_vector().Map()),
+               ExcMessage ("Column map of matrix does not fit with vector map!"));
+       const int num_my_rows = B.local_size();
+       for (int i=0; i<num_my_rows; ++i)
+         Vmat.InsertGlobalValues (i, 1, &V.trilinos_vector()[0][i], &i);
+       Vmat.FillComplete();
+       Vmat.OptimizeStorage();
+
+                                  // wrap even this matrix into ML format
+       ML_Operator *V_ = ML_Operator_Create (comm);
+       ML_Operator_WrapEpetraCrsMatrix (&Vmat, V_, false);
+
+                                  // perform triple matrix-vector product
+                                  // using ml/src/Operator/ml_rap.c
+       ML_rap (A_, V_, B_, C_, ML_CSR_MATRIX);
+
+       ML_Operator_Destroy (&V_);
+      }
+
+                                  // create an Epetra matrix from the ML
+                                  // matrix that we got as a result.
+    Epetra_CrsMatrix * C_mat;
+    ML_Operator2EpetraCrsMatrix(C_, C_mat);
+    C_mat->FillComplete();
+    C_mat->OptimizeStorage();
+    C.reinit (*C_mat);
+
+                                  // Sanity check
+    Assert (this->matrix->DomainMap().SameAs(C.matrix->RangeMap()),
+           ExcInternalError());
+    Assert (B.matrix->DomainMap().SameAs(C.matrix->DomainMap()),
+           ExcInternalError());
+
+                                  // destroy allocated memory
+    delete C_mat;
+    ML_Operator_Destroy (&A_);
+    ML_Operator_Destroy (&A_nontrans);
+    ML_Operator_Destroy (&B_);
+    ML_Operator_Destroy (&C_);
+    ML_Comm_Destroy (&comm);
+  }
+
+
+
   void
   SparseMatrix::add (const TrilinosScalar  factor,
                     const SparseMatrix   &rhs)
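
In both implementations the weighted product is the same triple sum, C = A^T * diag(V) * B with entries C(j,k) = sum_i A(i,j) * V(i) * B(i,k). The deal.II version accumulates this sum row by row as sketched further up, while the Trilinos version assembles diag(V) as an explicit Epetra_CrsMatrix and delegates the triple product to ML's ML_rap routine.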
