-template <typename number>
-template <typename numberB, typename numberC, typename numberV>
-void
-SparseMatrix<number>::mmult (SparseMatrix<numberC> &C,
- const SparseMatrix<numberB> &B,
- const Vector<numberV> &V,
- const bool rebuild_sparsity_C) const
-{
- const bool use_vector = V.size() == n() ? true : false;
- Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
- Assert (cols != 0, ExcNotInitialized());
- Assert (B.cols != 0, ExcNotInitialized());
- Assert (C.cols != 0, ExcNotInitialized());
-
- const SparsityPattern & sp_A = *cols;
- const SparsityPattern & sp_B = *B.cols;
-
- // clear previous content of C
- if (rebuild_sparsity_C == true)
- {
- // need to change the sparsity pattern of
- // C, so cast away const-ness.
- SparsityPattern & sp_C =
- *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
- C.clear();
- sp_C.reinit (0,0,0);
-
- // create a sparsity pattern for the
- // matrix. we will go through all the
- // rows in the matrix A, and for each
- // column in a row we add the whole row
- // of matrix B with that row number. This
- // means that we will insert a lot of
- // entries to each row, which is best
- // handled by the
- // CompressedSimpleSparsityPattern class.
- {
- CompressedSimpleSparsityPattern csp (m(), B.n());
- for (unsigned int i = 0; i < csp.n_rows(); ++i)
- {
- const unsigned int * rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
- for (; rows != end_rows; ++rows)
- {
- const unsigned int col = *rows;
- unsigned int * new_cols = const_cast<unsigned int*>
- (&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[col]]);
- unsigned int * end_new_cols = const_cast<unsigned int*>
- (&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[col+1]]);
-
- // if B has a diagonal, need to add that
- // manually. this way, we maintain
- // sortedness.
- if (sp_B.optimize_diagonal() == true)
- {
- ++new_cols;
- csp.add(i, col);
- }
-
- csp.add_entries (i, new_cols, end_new_cols, true);
- }
- }
- sp_C.copy_from (csp);
- }
-
- // reinit matrix C from that information
- C.reinit (sp_C);
- }
-
- Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
- Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
-
- // create an array that caches some
- // elements that are going to be written
- // into the new matrix.
- unsigned int max_n_cols_B = 0;
- for (unsigned int i=0; i<B.m(); ++i)
- max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
- std::vector<numberC> new_entries(max_n_cols_B);
-
- // now compute the actual entries: a
- // matrix-matrix product involves three
- // nested loops. One over the rows of A,
- // for each row we then loop over all the
- // columns, and then we need to multiply
- // each element with all the elements in
- // that row in B.
- for (unsigned int i=0; i<C.m(); ++i)
- {
- const unsigned int * rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
- const unsigned int *const end_rows =
- &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
- for (; rows != end_rows; ++rows)
- {
- const double A_val = global_entry
- (rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
- const unsigned int col = *rows;
- const unsigned int * new_cols =
- (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]]);
-
- // special treatment for diagonal
- if (sp_B.optimize_diagonal())
- {
- C.add (i, *new_cols, A_val *
- B.global_entry(new_cols-&sp_B.get_column_numbers()
- [sp_B.get_rowstart_indices()[0]]) *
- (use_vector ? V(col) : 1));
- ++new_cols;
- }
-
- // now the innermost loop that goes over
- // all the elements in row 'col' of
- // matrix B. Cache the elements, and then
- // write them into C at once
- numberC * new_ptr = &new_entries[0];
- const numberB * B_val_ptr =
- &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
- const numberB * const end_cols =
- &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
- &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
- for (; B_val_ptr != end_cols; ++B_val_ptr)
- *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
-
- C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
- false, true);
- }
- }
-}
-
template <typename number>
template <typename somenumber>
somenumber
+template <typename number>
+template <typename numberB, typename numberC, typename numberV>
+void
+SparseMatrix<number>::mmult (SparseMatrix<numberC> &C,
+ const SparseMatrix<numberB> &B,
+ const Vector<numberV> &V,
+ const bool rebuild_sparsity_C) const
+{
+ const bool use_vector = (V.size() == n());
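+ // if V has exactly as many entries as
+ // this matrix has columns, it acts as a
+ // diagonal scaling and we compute
+ // C = A * diag(V) * B; otherwise we
+ // compute the plain product C = A * B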
+ Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
+ Assert (cols != 0, ExcNotInitialized());
+ Assert (B.cols != 0, ExcNotInitialized());
+ Assert (C.cols != 0, ExcNotInitialized());
+
+ const SparsityPattern & sp_A = *cols;
+ const SparsityPattern & sp_B = *B.cols;
+
+ // clear previous content of C
+ if (rebuild_sparsity_C == true)
+ {
+ // need to change the sparsity pattern of
+ // C, so cast away const-ness.
+ SparsityPattern & sp_C =
+ *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
+ C.clear();
+ sp_C.reinit (0,0,0);
+
+ // create a sparsity pattern for the
+ // matrix. we will go through all the
+ // rows in the matrix A, and for each
+ // column in a row we add the whole row
+ // of matrix B with that row number. This
+ // means that we will insert a lot of
+ // entries to each row, which is best
+ // handled by the
+ // CompressedSimpleSparsityPattern class.
+ {
+ CompressedSimpleSparsityPattern csp (m(), B.n());
+ for (unsigned int i = 0; i < csp.n_rows(); ++i)
+ {
+ const unsigned int * rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+ const unsigned int *const end_rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ for (; rows != end_rows; ++rows)
+ {
+ const unsigned int col = *rows;
+ unsigned int * new_cols = const_cast<unsigned int*>
+ (&sp_B.get_column_numbers()
+ [sp_B.get_rowstart_indices()[col]]);
+ unsigned int * end_new_cols = const_cast<unsigned int*>
+ (&sp_B.get_column_numbers()
+ [sp_B.get_rowstart_indices()[col+1]]);
+
+ // if B stores its diagonal element as
+ // the first entry of each row
+ // (optimize_diagonal), that entry is
+ // out of order; add it by hand and
+ // exclude it from the sorted range
+ // passed to add_entries below
+ if (sp_B.optimize_diagonal() == true)
+ {
+ ++new_cols;
+ csp.add(i, col);
+ }
+
+ csp.add_entries (i, new_cols, end_new_cols, true);
+ }
+ }
+ sp_C.copy_from (csp);
+ }
+
+ // reinit matrix C from that information
+ C.reinit (sp_C);
+ }
+
+ Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
+ Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+ // create an array that caches some
+ // elements that are going to be written
+ // into the new matrix.
+ unsigned int max_n_cols_B = 0;
+ for (unsigned int i=0; i<B.m(); ++i)
+ max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
+ std::vector<numberC> new_entries(max_n_cols_B);
+
+ // now compute the actual entries: a
+ // matrix-matrix product involves three
+ // nested loops: one over the rows of A,
+ // one over the nonzero columns of each
+ // row, and an innermost one that
+ // multiplies each entry A(i,col) with
+ // all the entries of row 'col' of B
+ for (unsigned int i=0; i<C.m(); ++i)
+ {
+ const unsigned int * rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+ const unsigned int *const end_rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ for (; rows != end_rows; ++rows)
+ {
+ const double A_val = global_entry
+ (rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
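+ // the argument of global_entry() is the
+ // position of the current entry within
+ // the linear array of matrix values,
+ // computed as the distance of the
+ // iterator from the start of the column
+ // index array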
+ const unsigned int col = *rows;
+ const unsigned int * new_cols =
+ (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]]);
+
+ // special treatment for diagonal
+ if (sp_B.optimize_diagonal())
+ {
+ C.add (i, *new_cols, A_val *
+ B.global_entry(new_cols-&sp_B.get_column_numbers()
+ [sp_B.get_rowstart_indices()[0]]) *
+ (use_vector ? V(col) : 1));
+ ++new_cols;
+ }
+
+ // now the innermost loop that goes over
+ // all the elements in row 'col' of
+ // matrix B. Cache the elements, and then
+ // write them into C at once
+ numberC * new_ptr = &new_entries[0];
+ const numberB * B_val_ptr =
+ &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+ const numberB * const end_cols =
+ &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col+1]]-
+ &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+ for (; B_val_ptr != end_cols; ++B_val_ptr)
+ *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
+
+ C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+ false, true);
+ }
+ }
+}
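+
+ // A minimal usage sketch (the names A, B, C, sp_C and no_scaling are
+ // illustrative, not part of the library): compute C = A * B and let
+ // mmult() rebuild C's sparsity pattern; an empty vector disables the
+ // diagonal scaling since its size differs from A.n().
+ //
+ //   SparsityPattern      sp_C;       // contents are rebuilt by mmult()
+ //   SparseMatrix<double> C (sp_C);   // C must be attached to a pattern
+ //   Vector<double>       no_scaling;
+ //   A.mmult (C, B, no_scaling, /*rebuild_sparsity_C=*/ true);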
+
+
+
+
+template <typename number>
+template <typename numberB, typename numberC, typename numberV>
+void
+SparseMatrix<number>::Tmmult (SparseMatrix<numberC> &C,
+ const SparseMatrix<numberB> &B,
+ const Vector<numberV> &V,
+ const bool rebuild_sparsity_C) const
+{
+ const bool use_vector = (V.size() == m());
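+ // if V has exactly as many entries as
+ // A has rows, it acts as a diagonal
+ // scaling and we compute
+ // C = A^T * diag(V) * B; otherwise we
+ // compute the plain product C = A^T * B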
+ Assert (m() == B.m(), ExcDimensionMismatch(m(), B.m()));
+ Assert (cols != 0, ExcNotInitialized());
+ Assert (B.cols != 0, ExcNotInitialized());
+ Assert (C.cols != 0, ExcNotInitialized());
+
+ const SparsityPattern & sp_A = *cols;
+ const SparsityPattern & sp_B = *B.cols;
+
+ // clear previous content of C
+ if (rebuild_sparsity_C == true)
+ {
+ // need to change the sparsity pattern of
+ // C, so cast away const-ness.
+ SparsityPattern & sp_C =
+ *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
+ C.clear();
+ sp_C.reinit (0,0,0);
+
+ // create a sparsity pattern for the
+ // matrix. since we compute the product
+ // of the transpose of A with B, we go
+ // through all the rows i of A, and for
+ // each nonzero entry at column j of
+ // such a row we add the whole row i of
+ // matrix B to row j of C. This means
+ // that we will insert a lot of entries
+ // to each row, which is best handled by
+ // the CompressedSimpleSparsityPattern
+ // class.
+ {
+ CompressedSimpleSparsityPattern csp (n(), B.n());
+ for (unsigned int i = 0; i < sp_A.n_rows(); ++i)
+ {
+ const unsigned int * rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+ const unsigned int *const end_rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ unsigned int * new_cols = const_cast<unsigned int*>
+ (&sp_B.get_column_numbers()
+ [sp_B.get_rowstart_indices()[i]]);
+ unsigned int * end_new_cols = const_cast<unsigned int*>
+ (&sp_B.get_column_numbers()
+ [sp_B.get_rowstart_indices()[i+1]]);
+
+ if (sp_B.optimize_diagonal() == true)
+ ++new_cols;
+
+ for (; rows != end_rows; ++rows)
+ {
+ const unsigned int row = *rows;
+
+ // if B stores its diagonal element
+ // first in each row, it was excluded
+ // from the sorted range above; add the
+ // entry it generates by hand, so that
+ // sortedness is maintained
+ if (sp_B.optimize_diagonal() == true)
+ csp.add(row, i);
+
+ csp.add_entries (row, new_cols, end_new_cols, true);
+ }
+ }
+ sp_C.copy_from (csp);
+ }
+
+ // reinit matrix C from that information
+ C.reinit (sp_C);
+ }
+
+ Assert (C.m() == n(), ExcDimensionMismatch(C.m(), n()));
+ Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+ // create an array that caches some
+ // elements that are going to be written
+ // into the new matrix.
+ unsigned int max_n_cols_B = 0;
+ for (unsigned int i=0; i<B.m(); ++i)
+ max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
+ std::vector<numberC> new_entries(max_n_cols_B);
+
+ // now compute the actual entries: as
+ // above, loop over the rows of A and
+ // over the nonzero entries within each
+ // row; each entry A(i,row) multiplies
+ // all the elements of row i of B, and
+ // the products are added to row 'row'
+ // of C
+ for (unsigned int i=0; i<m(); ++i)
+ {
+ const unsigned int * rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+ const unsigned int *const end_rows =
+ &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+ const unsigned int * new_cols =
+ (&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i]]);
+ if (sp_B.optimize_diagonal())
+ ++new_cols;
+
+ const numberB * const end_cols =
+ &B.val[&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i+1]]-
+ &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
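+ // end_cols points one past the last
+ // stored value of row i of B; it does
+ // not depend on the entries of row i of
+ // A, so it is hoisted out of the loop
+ // below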
+
+ for (; rows != end_rows; ++rows)
+ {
+ const unsigned int row = *rows;
+ const double A_val = global_entry
+ (rows-&sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[0]]);
+
+ // special treatment for diagonal
+ if (sp_B.optimize_diagonal())
+ C.add (row, i, A_val *
+ B.global_entry(new_cols-1-&sp_B.get_column_numbers()
+ [sp_B.get_rowstart_indices()[0]]) *
+ (use_vector ? V(i) : 1));
+
+ // now the innermost loop that goes over
+ // all the elements in row 'i' of matrix
+ // B. Cache the elements, and then write
+ // them into C at once
+ numberC * new_ptr = &new_entries[0];
+ const numberB * B_val_ptr =
+ &B.val[new_cols-&sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[0]]];
+ for (; B_val_ptr != end_cols; ++B_val_ptr)
+ *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
+
+ C.add (row, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+ false, true);
+ }
+ }
+}
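+
+ // A minimal usage sketch (the names A, B, C and V are illustrative):
+ // compute C = A^T * diag(V) * B with a scaling vector holding one entry
+ // per row of A; as above, C needs to be attached to a sparsity pattern.
+ //
+ //   Vector<double> V (A.m());
+ //   // ... fill V ...
+ //   A.Tmmult (C, B, V, /*rebuild_sparsity_C=*/ true);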
+
template <typename number>
+ void
+ SparseMatrix::Tmmult (SparseMatrix &C,
+ const SparseMatrix &B,
+ const VectorBase &V) const
+ {
+ const bool use_vector = (V.size() == m());
+ Assert (m() == B.m(), ExcDimensionMismatch(m(), B.m()));
+ Assert (this->matrix->RangeMap().SameAs(B.matrix->RangeMap()),
+ ExcMessage ("Parallel partitioning of A and B does not fit."));
+
+ C.clear();
+ // use the matrix-matrix product
+ // routines built into ML.
+
+ // create ML operators on top of the
+ // Epetra matrices.
+ ML_Comm* comm;
+ ML_Comm_Create(&comm);
+ ML_Operator *A_nontrans = ML_Operator_Create(comm);
+ ML_Operator *A_ = ML_Operator_Create(comm);
+ ML_Operator *B_ = ML_Operator_Create(comm);
+ ML_Operator *C_ = ML_Operator_Create(comm);
+
+ ML_Operator_WrapEpetraCrsMatrix(&*matrix,A_nontrans,false);
+ ML_Operator_Transpose_byrow(A_nontrans,A_);
+ ML_Operator_WrapEpetraCrsMatrix(&*B.matrix,B_,false);
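+ // A_ now represents the transpose of
+ // the present matrix and B_ wraps B;
+ // the ML wrappers access the Epetra
+ // data through getrow callbacks rather
+ // than copying it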
+
+ // Case where we only have to multiply
+ // two matrices. We do this by hand
+ // since we can save some operations
+ // compared to calling
+ // ml/src/Operator/ml_rap.c, which
+ // multiplies three matrices. This code
+ // is still similar to the one found in
+ // ml/src/Operator/ml_rap.c.
+ if (use_vector == false)
+ {
+ // import data if necessary
+ ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr;
+ ML_CommInfoOP *getrow_comm;
+ int max_per_proc;
+ int N_input_vector = B_->invec_leng;
+ getrow_comm = B_->getrow->pre_comm;
+ if ( getrow_comm != NULL)
+ for (int i = 0; i < getrow_comm->N_neighbors; i++)
+ for (int j = 0; j < getrow_comm->neighbors[i].N_send; j++)
+ AssertThrow (getrow_comm->neighbors[i].send_list[j] < N_input_vector,
+ ExcInternalError());
+
+ ML_create_unique_col_id(N_input_vector, &(B_->getrow->loc_glob_map),
+ getrow_comm, &max_per_proc, B_->comm);
+ B_->getrow->use_loc_glob_map = ML_YES;
+ if (A_->getrow->pre_comm != NULL)
+ ML_exchange_rows( B_, &Btmp, A_->getrow->pre_comm);
+ else Btmp = B_;
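+ // Btmp now gives access to B's rows
+ // plus the off-processor rows that are
+ // needed for the local part of the
+ // product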
+
+ // perform matrix-matrix product
+ ML_matmat_mult(A_, Btmp , &Ctmp);
+
+ // release temporary structures we needed
+ // for multiplication
+ ML_free(B_->getrow->loc_glob_map);
+ B_->getrow->loc_glob_map = NULL;
+ B_->getrow->use_loc_glob_map = ML_NO;
+ if (A_->getrow->pre_comm != NULL) {
+ tptr = Btmp;
+ while ( (tptr!= NULL) && (tptr->sub_matrix != B_))
+ tptr = tptr->sub_matrix;
+ if (tptr != NULL) tptr->sub_matrix = NULL;
+ ML_RECUR_CSR_MSRdata_Destroy(Btmp);
+ ML_Operator_Destroy(&Btmp);
+ }
+
+ // convert the result back into local
+ // CSR data structures
+ if (A_->getrow->post_comm != NULL) {
+ ML_exchange_rows(Ctmp, &Ctmp2, A_->getrow->post_comm);
+ }
+ else Ctmp2 = Ctmp;
+
+ ML_back_to_csrlocal(Ctmp2, C_, max_per_proc);
+
+ ML_RECUR_CSR_MSRdata_Destroy (Ctmp);
+ ML_Operator_Destroy (&Ctmp);
+
+ if (A_->getrow->post_comm != NULL) {
+ ML_RECUR_CSR_MSRdata_Destroy(Ctmp2);
+ ML_Operator_Destroy (&Ctmp2);
+ }
+ }
+ else
+ {
+ // create an Epetra_CrsMatrix with the
+ // vector content on the diagonal.
+ Epetra_CrsMatrix Vmat (Copy, this->matrix->RangeMap(),
+ B.matrix->RangeMap(), 1, true);
+ Assert (Vmat.DomainMap().SameAs(V.trilinos_vector().Map()),
+ ExcMessage ("Column map of matrix does not fit with vector map!"));
+ const int num_my_rows = B.local_size();
+ for (int i=0; i<num_my_rows; ++i)
+ Vmat.InsertGlobalValues (i, 1, &V.trilinos_vector()[0][i], &i);
+ Vmat.FillComplete();
+ Vmat.OptimizeStorage();
+
+ // wrap even this matrix into ML format
+ ML_Operator *V_ = ML_Operator_Create (comm);
+ ML_Operator_WrapEpetraCrsMatrix (&Vmat, V_, false);
+
+ // perform the triple matrix product
+ // C_ = A_ * V_ * B_, i.e.,
+ // C = A^T * diag(V) * B, using
+ // ml/src/Operator/ml_rap.c
+ ML_rap (A_, V_, B_, C_, ML_CSR_MATRIX);
+
+ ML_Operator_Destroy (&V_);
+ }
+
+ // create an Epetra matrix from the ML
+ // matrix that we got as a result.
+ Epetra_CrsMatrix * C_mat;
+ ML_Operator2EpetraCrsMatrix(C_, C_mat);
+ C_mat->FillComplete();
+ C_mat->OptimizeStorage();
+ C.reinit (*C_mat);
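+ // C.reinit() leaves C with its own
+ // copy of the data, so the Epetra
+ // matrix generated by the conversion
+ // above can be deleted again below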
+
+ // Sanity check
+ Assert (this->matrix->DomainMap().SameAs(C.matrix->RangeMap()),
+ ExcInternalError());
+ Assert (B.matrix->DomainMap().SameAs(C.matrix->DomainMap()),
+ ExcInternalError());
+
+ // destroy allocated memory
+ delete C_mat;
+ ML_Operator_Destroy (&A_);
+ ML_Operator_Destroy (&A_nontrans);
+ ML_Operator_Destroy (&B_);
+ ML_Operator_Destroy (&C_);
+ ML_Comm_Destroy (&comm);
+ }
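+
+ // A minimal usage sketch (names illustrative): with matrices A and B
+ // that share the same row partitioning and a TrilinosWrappers vector V
+ // holding one entry per matrix row,
+ //
+ //   TrilinosWrappers::SparseMatrix C;
+ //   A.Tmmult (C, B, V);   // computes C = A^T * diag(V) * B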
+
+
+
void
SparseMatrix::add (const TrilinosScalar factor,
const SparseMatrix &rhs)