From e0c91cf5c408a212d68383dbd109a11d07de7c62 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Fri, 8 May 2009 09:47:09 +0000
Subject: [PATCH] Provide a Tmmult operation as well.

git-svn-id: https://svn.dealii.org/trunk@18823 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/lac/include/lac/sparse_matrix.h       |  43 +-
 .../lac/include/lac/sparse_matrix.templates.h | 407 ++++++++++++------
 .../lac/include/lac/trilinos_sparse_matrix.h  |  33 +-
 deal.II/lac/source/sparse_matrix.inst.in      |   3 +
 deal.II/lac/source/trilinos_sparse_matrix.cc  | 138 ++++++
 5 files changed, 488 insertions(+), 136 deletions(-)

diff --git a/deal.II/lac/include/lac/sparse_matrix.h b/deal.II/lac/include/lac/sparse_matrix.h
index e795fdaaa8..424b8f895d 100644
--- a/deal.II/lac/include/lac/sparse_matrix.h
+++ b/deal.II/lac/include/lac/sparse_matrix.h
@@ -1266,7 +1266,7 @@ class SparseMatrix : public virtual Subscriptor
 //@}
     /**
-     * @name Matrix vector multiplications
+     * @name Multiplications
      */
 //@{
     /**
@@ -1476,6 +1476,47 @@ class SparseMatrix : public virtual Subscriptor
                 const Vector<number> &V = Vector<number>(),
                 const bool            rebuild_sparsity_pattern = true) const;
 
+    /**
+     * Perform the matrix-matrix
+     * multiplication with the transpose of
+     * <tt>this</tt>, i.e., <tt>C =
+     * A<sup>T</sup> * B</tt>, or, if an
+     * optional vector argument is given,
+     * <tt>C = A<sup>T</sup> * diag(V) *
+     * B</tt>, where <tt>diag(V)</tt>
+     * defines a diagonal matrix with the
+     * vector entries.
+     *
+     * This function assumes that the
+     * calling matrix <tt>A</tt> and the
+     * argument <tt>B</tt> have compatible
+     * sizes. The size of <tt>C</tt> will be
+     * set within this function.
+     *
+     * The content as well as the sparsity
+     * pattern of the matrix <tt>C</tt> will
+     * be changed by this function, so make
+     * sure that the sparsity pattern is not
+     * used somewhere else in your program.
+     * This is an expensive operation, so
+     * think twice before you use this
+     * function.
+     *
+     * There is an optional flag
+     * <tt>rebuild_sparsity_pattern</tt>
+     * that can be used to bypass the
+     * creation of a new sparsity pattern
+     * and instead use the sparsity pattern
+     * stored in <tt>C</tt>. In that case,
+     * make sure that it really fits.
+     */
+    template <typename numberB, typename numberC>
+    void Tmmult (SparseMatrix<numberC>       &C,
+                 const SparseMatrix<numberB> &B,
+                 const Vector<number>        &V = Vector<number>(),
+                 const bool                   rebuild_sparsity_pattern = true) const;
+
 //@}
     /**
      * @name Matrix norms
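The declaration above mirrors the existing mmult interface. As a usage
illustration (not part of the patch, names invented), assume C has
previously been initialized against a sparsity pattern object; with the
default rebuild_sparsity_pattern == true, Tmmult clears that pattern and
rebuilds it to fit the product:

    #include <lac/sparse_matrix.h>
    #include <lac/vector.h>

    // If A is m x n and B is m x p, then C = A^T * diag(V) * B is n x p,
    // and V must have size m. Omitting V yields the plain product A^T B.
    void transposed_weighted_product (const SparseMatrix<double> &A,
                                      const SparseMatrix<double> &B,
                                      const Vector<double>       &V,
                                      SparseMatrix<double>       &C)
    {
      A.Tmmult (C, B, V);
    }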
diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h
index b79a557a43..1b12cd9661 100644
--- a/deal.II/lac/include/lac/sparse_matrix.templates.h
+++ b/deal.II/lac/include/lac/sparse_matrix.templates.h
@@ -713,140 +713,6 @@ SparseMatrix<number>::Tvmult_add (OutVector& dst,
 
 
 
-template <typename number>
-template <typename numberB, typename numberC>
-void
-SparseMatrix<number>::mmult (SparseMatrix<numberC>       &C,
-                             const SparseMatrix<numberB> &B,
-                             const Vector<number>        &V,
-                             const bool                   rebuild_sparsity_C) const
-{
-  const bool use_vector = V.size() == n() ? true : false;
-  Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
-  Assert (cols != 0, ExcNotInitialized());
-  Assert (B.cols != 0, ExcNotInitialized());
-  Assert (C.cols != 0, ExcNotInitialized());
-
-  const SparsityPattern & sp_A = *cols;
-  const SparsityPattern & sp_B = *B.cols;
-
-                                   // clear previous content of C
-  if (rebuild_sparsity_C == true)
-    {
-                                   // need to change the sparsity pattern of
-                                   // C, so cast away const-ness.
-      SparsityPattern & sp_C =
-        *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
-      C.clear();
-      sp_C.reinit (0,0,0);
-
-                                   // create a sparsity pattern for the
-                                   // matrix. we will go through all the
-                                   // rows in the matrix A, and for each
-                                   // column in a row we add the whole row
-                                   // of matrix B with that row number. This
-                                   // means that we will insert a lot of
-                                   // entries to each row, which is best
-                                   // handled by the
-                                   // CompressedSimpleSparsityPattern class.
-      {
-        CompressedSimpleSparsityPattern csp (m(), B.n());
-        for (unsigned int i = 0; i < csp.n_rows(); ++i)
-          {
-            const unsigned int * rows =
-              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
-            const unsigned int *const end_rows =
-              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
-            for (; rows != end_rows; ++rows)
-              {
-                const unsigned int col = *rows;
-                unsigned int * new_cols = const_cast<unsigned int*>
-                  (&sp_B.get_column_numbers()
-                   [sp_B.get_rowstart_indices()[col]]);
-                unsigned int * end_new_cols = const_cast<unsigned int*>
-                  (&sp_B.get_column_numbers()
-                   [sp_B.get_rowstart_indices()[col+1]]);
-
-                                   // if B has a diagonal, need to add that
-                                   // manually. this way, we maintain
-                                   // sortedness.
-                if (sp_B.optimize_diagonal() == true)
-                  {
-                    ++new_cols;
-                    csp.add(i, col);
-                  }
-
-                csp.add_entries (i, new_cols, end_new_cols, true);
-              }
-          }
-        sp_C.copy_from (csp);
-      }
-
-                                   // reinit matrix C from that information
-      C.reinit (sp_C);
-    }
-
-  Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
-  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
-
-                                   // create an array that caches some
-                                   // elements that are going to be written
-                                   // into the new matrix.
-  unsigned int max_n_cols_B = 0;
-  for (unsigned int i=0; i<B.m(); ++i)
-    max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
-  std::vector<numberC> new_entries(max_n_cols_B);
-
-                                   // now compute the actual entries: a
-                                   // matrix-matrix product involves three
-                                   // nested loops. One over the rows of A,
-                                   // for each row we then loop over all the
-                                   // columns, and then we need to multiply
-                                   // each element with all the elements in
-                                   // that row in B.
-  for (unsigned int i=0; i<C.m(); ++i)
-    {
-      const unsigned int * rows =
-        &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
-      const unsigned int *const end_rows =
-        &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
-      for (; rows != end_rows; ++rows)
-        {
-          const double A_val = val[rows-&sp_A.get_column_numbers()
-                                   [sp_A.get_rowstart_indices()[0]]];
-          const unsigned int col = *rows;
-          const unsigned int * new_cols =
-            &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]];
-
-                                   // special treatment for diagonal
-          if (sp_B.optimize_diagonal() == true)
-            {
-              C.add (i, *new_cols,
-                     A_val * B.val[new_cols-&sp_B.get_column_numbers()
-                                   [sp_B.get_rowstart_indices()[0]]] *
-                     (use_vector ? V(col) : 1));
-              ++new_cols;
-            }
-
-                                   // now the innermost loop that goes over
-                                   // all the elements in row 'col' of
-                                   // matrix B. Cache the elements, and
-                                   // then write them into C at once.
-          numberC * new_ptr = &new_entries[0];
-          const numberB * B_val_ptr =
-            &B.val[new_cols-&sp_B.get_column_numbers()
-                   [sp_B.get_rowstart_indices()[0]]];
-          const numberB * const end_cols =
-            &B.val[sp_B.get_rowstart_indices()[col+1]];
-          for (; B_val_ptr != end_cols; ++B_val_ptr)
-            *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
-
-          C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
-                 false, true);
-        }
-    }
-}
-
-
-
 template <typename number>
 template <typename somenumber>
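The pattern-construction loop that was removed here (and reappears below,
next to the new Tmmult) is a row merge: row i of C collects the union of
those rows of B whose indices occur as column numbers in row i of A. The
same idea written against std::set, as an illustration only (this is not
deal.II code):

    #include <set>
    #include <vector>

    // sp_X[i] holds the column indices of row i of matrix X.
    typedef std::vector<std::set<unsigned int> > pattern;

    pattern mmult_pattern (const pattern &sp_A, const pattern &sp_B)
    {
      pattern sp_C (sp_A.size());
      for (unsigned int i=0; i<sp_A.size(); ++i)
        for (std::set<unsigned int>::const_iterator col = sp_A[i].begin();
             col != sp_A[i].end(); ++col)
          // merge the whole row *col of B into row i of C
          sp_C[i].insert (sp_B[*col].begin(), sp_B[*col].end());
      return sp_C;
    }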
@@ -1074,6 +940,279 @@ threaded_matrix_scalar_product (const Vector<somenumber> &u,
 
 
 
+template <typename number>
+template <typename numberB, typename numberC>
+void
+SparseMatrix<number>::mmult (SparseMatrix<numberC>       &C,
+                             const SparseMatrix<numberB> &B,
+                             const Vector<number>        &V,
+                             const bool                   rebuild_sparsity_C) const
+{
+  const bool use_vector = V.size() == n() ? true : false;
+  Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
+  Assert (cols != 0, ExcNotInitialized());
+  Assert (B.cols != 0, ExcNotInitialized());
+  Assert (C.cols != 0, ExcNotInitialized());
+
+  const SparsityPattern & sp_A = *cols;
+  const SparsityPattern & sp_B = *B.cols;
+
+                                   // clear previous content of C
+  if (rebuild_sparsity_C == true)
+    {
+                                   // need to change the sparsity pattern of
+                                   // C, so cast away const-ness.
+      SparsityPattern & sp_C =
+        *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
+      C.clear();
+      sp_C.reinit (0,0,0);
+
+                                   // create a sparsity pattern for the
+                                   // matrix. we will go through all the
+                                   // rows in the matrix A, and for each
+                                   // column in a row we add the whole row
+                                   // of matrix B with that row number. This
+                                   // means that we will insert a lot of
+                                   // entries to each row, which is best
+                                   // handled by the
+                                   // CompressedSimpleSparsityPattern class.
+      {
+        CompressedSimpleSparsityPattern csp (m(), B.n());
+        for (unsigned int i = 0; i < csp.n_rows(); ++i)
+          {
+            const unsigned int * rows =
+              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+            const unsigned int *const end_rows =
+              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+            for (; rows != end_rows; ++rows)
+              {
+                const unsigned int col = *rows;
+                unsigned int * new_cols = const_cast<unsigned int*>
+                  (&sp_B.get_column_numbers()
+                   [sp_B.get_rowstart_indices()[col]]);
+                unsigned int * end_new_cols = const_cast<unsigned int*>
+                  (&sp_B.get_column_numbers()
+                   [sp_B.get_rowstart_indices()[col+1]]);
+
+                                   // if B has a diagonal, need to add that
+                                   // manually. this way, we maintain
+                                   // sortedness.
+                if (sp_B.optimize_diagonal() == true)
+                  {
+                    ++new_cols;
+                    csp.add(i, col);
+                  }
+
+                csp.add_entries (i, new_cols, end_new_cols, true);
+              }
+          }
+        sp_C.copy_from (csp);
+      }
+
+                                   // reinit matrix C from that information
+      C.reinit (sp_C);
+    }
+
+  Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
+  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+                                   // create an array that caches some
+                                   // elements that are going to be written
+                                   // into the new matrix.
+  unsigned int max_n_cols_B = 0;
+  for (unsigned int i=0; i<B.m(); ++i)
+    max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
+  std::vector<numberC> new_entries(max_n_cols_B);
+
+                                   // now compute the actual entries: a
+                                   // matrix-matrix product involves three
+                                   // nested loops. One over the rows of A,
+                                   // for each row we then loop over all the
+                                   // columns, and then we need to multiply
+                                   // each element with all the elements in
+                                   // that row in B.
+  for (unsigned int i=0; i<C.m(); ++i)
+    {
+      const unsigned int * rows =
+        &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+      const unsigned int *const end_rows =
+        &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+      for (; rows != end_rows; ++rows)
+        {
+          const double A_val = val[rows-&sp_A.get_column_numbers()
+                                   [sp_A.get_rowstart_indices()[0]]];
+          const unsigned int col = *rows;
+          const unsigned int * new_cols =
+            &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[col]];
+
+                                   // special treatment for diagonal
+          if (sp_B.optimize_diagonal() == true)
+            {
+              C.add (i, *new_cols,
+                     A_val * B.val[new_cols-&sp_B.get_column_numbers()
+                                   [sp_B.get_rowstart_indices()[0]]] *
+                     (use_vector ? V(col) : 1));
+              ++new_cols;
+            }
+
+                                   // now the innermost loop that goes over
+                                   // all the elements in row 'col' of
+                                   // matrix B. Cache the elements, and
+                                   // then write them into C at once.
+          numberC * new_ptr = &new_entries[0];
+          const numberB * B_val_ptr =
+            &B.val[new_cols-&sp_B.get_column_numbers()
+                   [sp_B.get_rowstart_indices()[0]]];
+          const numberB * const end_cols =
+            &B.val[sp_B.get_rowstart_indices()[col+1]];
+          for (; B_val_ptr != end_cols; ++B_val_ptr)
+            *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
+
+          C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+                 false, true);
+        }
+    }
+}
+
+
+
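For the transposed product added next it helps to spell the operation out
entrywise: C(j,k) = sum_i A(i,j) * V(i) * B(i,k). The implementation
therefore keeps the outer loop over the rows i of A and scatters the
scaled row i of B into those rows of C that are indexed by the column
numbers of row i of A; the transpose of A is never formed explicitly. A
dense reference version with the same loop order, illustrative only and
useful for checking small cases (assumes non-empty matrices):

    #include <vector>

    // C = A^T * diag(V) * B for dense matrices stored row-wise;
    // A is m x n, B is m x p, V has size m, C comes out as n x p.
    void dense_Tmmult (const std::vector<std::vector<double> > &A,
                       const std::vector<std::vector<double> > &B,
                       const std::vector<double>               &V,
                       std::vector<std::vector<double> >       &C)
    {
      const unsigned int m = A.size(), n = A[0].size(), p = B[0].size();
      C.assign (n, std::vector<double> (p, 0.));
      for (unsigned int i=0; i<m; ++i)      // row i of A ...
        for (unsigned int j=0; j<n; ++j)    // ... scatters into row j of C
          for (unsigned int k=0; k<p; ++k)
            C[j][k] += A[i][j] * V[i] * B[i][k];
    }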
+template <typename number>
+template <typename numberB, typename numberC>
+void
+SparseMatrix<number>::Tmmult (SparseMatrix<numberC>       &C,
+                              const SparseMatrix<numberB> &B,
+                              const Vector<number>        &V,
+                              const bool                   rebuild_sparsity_C) const
+{
+  const bool use_vector = V.size() == m() ? true : false;
+  Assert (m() == B.m(), ExcDimensionMismatch(m(), B.m()));
+  Assert (cols != 0, ExcNotInitialized());
+  Assert (B.cols != 0, ExcNotInitialized());
+  Assert (C.cols != 0, ExcNotInitialized());
+
+  const SparsityPattern & sp_A = *cols;
+  const SparsityPattern & sp_B = *B.cols;
+
+                                   // clear previous content of C
+  if (rebuild_sparsity_C == true)
+    {
+                                   // need to change the sparsity pattern of
+                                   // C, so cast away const-ness.
+      SparsityPattern & sp_C =
+        *(const_cast<SparsityPattern *>(&C.get_sparsity_pattern()));
+      C.clear();
+      sp_C.reinit (0,0,0);
+
+                                   // create a sparsity pattern for the
+                                   // matrix. we will go through all the
+                                   // rows in the matrix A, and for each
+                                   // column in a row we add the current
+                                   // row of matrix B to the row of C
+                                   // indexed by that column. This means
+                                   // that we will insert a lot of
+                                   // entries to each row, which is best
+                                   // handled by the
+                                   // CompressedSimpleSparsityPattern class.
+      {
+        CompressedSimpleSparsityPattern csp (n(), B.n());
+        for (unsigned int i = 0; i < sp_A.n_rows(); ++i)
+          {
+            const unsigned int * rows =
+              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+            const unsigned int *const end_rows =
+              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+            unsigned int * new_cols = const_cast<unsigned int*>
+              (&sp_B.get_column_numbers()
+               [sp_B.get_rowstart_indices()[i]]);
+            unsigned int * end_new_cols = const_cast<unsigned int*>
+              (&sp_B.get_column_numbers()
+               [sp_B.get_rowstart_indices()[i+1]]);
+
+            if (sp_B.optimize_diagonal() == true)
+              ++new_cols;
+
+            for (; rows != end_rows; ++rows)
+              {
+                const unsigned int row = *rows;
+
+                                   // if B has a diagonal, need to add that
+                                   // manually. this way, we maintain
+                                   // sortedness.
+                if (sp_B.optimize_diagonal() == true)
+                  csp.add(row, i);
+
+                csp.add_entries (row, new_cols, end_new_cols, true);
+              }
+          }
+        sp_C.copy_from (csp);
+      }
+
+                                   // reinit matrix C from that information
+      C.reinit (sp_C);
+    }
+
+  Assert (C.m() == n(), ExcDimensionMismatch(C.m(), n()));
+  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+                                   // create an array that caches some
+                                   // elements that are going to be written
+                                   // into the new matrix.
+  unsigned int max_n_cols_B = 0;
+  for (unsigned int i=0; i<B.m(); ++i)
+    max_n_cols_B = std::max (max_n_cols_B, sp_B.row_length(i));
+  std::vector<numberC> new_entries(max_n_cols_B);
+
+                                   // now compute the actual entries: a
+                                   // matrix-matrix product involves three
+                                   // nested loops. One over the rows of A,
+                                   // for each row we then loop over all the
+                                   // columns, and then we need to multiply
+                                   // each element with all the elements in
+                                   // that row in B.
+  for (unsigned int i=0; i<m(); ++i)
+    {
+      const unsigned int * rows =
+        &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+      const unsigned int *const end_rows =
+        &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+      const unsigned int * new_cols =
+        &sp_B.get_column_numbers()[sp_B.get_rowstart_indices()[i]];
+      if (sp_B.optimize_diagonal() == true)
+        ++new_cols;
+
+      const numberB * const end_cols =
+        &B.val[sp_B.get_rowstart_indices()[i+1]];
+
+      for (; rows != end_rows; ++rows)
+        {
+          const unsigned int row = *rows;
+          const double A_val = val[rows-&sp_A.get_column_numbers()
+                                   [sp_A.get_rowstart_indices()[0]]];
+
+                                   // special treatment for diagonal
+          if (sp_B.optimize_diagonal() == true)
+            C.add (row, i,
+                   A_val * B.val[new_cols-1-&sp_B.get_column_numbers()
+                                 [sp_B.get_rowstart_indices()[0]]] *
+                   (use_vector ? V(i) : 1));
+
+                                   // now the innermost loop that goes over
+                                   // all the elements in row 'i' of
+                                   // matrix B. Cache the elements, and
+                                   // then write them into C at once.
+          numberC * new_ptr = &new_entries[0];
+          const numberB * B_val_ptr =
+            &B.val[new_cols-&sp_B.get_column_numbers()
+                   [sp_B.get_rowstart_indices()[0]]];
+          for (; B_val_ptr != end_cols; ++B_val_ptr)
+            *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
+
+          C.add (row, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+                 false, true);
+        }
+    }
+}
+
+
+
 template <typename number>
 template <typename somenumber>

diff --git a/deal.II/lac/include/lac/trilinos_sparse_matrix.h b/deal.II/lac/include/lac/trilinos_sparse_matrix.h
index d917623dec..9447da225d 100755
--- a/deal.II/lac/include/lac/trilinos_sparse_matrix.h
+++ b/deal.II/lac/include/lac/trilinos_sparse_matrix.h
@@ -1298,7 +1298,7 @@ namespace TrilinosWrappers
 //@}
       /**
-       * @name Matrix vector multiplications
+       * @name Multiplications
        */
 //@{
 
@@ -1592,6 +1592,37 @@ namespace TrilinosWrappers
                    const SparseMatrix &B,
                    const VectorBase   &V = VectorBase()) const;
 
+
+      /**
+       * Perform the matrix-matrix
+       * multiplication with the transpose of
+       * <tt>this</tt>, i.e., <tt>C =
+       * A<sup>T</sup> * B</tt>, or, if an
+       * optional vector argument is given,
+       * <tt>C = A<sup>T</sup> * diag(V) *
+       * B</tt>, where <tt>diag(V)</tt>
+       * defines a diagonal matrix with the
+       * vector entries.
+       *
+       * This function assumes that the
+       * calling matrix <tt>A</tt> and the
+       * argument <tt>B</tt> have compatible
+       * sizes. The size of <tt>C</tt> will
+       * be set within this function.
+       *
+       * The content as well as the sparsity
+       * pattern of the matrix <tt>C</tt>
+       * will be changed by this function,
+       * so make sure that the sparsity
+       * pattern is not used somewhere else
+       * in your program. This is an
+       * expensive operation, so think twice
+       * before you use this function.
+       */
+      void Tmmult (SparseMatrix       &C,
+                   const SparseMatrix &B,
+                   const VectorBase   &V = VectorBase()) const;
+
 //@}
       /**
        * @name Matrix norms

diff --git a/deal.II/lac/source/sparse_matrix.inst.in b/deal.II/lac/source/sparse_matrix.inst.in
index 60c364f606..5d19d30fac 100644
--- a/deal.II/lac/source/sparse_matrix.inst.in
+++ b/deal.II/lac/source/sparse_matrix.inst.in
@@ -135,6 +135,9 @@ for (S1, S2, S3, S4: REAL_SCALARS)
     template void SparseMatrix<S1>::
       mmult (SparseMatrix<S2> &, const SparseMatrix<S3> &, const Vector<S4>&,
              const bool) const;
+    template void SparseMatrix<S1>::
+      Tmmult (SparseMatrix<S2> &, const SparseMatrix<S3> &, const Vector<S4>&,
+              const bool) const;
   }
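The Trilinos variant, implemented below, takes no sparsity pattern
argument: the result matrix is cleared and rebuilt from the ML product. A
hypothetical call (names invented for illustration; V has to be
distributed like the rows of B):

    #include <lac/trilinos_sparse_matrix.h>
    #include <lac/trilinos_vector.h>

    void trilinos_example (const TrilinosWrappers::SparseMatrix &A,
                           const TrilinosWrappers::SparseMatrix &B,
                           const TrilinosWrappers::Vector       &V,
                           TrilinosWrappers::SparseMatrix       &C)
    {
      A.Tmmult (C, B, V);      // C = A^T * diag(V) * B
      // without the vector, A.Tmmult (C, B) gives C = A^T * B
    }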
diff --git a/deal.II/lac/source/trilinos_sparse_matrix.cc b/deal.II/lac/source/trilinos_sparse_matrix.cc
index 9ac95cdf04..4ac73a823d 100755
--- a/deal.II/lac/source/trilinos_sparse_matrix.cc
+++ b/deal.II/lac/source/trilinos_sparse_matrix.cc
@@ -1347,6 +1347,144 @@ namespace TrilinosWrappers
 
 
 
+  void
+  SparseMatrix::Tmmult (SparseMatrix       &C,
+                        const SparseMatrix &B,
+                        const VectorBase   &V) const
+  {
+    const bool use_vector = V.size() == m() ? true : false;
+    Assert (m() == B.m(), ExcDimensionMismatch(m(), B.m()));
+    Assert (this->matrix->RangeMap().SameAs(B.matrix->RangeMap()),
+            ExcMessage ("Parallel partitioning of A and B does not fit."));
+
+    C.clear();
+                                   // use ML built-in method for performing
+                                   // the matrix-matrix product.
+
+                                   // create ML operators on top of the
+                                   // Epetra matrices.
+    ML_Comm* comm;
+    ML_Comm_Create(&comm);
+    ML_Operator *A_nontrans = ML_Operator_Create(comm);
+    ML_Operator *A_ = ML_Operator_Create(comm);
+    ML_Operator *B_ = ML_Operator_Create(comm);
+    ML_Operator *C_ = ML_Operator_Create(comm);
+
+    ML_Operator_WrapEpetraCrsMatrix(&*matrix,A_nontrans,false);
+    ML_Operator_Transpose_byrow(A_nontrans,A_);
+    ML_Operator_WrapEpetraCrsMatrix(&*B.matrix,B_,false);
+
+                                   // Case where we only have two
+                                   // matrices. We do this by hand since we
+                                   // can save some operations compared to
+                                   // just calling ml/src/Operator/ml_rap.c
+                                   // that multiplies three matrices. This
+                                   // code is still similar to the one found
+                                   // in ml/src/Operator/ml_rap.c
+    if (use_vector == false)
+      {
+                                   // import data if necessary
+        ML_Operator *Btmp, *Ctmp, *Ctmp2, *tptr;
+        ML_CommInfoOP *getrow_comm;
+        int max_per_proc;
+        int N_input_vector = B_->invec_leng;
+        getrow_comm = B_->getrow->pre_comm;
+        if ( getrow_comm != NULL)
+          for (int i = 0; i < getrow_comm->N_neighbors; i++)
+            for (int j = 0; j < getrow_comm->neighbors[i].N_send; j++)
+              AssertThrow (getrow_comm->neighbors[i].send_list[j] < N_input_vector,
+                           ExcInternalError());
+
+        ML_create_unique_col_id(N_input_vector, &(B_->getrow->loc_glob_map),
+                                getrow_comm, &max_per_proc, B_->comm);
+        B_->getrow->use_loc_glob_map = ML_YES;
+        if (A_->getrow->pre_comm != NULL)
+          ML_exchange_rows( B_, &Btmp, A_->getrow->pre_comm);
+        else Btmp = B_;
+
+                                   // perform matrix-matrix product
+        ML_matmat_mult(A_, Btmp, &Ctmp);
+
+                                   // release temporary structures we needed
+                                   // for multiplication
+        ML_free(B_->getrow->loc_glob_map);
+        B_->getrow->loc_glob_map = NULL;
+        B_->getrow->use_loc_glob_map = ML_NO;
+        if (A_->getrow->pre_comm != NULL) {
+          tptr = Btmp;
+          while ( (tptr!= NULL) && (tptr->sub_matrix != B_))
+            tptr = tptr->sub_matrix;
+          if (tptr != NULL) tptr->sub_matrix = NULL;
+          ML_RECUR_CSR_MSRdata_Destroy(Btmp);
+          ML_Operator_Destroy(&Btmp);
+        }
+
+                                   // make correct data structures
+        if (A_->getrow->post_comm != NULL) {
+          ML_exchange_rows(Ctmp, &Ctmp2, A_->getrow->post_comm);
+        }
+        else Ctmp2 = Ctmp;
+
+        ML_back_to_csrlocal(Ctmp2, C_, max_per_proc);
+
+        ML_RECUR_CSR_MSRdata_Destroy (Ctmp);
+        ML_Operator_Destroy (&Ctmp);
+
+        if (A_->getrow->post_comm != NULL) {
+          ML_RECUR_CSR_MSRdata_Destroy(Ctmp2);
+          ML_Operator_Destroy (&Ctmp2);
+        }
+      }
+    else
+      {
+                                   // create an Epetra_CrsMatrix with the
+                                   // vector content on the diagonal.
+        Epetra_CrsMatrix Vmat (Copy, this->matrix->RangeMap(),
+                               B.matrix->RangeMap(), 1, true);
+        Assert (Vmat.DomainMap().SameAs(V.trilinos_vector().Map()),
+                ExcMessage ("Column map of matrix does not fit with vector map!"));
+        const int num_my_rows = B.local_size();
+        for (int i=0; i<num_my_rows; ++i)
+          {
+            const int global_row = B.matrix->GRID(i);
+            const double V_val = V.trilinos_vector()[0][i];
+            Vmat.InsertGlobalValues (global_row, 1, &V_val, &global_row);
+          }
+        Vmat.FillComplete();
+
+                                   // wrap the diagonal matrix into an ML
+                                   // operator, too.
+        ML_Operator *V_ = ML_Operator_Create(comm);
+        ML_Operator_WrapEpetraCrsMatrix(&Vmat,V_,false);
+
+                                   // perform the triple matrix product
+                                   // using ml/src/Operator/ml_rap.c
+        ML_rap (A_, V_, B_, C_, ML_CSR_MATRIX);
+        ML_Operator_Destroy (&V_);
+      }
+
+                                   // create an Epetra matrix from the ML
+                                   // matrix.
+    Epetra_CrsMatrix * C_mat;
+    ML_Operator2EpetraCrsMatrix (C_, C_mat);
+    C_mat->FillComplete();
+    C_mat->OptimizeStorage();
+    C.reinit (*C_mat);
+
+                                   // Sanity check
+    Assert (this->matrix->DomainMap().SameAs(C.matrix->RangeMap()),
+            ExcInternalError());
+    Assert (B.matrix->DomainMap().SameAs(C.matrix->DomainMap()),
+            ExcInternalError());
+
+                                   // destroy allocated memory
+    delete C_mat;
+    ML_Operator_Destroy (&A_);
+    ML_Operator_Destroy (&A_nontrans);
+    ML_Operator_Destroy (&B_);
+    ML_Operator_Destroy (&C_);
+    ML_Comm_Destroy (&comm);
+  }
+
+
+
   void
   SparseMatrix::add (const TrilinosScalar  factor,
                      const SparseMatrix   &rhs)
-- 
2.39.5
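One way to validate both new functions is against the long-standing dense
FullMatrix::Tmmult. A sketch of such a check (not part of the patch; it
assumes FullMatrix::copy_from can be used to flatten the sparse matrices,
and uses an arbitrary tolerance):

    #include <lac/full_matrix.h>
    #include <lac/sparse_matrix.h>
    #include <cmath>

    // returns true if the sparse C = A^T B matches the dense reference
    bool check_Tmmult (const SparseMatrix<double> &A,
                       const SparseMatrix<double> &B,
                       SparseMatrix<double>       &C)
    {
      A.Tmmult (C, B);                    // sparse product, no diag(V)

      FullMatrix<double> A_full (A.m(), A.n()), B_full (B.m(), B.n()),
                         C_ref  (A.n(), B.n());
      A_full.copy_from (A);
      B_full.copy_from (B);
      A_full.Tmmult (C_ref, B_full);      // dense reference

      for (unsigned int i=0; i<C_ref.m(); ++i)
        for (unsigned int j=0; j<C_ref.n(); ++j)
          if (std::fabs (C_ref(i,j) - C.el(i,j)) > 1e-12)
            return false;
      return true;
    }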