From: Martin Kronbichler
Date: Mon, 4 May 2009 11:19:44 +0000 (+0000)
Subject: Add a mmult function to the SparseMatrix class.
X-Git-Tag: v8.0.0~7715
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3fe0400e99d4218386c6d9e360e754db207810ef;p=dealii.git

Add a mmult function to the SparseMatrix class.

git-svn-id: https://svn.dealii.org/trunk@18808 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h
index 10e10a3b34..1fca3f8776 100644
--- a/deal.II/lac/include/lac/constraint_matrix.templates.h
+++ b/deal.II/lac/include/lac/constraint_matrix.templates.h
@@ -1439,7 +1439,7 @@ distribute_local_to_global (const FullMatrix &local_matrix,
           col_val = 0;
 
                                    // account for indirect contributions by
-                                   // constraints
+                                   // constraints in column
           if (my_indices[j].constraints != 0)
             {
               constraint_format &constraint_j = *my_indices[j].constraints;
diff --git a/deal.II/lac/include/lac/sparse_matrix.h b/deal.II/lac/include/lac/sparse_matrix.h
index 1abfa7aecd..247a44ebc4 100644
--- a/deal.II/lac/include/lac/sparse_matrix.h
+++ b/deal.II/lac/include/lac/sparse_matrix.h
@@ -1439,6 +1439,45 @@ class SparseMatrix : public virtual Subscriptor
     somenumber residual (Vector &dst,
                          const Vector &x,
                          const Vector &b) const;
+
+                                     /**
+                                      * Perform the matrix-matrix
+                                      * multiplication C = A * B, or, if an
+                                      * optional vector argument is given,
+                                      * C = A * diag(V) * B, where diag(V)
+                                      * defines a diagonal matrix with the
+                                      * vector entries.
+                                      *
+                                      * This function assumes that the
+                                      * calling matrix A and the argument B
+                                      * have compatible sizes. The size of C
+                                      * will be set within this function.
+                                      *
+                                      * The content as well as the sparsity
+                                      * pattern of the matrix C will be
+                                      * changed by this function, so make
+                                      * sure that the sparsity pattern is not
+                                      * used anywhere else in your program.
+                                      * This is an expensive operation, so
+                                      * think twice before you use this
+                                      * function.
+                                      *
+                                      * There is an optional flag
+                                      * rebuild_sparsity_pattern that can be
+                                      * used to bypass the creation of a new
+                                      * sparsity pattern and instead use the
+                                      * sparsity pattern already stored in C.
+                                      * In that case, make sure that it
+                                      * really fits.
+                                      */
+    template
+    void mmult (SparseMatrix &C,
+                const SparseMatrix &B,
+                const Vector &V = Vector(),
+                const bool rebuild_sparsity_pattern = true) const;
 
 //@}
 
     /**
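A minimal usage sketch of the new interface (illustration only, not part of
the patch). It assumes that A, B and C are SparseMatrix<double> objects that
have already been attached to sparsity patterns elsewhere, and that the
pattern behind C is not shared with any other matrix, since mmult rebuilds it
by default; the include paths mirror the deal.II/lac/include layout seen in
this diff.

  #include <lac/sparse_matrix.h>
  #include <lac/vector.h>

  void multiply_example (const SparseMatrix<double> &A,
                         const SparseMatrix<double> &B,
                         SparseMatrix<double>       &C)
  {
    // C = A * B: the size and sparsity pattern of C are set inside mmult
    A.mmult (C, B);

    // weighted variant C = A * diag(V) * B, with V of length A.n()
    Vector<double> V (A.n());
    V = 1.;
    A.mmult (C, B, V);
  }

If the pattern stored in C is already known to fit, the rebuild can be
skipped by passing rebuild_sparsity_pattern = false as the last argument.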
diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h
index e5874f41e0..b79a557a43 100644
--- a/deal.II/lac/include/lac/sparse_matrix.templates.h
+++ b/deal.II/lac/include/lac/sparse_matrix.templates.h
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 
 // we only need output streams, but older compilers did not provide
@@ -348,27 +349,30 @@ SparseMatrix::add (const unsigned int row,
                                    // just go through the column indices and
                                    // look whether we found one, rather than
                                    // doing many binary searches
-  if (col_indices_are_sorted == true)
-    if (n_cols * 8 > cols->row_length(row))
+  if (elide_zero_values == false && col_indices_are_sorted == true &&
+      n_cols > 3)
     {
                                    // check whether the given indices are
                                    // really sorted
#ifdef DEBUG
      for (unsigned int i=1; i<n_cols; ++i)
        Assert (col_indices[i] > col_indices[i-1],
-               ExcMessage("Indices are not sorted."));
+               ExcMessage("List of indices not sorted or with duplicates."));
#endif
+
+      const unsigned int * this_cols =
+        &cols->get_column_numbers()[cols->get_rowstart_indices()[row]];
+      number * val_ptr = &val[cols->get_rowstart_indices()[row]];
+
       if (cols->optimize_diagonal() == true)
        {
-         const unsigned int * this_cols =
-           &cols->get_column_numbers()[cols->get_rowstart_indices()[row]];
-         number * val_ptr = &val[cols->get_rowstart_indices()[row]];
-
-         Assert (this_cols[0] == row, ExcInternalError());
                                    // find diagonal and add it if found
-         const unsigned int * diag_pos = std::lower_bound (col_indices,
-                                                           col_indices+n_cols,
-                                                           row);
+         Assert (this_cols[0] == row, ExcInternalError());
+         const unsigned int * diag_pos =
+           internals::SparsityPatternTools::optimized_lower_bound (col_indices,
+                                                                    col_indices+n_cols,
+                                                                    row);
          const unsigned int diag = diag_pos - col_indices;
          unsigned int post_diag = diag;
          if (diag != n_cols && *diag_pos == row)
@@ -410,9 +414,6 @@ SparseMatrix::add (const unsigned int row,
        }
       else
        {
-         const unsigned int * this_cols =
-           &cols->get_column_numbers()[cols->get_rowstart_indices()[row]];
-         number * val_ptr = &val[cols->get_rowstart_indices()[row]];
          unsigned int counter = 0;
          for (unsigned int i=0; i::vmult (OutVector& dst,
 }
 
+
 template
 template
 void
@@ -631,6 +633,7 @@ SparseMatrix::threaded_vmult (OutVector &dst,
 }
 
+
 template
 template
 void
@@ -657,6 +660,7 @@ SparseMatrix::Tvmult (OutVector& dst,
 }
 
+
 template
 template
 void
@@ -676,15 +680,16 @@ SparseMatrix::vmult_add (OutVector& dst,
   typename OutVector::iterator dst_ptr = dst.begin();
   for (unsigned int row=0; rowrowstart[row+1]];
       while (val_ptr != val_end_of_row)
        s += *val_ptr++ * src(*colnum_ptr++);
-      *dst_ptr++ += s;
+      *dst_ptr++ = s;
     };
 }
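Caller-side sketch of the optimized add() path touched above (illustration
only, not part of the patch). The signature used here, add(row, n_cols,
col_indices, values, elide_zero_values, col_indices_are_sorted), is assumed
from the variable names in the hunk; the optimized branch requires strictly
increasing column indices, elide_zero_values == false and more than three
columns, and every (row, column) pair must already exist in the sparsity
pattern of A.

  #include <lac/sparse_matrix.h>

  void add_sorted_row (SparseMatrix<double> &A,
                       const unsigned int    row)
  {
    const unsigned int n_cols = 4;
    const unsigned int cols[n_cols] = {2, 5, 7, 11};    // strictly increasing
    const double       vals[n_cols] = {1., 0., 2., 3.}; // zeros written, too

    A.add (row, n_cols, cols, vals,
           /*elide_zero_values=*/ false,
           /*col_indices_are_sorted=*/ true);
  }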
@@ -707,6 +712,141 @@ SparseMatrix::Tvmult_add (OutVector& dst,
 }
 
 
+
+template
+template
+void
+SparseMatrix::mmult (SparseMatrix &C,
+                     const SparseMatrix &B,
+                     const Vector &V,
+                     const bool rebuild_sparsity_C) const
+{
+  const bool use_vector = V.size() == n() ? true : false;
+  Assert (n() == B.m(), ExcDimensionMismatch(n(), B.m()));
+  Assert (cols != 0, ExcNotInitialized());
+  Assert (B.cols != 0, ExcNotInitialized());
+  Assert (C.cols != 0, ExcNotInitialized());
+
+  const SparsityPattern & sp_A = *cols;
+  const SparsityPattern & sp_B = *B.cols;
+
+                                   // clear previous content of C
+  if (rebuild_sparsity_C == true)
+    {
+                                   // need to change the sparsity pattern of
+                                   // C, so cast away const-ness.
+      SparsityPattern & sp_C =
+        *(const_cast(&C.get_sparsity_pattern()));
+      C.clear();
+      sp_C.reinit (0,0,0);
+
+                                   // create a sparsity pattern for the
+                                   // matrix. we will go through all the
+                                   // rows in the matrix A, and for each
+                                   // column in a row we add the whole row
+                                   // of matrix B with that row number. This
+                                   // means that we will insert a lot of
+                                   // entries to each row, which is best
+                                   // handled by the
+                                   // CompressedSimpleSparsityPattern class.
+      {
+        CompressedSimpleSparsityPattern csp (m(), B.n());
+        for (unsigned int i = 0; i < csp.n_rows(); ++i)
+          {
+            const unsigned int * rows =
+              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i]];
+            const unsigned int *const end_rows =
+              &sp_A.get_column_numbers()[sp_A.get_rowstart_indices()[i+1]];
+            for (; rows != end_rows; ++rows)
+              {
+                const unsigned int col = *rows;
+                unsigned int * new_cols = const_cast
+                  (&sp_B.get_column_numbers()
+                   [sp_B.get_rowstart_indices()[col]]);
+                unsigned int * end_new_cols = const_cast
+                  (&sp_B.get_column_numbers()
+                   [sp_B.get_rowstart_indices()[col+1]]);
+
+                                   // if B has a diagonal, need to add that
+                                   // manually. this way, we maintain
+                                   // sortedness.
+                if (sp_B.optimize_diagonal() == true)
+                  {
+                    ++new_cols;
+                    csp.add(i, col);
+                  }
+
+                csp.add_entries (i, new_cols, end_new_cols, true);
+              }
+          }
+        sp_C.copy_from (csp);
+      }
+
+                                   // reinit matrix C from that information
+      C.reinit (sp_C);
+    }
+
+  Assert (C.m() == m(), ExcDimensionMismatch(C.m(), m()));
+  Assert (C.n() == B.n(), ExcDimensionMismatch(C.n(), B.n()));
+
+                                   // create an array that caches some
+                                   // elements that are going to be written
+                                   // into the new matrix.
+  unsigned int max_n_cols_B = 0;
+  for (unsigned int i=0; i new_entries(max_n_cols_B);
+
+                                   // now compute the actual entries: a
+                                   // matrix-matrix product involves three
+                                   // nested loops. One over the rows of A,
+                                   // for each row we then loop over all the
+                                   // columns, and then we need to multiply
+                                   // each element with all the elements in
+                                   // that row in B.
+  for (unsigned int i=0; i
 
 template
 somenumber
diff --git a/deal.II/lac/source/sparse_matrix.inst.in b/deal.II/lac/source/sparse_matrix.inst.in
index a8dafdb7cb..60c364f606 100644
--- a/deal.II/lac/source/sparse_matrix.inst.in
+++ b/deal.II/lac/source/sparse_matrix.inst.in
@@ -117,7 +117,6 @@ for (S1, S2 : REAL_SCALARS)
                                       const S1) const;
   }
 
-
 for (S1, S2, S3 : REAL_SCALARS;
      V1, V2 : DEAL_II_VEC_TEMPLATES)
   {
@@ -131,6 +130,13 @@ for (S1, S2, S3 : REAL_SCALARS;
     Tvmult_add (V1 &, const V2 &) const;
   }
 
+for (S1, S2, S3, S4: REAL_SCALARS)
+  {
+    template void SparseMatrix::
+    mmult (SparseMatrix &, const SparseMatrix &, const Vector&,
+           const bool) const;
+  }
+
 
 // complex instantiations
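To make the documented semantics concrete, a small verification sketch
(illustration only, not part of the patch): it checks C = A * diag(V) * B
entry by entry through the slow el() accessor, which returns zero for entries
outside a matrix's sparsity pattern; the tolerance is an arbitrary choice for
this sketch.

  #include <cmath>
  #include <lac/sparse_matrix.h>
  #include <lac/vector.h>

  bool matches_weighted_product (const SparseMatrix<double> &A,
                                 const SparseMatrix<double> &B,
                                 const SparseMatrix<double> &C,
                                 const Vector<double>       &V)
  {
    for (unsigned int i=0; i<C.m(); ++i)
      for (unsigned int j=0; j<C.n(); ++j)
        {
          // dense evaluation of the (i,j) entry of A * diag(V) * B
          double entry = 0;
          for (unsigned int k=0; k<A.n(); ++k)
            entry += A.el(i,k) * V(k) * B.el(k,j);

          if (std::fabs (C.el(i,j) - entry) >
              1e-12 * std::fabs (entry) + 1e-14)
            return false;
        }
    return true;
  }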