From: Martin Kronbichler
Date: Mon, 31 Aug 2009 13:04:05 +0000 (+0000)
Subject: Implement two more matrix-matrix multiplications: the case when B is transposed.
X-Git-Tag: v8.0.0~7216
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5f298c41b3e593313732c8a6e5a698fe167440bc;p=dealii.git

Implement two more matrix-matrix multiplications: the case when B is transposed.

git-svn-id: https://svn.dealii.org/trunk@19352 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/lac/include/lac/full_matrix.h b/deal.II/lac/include/lac/full_matrix.h
index 330c925fc8..1d4f113acf 100644
--- a/deal.II/lac/include/lac/full_matrix.h
+++ b/deal.II/lac/include/lac/full_matrix.h
@@ -1030,7 +1030,7 @@ class FullMatrix : public Table<2,number>
                                      //@}
                                      ///@name Multiplications
                                      //@{
-    
+
     /**
      * Matrix-matrix-multiplication.
      *
@@ -1093,6 +1093,71 @@ class FullMatrix : public Table<2,number>
     void Tmmult (FullMatrix<number2>       &C,
                  const FullMatrix<number2> &B,
                  const bool                 adding=false) const;
+
+    /**
+     * Matrix-matrix-multiplication using
+     * transpose of <tt>B</tt>.
+     *
+     * The optional parameter
+     * <tt>adding</tt> determines whether the
+     * result is stored in <tt>C</tt> or added
+     * to <tt>C</tt>.
+     *
+     * if (adding)
+     *   <i>C += A*B<sup>T</sup></i>
+     *
+     * if (!adding)
+     *   <i>C = A*B<sup>T</sup></i>
+     *
+     * Assumes that <tt>A</tt> and
+     * <tt>B</tt> have compatible sizes and
+     * that <tt>C</tt> already has the
+     * right size.
+     *
+     * This function uses the BLAS function
+     * Xgemm if the calling matrix has more
+     * than 15 rows and BLAS was detected
+     * during configuration. Using BLAS
+     * usually results in considerable
+     * performance gains.
+     */
+    template <typename number2>
+    void mTmult (FullMatrix<number2>       &C,
+                 const FullMatrix<number2> &B,
+                 const bool                 adding=false) const;
+
+    /**
+     * Matrix-matrix-multiplication using
+     * transpose of <tt>this</tt> and
+     * <tt>B</tt>.
+     *
+     * The optional parameter
+     * <tt>adding</tt> determines whether the
+     * result is stored in <tt>C</tt> or added
+     * to <tt>C</tt>.
+     *
+     * if (adding)
+     *   <i>C += A<sup>T</sup>*B<sup>T</sup></i>
+     *
+     * if (!adding)
+     *   <i>C = A<sup>T</sup>*B<sup>T</sup></i>
+     *
+     * Assumes that <tt>A</tt> and
+     * <tt>B</tt> have compatible
+     * sizes and that <tt>C</tt>
+     * already has the right size.
+     *
+     * This function uses the BLAS function
+     * Xgemm if the calling matrix has more
+     * than 15 columns and BLAS was
+     * detected during configuration. Using
+     * BLAS usually results in considerable
+     * performance gains.
+     */
+    template <typename number2>
+    void TmTmult (FullMatrix<number2>       &C,
+                  const FullMatrix<number2> &B,
+                  const bool                 adding=false) const;
 
     /**
      * Matrix-vector-multiplication.
diff --git a/deal.II/lac/include/lac/full_matrix.templates.h b/deal.II/lac/include/lac/full_matrix.templates.h
index dd19a5aa89..337579545b 100644
--- a/deal.II/lac/include/lac/full_matrix.templates.h
+++ b/deal.II/lac/include/lac/full_matrix.templates.h
@@ -582,7 +582,7 @@ void FullMatrix<number>::mmult (FullMatrix<number2>       &dst,
       types_are_equal<number,float>::value)
      &&
      types_are_equal<number,number2>::value)
-    if (this->m() > 15)
+    if (this->n()*this->m()*src.n() > 300)
      {
                                    // In case we have the BLAS
                                    // function gemm detected at
@@ -628,24 +628,20 @@ void FullMatrix<number>::mmult (FullMatrix<number2>       &dst,
 
   const unsigned int m = this->m(), n = src.n(), l = this->n();
 
-                                   // arrange the loops in a way
-                                   // that we can access contiguous
-                                   // fields at the innermost loop
-                                   // (even though this introduces
-                                   // a lot of writing into the
-                                   // destination matrix)
-  if (!adding)
-    dst = (number2)0;
-  for (unsigned int i=0; i<m; ++i)
-    for (unsigned int j=0; j<l; ++j)
-      {
-        number2 * dst_ptr = &dst(i,0);
-        for (unsigned int k=0; k<n; ++k)
-          dst_ptr[k] += (number2)(*this)(i,j) * (number2)src(j,k);
-      }
+                                   // arrange the loops in a way that
+                                   // we keep write operations low,
+                                   // (writing is usually more costly
+                                   // than reading), even though we
+                                   // need to access the data in src
+                                   // not in a contiguous way.
+  for (unsigned int i=0; i<m; ++i)
+    for (unsigned int j=0; j<n; ++j)
+      {
+        number2 add_value = adding ? dst(i,j) : 0.;
+        for (unsigned int k=0; k<l; ++k)
+          add_value += (number2)(*this)(i,k) * (number2)(src(k,j));
+        dst(i,j) = add_value;
+      }
 }
@@ -674,7 +670,7 @@ void FullMatrix<number>::Tmmult (FullMatrix<number2>       &dst,
       types_are_equal<number,float>::value)
      &&
      types_are_equal<number,number2>::value)
-    if (this->n() > 15)
+    if (this->n()*this->m()*src.n() > 300)
      {
                                    // In case we have the BLAS
                                    // function gemm detected at
@@ -720,24 +716,201 @@ void FullMatrix<number>::Tmmult (FullMatrix<number2>       &dst,
 
   const unsigned int m = n(), n = src.n(), l = this->m();
 
-                                   // arrange the loops in a way
-                                   // that we can access contiguous
-                                   // fields at the innermost loop
-                                   // (even though this introduces
-                                   // a lot of writing into the
-                                   // destination matrix)
-  if (!adding)
-    dst = (number2)0;
-  for (unsigned int i=0; i<m; ++i)
-    for (unsigned int j=0; j<l; ++j)
-      {
-        number2 * dst_ptr = &dst(i,0);
-        for (unsigned int k=0; k<n; ++k)
-          dst_ptr[k] += (number2)(*this)(j,i) * (number2)src(j,k);
-      }
+                                   // arrange the loops in a way that
+                                   // we keep write operations low,
+                                   // (writing is usually more costly
+                                   // than reading), even though we
+                                   // need to access the data in src
+                                   // not in a contiguous way. However,
+                                   // we should usually end up in the
+                                   // optimized gemm operation in case
+                                   // the matrix is big, so this
+                                   // shouldn't be too bad.
+  for (unsigned int i=0; i<m; ++i)
+    for (unsigned int j=0; j<n; ++j)
+      {
+        number2 add_value = adding ? dst(i,j) : 0.;
+        for (unsigned int k=0; k<l; ++k)
+          add_value += (number2)(*this)(k,i) * (number2)(src(k,j));
+        dst(i,j) = add_value;
+      }
+}
+
+
+template <typename number>
+template <typename number2>
+void FullMatrix<number>::mTmult (FullMatrix<number2>       &dst,
+                                 const FullMatrix<number2> &src,
+                                 const bool                 adding) const
+{
+  Assert (!this->empty(), ExcEmptyMatrix());
+  Assert (n() == src.n(), ExcDimensionMismatch(n(), src.n()));
+  Assert (dst.n() == src.m(), ExcDimensionMismatch(dst.n(), src.m()));
+  Assert (dst.m() == m(), ExcDimensionMismatch(m(), dst.m()));
+
+                                   // see if we can use BLAS algorithms
+                                   // for this and if the type for 'number'
+                                   // works for us (it is usually not
+                                   // efficient to use BLAS for very small
+                                   // matrices):
+#if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
+  if ((types_are_equal<number,double>::value
+       ||
+       types_are_equal<number,float>::value)
+      &&
+      types_are_equal<number,number2>::value)
+    if (this->n()*this->m()*src.m() > 300)
+      {
+                                   // In case we have the BLAS
+                                   // function gemm detected at
+                                   // configure, we use that algorithm
+                                   // for matrix-matrix multiplication
+                                   // since it provides better
+                                   // performance than the deal.II
+                                   // native function (it uses cache
+                                   // and register blocking in order to
+                                   // access local data).
+
+        const int m = src.m();
+        const int n = this->m();
+        const int k = this->n();
+        const char *notrans = "n";
+        const char *trans = "t";
+
+        const number alpha = 1.;
+        const number beta = (adding == true) ? 1. : 0.;
+
+                                   // Use the BLAS function gemm for
+                                   // calculating the matrix-matrix
+                                   // product.
+        internal::gemm_switch<number>::gemm(trans, notrans, &m, &n, &k, &alpha, &src(0,0), &k,
+                                            this->val, &k, &beta, &dst(0,0), &m);
+
+        return;
+      }
+
+#endif
+
+  const unsigned int m = this->m(), n = src.m(), l = this->n();
+
+                                   // arrange the loops in a way that
+                                   // we keep write operations low,
+                                   // (writing is usually more costly
+                                   // than reading).
+  for (unsigned int i=0; i<m; ++i)
+    for (unsigned int j=0; j<n; ++j)
+      {
+        number2 add_value = adding ? dst(i,j) : 0.;
+        for (unsigned int k=0; k<l; ++k)
+          add_value += (number2)(*this)(i,k) * (number2)(src(j,k));
+        dst(i,j) = add_value;
+      }
+}
+
+
+template <typename number>
+template <typename number2>
+void FullMatrix<number>::TmTmult (FullMatrix<number2>       &dst,
+                                  const FullMatrix<number2> &src,
+                                  const bool                 adding) const
+{
+  Assert (!this->empty(), ExcEmptyMatrix());
+  Assert (m() == src.n(), ExcDimensionMismatch(m(), src.n()));
+  Assert (n() == dst.m(), ExcDimensionMismatch(n(), dst.m()));
+  Assert (src.m() == dst.n(), ExcDimensionMismatch(src.m(), dst.n()));
+
+
+                                   // see if we can use BLAS algorithms
+                                   // for this and if the type for 'number'
+                                   // works for us (it is usually not
+                                   // efficient to use BLAS for very small
+                                   // matrices):
+#if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
+  if ((types_are_equal<number,double>::value
+       ||
+       types_are_equal<number,float>::value)
+      &&
+      types_are_equal<number,number2>::value)
+    if (this->n()*this->m()*src.m() > 300)
+      {
+                                   // In case we have the BLAS
+                                   // function gemm detected at
+                                   // configure, we use that algorithm
+                                   // for matrix-matrix multiplication
+                                   // since it provides better
+                                   // performance than the deal.II
+                                   // native function (it uses cache
+                                   // and register blocking in order to
+                                   // access local data).
+                                   //
+                                   // Note that BLAS/LAPACK stores
+                                   // matrix elements column-wise (i.e.,
+                                   // all values in one column, then all
+                                   // in the next, etc.), whereas the
+                                   // FullMatrix stores them row-wise.
+                                   // We ignore that difference, and
+                                   // give our row-wise data to BLAS,
+                                   // let BLAS build the product of
+                                   // transpose matrices, and read the
+                                   // result as if it were row-wise
+                                   // again. In other words, we calculate
+                                   // (B A)^T, which is A^T B^T.
+
+        const int m = src.n();
+        const int n = this->n();
+        const int k = this->m();
+        const char *trans = "t";
+
+        const number alpha = 1.;
+        const number beta = (adding == true) ? 1. : 0.;
+
+                                   // Use the BLAS function gemm for
+                                   // calculating the matrix-matrix
+                                   // product.
+        internal::gemm_switch<number>::gemm(trans, trans, &m, &n, &k, &alpha, &src(0,0), &k,
+                                            this->val, &n, &beta, &dst(0,0), &m);
+
+        return;
+      }
+
+#endif
+
+  const unsigned int m = n(), n = src.m(), l = this->m();
+
+                                   // arrange the loops in a way that
+                                   // we keep write operations low,
+                                   // (writing is usually more costly
+                                   // than reading), even though we
+                                   // need to access the data in the
+                                   // calling matrix not in a
+                                   // contiguous way. However, we
+                                   // should usually end up in the
+                                   // optimized gemm operation in case
+                                   // the matrix is big, so this
+                                   // shouldn't be too bad.
+  for (unsigned int i=0; i<m; ++i)
+    for (unsigned int j=0; j<n; ++j)
+      {
+        number2 add_value = adding ? dst(i,j) : 0.;
+        for (unsigned int k=0; k<l; ++k)
+          add_value += (number2)(*this)(k,i) * (number2)(src(j,k));
+        dst(i,j) = add_value;
+      }
+}
+
diff --git a/deal.II/lac/include/lac/lapack_full_matrix.h b/deal.II/lac/include/lac/lapack_full_matrix.h
--- a/deal.II/lac/include/lac/lapack_full_matrix.h
+++ b/deal.II/lac/include/lac/lapack_full_matrix.h
@@ ... @@
 template <typename number> class FullMatrix;
 
 /**
- * A variant of FullMatrix using LAPACK functions whereever
+ * A variant of FullMatrix using LAPACK functions wherever
  * possible. In order to do this, the matrix is stored in transposed
  * order. The element access functions hide this fact by reverting the
  * transposition.
diff --git a/deal.II/lac/source/full_matrix.inst.in b/deal.II/lac/source/full_matrix.inst.in
index 2d7537b2ab..ef1ac6d9c9 100644
--- a/deal.II/lac/source/full_matrix.inst.in
+++ b/deal.II/lac/source/full_matrix.inst.in
@@ -81,6 +81,10 @@ for (S1, S2 : REAL_SCALARS)
     void FullMatrix<S1>::mmult<S2> (FullMatrix<S2>&, const FullMatrix<S2>&, const bool) const;
     template
     void FullMatrix<S1>::Tmmult<S2> (FullMatrix<S2>&, const FullMatrix<S2>&, const bool) const;
+    template
+    void FullMatrix<S1>::mTmult<S2> (FullMatrix<S2>&, const FullMatrix<S2>&, const bool) const;
+    template
+    void FullMatrix<S1>::TmTmult<S2> (FullMatrix<S2>&, const FullMatrix<S2>&, const bool) const;
 
     template
     void FullMatrix<S1>::invert<S2> (const FullMatrix<S2>&);
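
For reference, the two new functions follow the calling convention of the existing
mmult() and Tmmult(): the caller sizes the destination matrix, and the optional
adding flag switches between overwriting and accumulating. Below is a minimal
usage sketch, not part of the commit itself; it assumes the in-tree include path
<lac/full_matrix.h> and the dealii namespace as they exist on this branch:

    #include <lac/full_matrix.h>

    int main ()
    {
      // A is 2x3 and B is 4x3, so B^T is 3x4 and C = A*B^T is 2x4.
      dealii::FullMatrix<double> A (2,3), B (4,3), C (2,4);
      for (unsigned int i=0; i<A.m(); ++i)
        for (unsigned int j=0; j<A.n(); ++j)
          A(i,j) = 1. + i + j;
      for (unsigned int i=0; i<B.m(); ++i)
        for (unsigned int j=0; j<B.n(); ++j)
          B(i,j) = 1. * i * (j+1);

      A.mTmult (C, B);                 // C  = A * B^T
      A.mTmult (C, B, true);           // C += A * B^T

      // For TmTmult, A^T is 3x2, so the transpose of the second factor
      // must have two rows: with B2 of size 5x2, D = A^T * B2^T is 3x5.
      dealii::FullMatrix<double> B2 (5,2), D (3,5);
      A.TmTmult (D, B2);               // D = A^T * B2^T

      return 0;
    }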
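
The storage trick described in the comments above can also be checked in
isolation. The sketch below is not deal.II code: gemm() here is a hand-written,
naive stand-in for the column-major BLAS gemm kernel (restricted to the alpha=1,
beta=0 case), and main() replays the call pattern of mTmult — hand the row-major
buffers of A and B to a column-major routine, ask it for B*A^T, and read the
result buffer back row-major, which is exactly A*B^T:

    #include <cassert>

    // Naive column-major stand-in for BLAS gemm (alpha=1, beta=0 only):
    // C = op(A) * op(B), where op is 'n' or 't' and element (r,c) of a
    // matrix with leading dimension ld sits at buf[c*ld + r].
    void gemm (const char transa, const char transb,
               const int m, const int n, const int k,
               const double *A, const int lda,
               const double *B, const int ldb,
               double *C, const int ldc)
    {
      for (int j=0; j<n; ++j)
        for (int i=0; i<m; ++i)
          {
            double s = 0.;
            for (int p=0; p<k; ++p)
              s += ((transa == 'n') ? A[p*lda+i] : A[i*lda+p]) *
                   ((transb == 'n') ? B[j*ldb+p] : B[p*ldb+j]);
            C[j*ldc+i] = s;
          }
    }

    int main ()
    {
      // Row-major A (2x3) and B (4x3); we want C = A * B^T, which is 2x4.
      const double A[2*3] = {1, 2, 3,
                             4, 5, 6};
      const double B[4*3] = {1, 0, 2,
                             0, 1, 3,
                             1, 1, 0,
                             2, 0, 1};
      double C[2*4];

      // The column-major kernel sees the row-major buffers as A^T (3x2)
      // and B^T (3x4). Asking it for B * A^T -- "t" on the B buffer, "n"
      // on the A buffer -- produces the 4x2 column-major matrix
      // (A*B^T)^T, whose buffer read row-major is the 2x4 product A*B^T.
      gemm ('t', 'n', /*m=*/4, /*n=*/2, /*k=*/3,
            B, /*lda=*/3,
            A, /*ldb=*/3,
            C, /*ldc=*/4);

      // Cross-check against a direct row-major triple loop.
      for (int i=0; i<2; ++i)
        for (int j=0; j<4; ++j)
          {
            double s = 0.;
            for (int p=0; p<3; ++p)
              s += A[i*3+p] * B[j*3+p];   // A(i,p) * B^T(p,j)
            assert (C[i*4+j] == s);       // row-major read of the result
          }
      return 0;
    }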