From: kronbichler
Date: Tue, 30 Apr 2013 14:11:19 +0000 (+0000)
Subject: Simplify FullMatrix::Tmmult and FullMatrix::mTmult in case the two input matrices...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9659fcf6951a6098d5c1bd39bcb79676c033db16;p=dealii-svn.git

Simplify FullMatrix::Tmmult and FullMatrix::mTmult in case the two input matrices are the same.

git-svn-id: https://svn.dealii.org/trunk@29406 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/lac/full_matrix.templates.h b/deal.II/include/deal.II/lac/full_matrix.templates.h
index 8a9a61b5b6..41febef428 100644
--- a/deal.II/include/deal.II/lac/full_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/full_matrix.templates.h
@@ -539,10 +539,8 @@ void FullMatrix<number>::mmult (FullMatrix<number2> &dst,
   Assert (dst.n() == src.n(), ExcDimensionMismatch(dst.n(), src.n()));
   Assert (dst.m() == m(), ExcDimensionMismatch(m(), dst.m()));
 
-  // see if we can use BLAS algorithms
-  // for this and if the type for 'number'
-  // works for us (it is usually not
-  // efficient to use BLAS for very small
+  // see if we can use BLAS algorithms for this and if the type for 'number'
+  // works for us (it is usually not efficient to use BLAS for very small
   // matrices):
 #if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
   if ((types_are_equal<number,double>::value
@@ -552,28 +550,17 @@ void FullMatrix<number>::mmult (FullMatrix<number2> &dst,
       types_are_equal<number,number2>::value)
     if (this->n()*this->m()*src.n() > 300)
       {
-        // In case we have the BLAS
-        // function gemm detected at
-        // configure, we use that algorithm
-        // for matrix-matrix multiplication
-        // since it provides better
-        // performance than the deal.II
-        // native function (it uses cache
-        // and register blocking in order to
-        // access local data).
+        // In case we have the BLAS function gemm detected at configure, we
+        // use that algorithm for matrix-matrix multiplication since it
+        // provides better performance than the deal.II native function (it
+        // uses cache and register blocking in order to access local data).
         //
-        // Note that BLAS/LAPACK stores
-        // matrix elements column-wise (i.e.,
-        // all values in one column, then all
-        // in the next, etc.), whereas the
-        // FullMatrix stores them row-wise.
-        // We ignore that difference, and
-        // give our row-wise data to BLAS,
-        // let BLAS build the product of
-        // transpose matrices, and read the
-        // result as if it were row-wise
-        // again. In other words, we calculate
-        // (B^T A^T)^T, which is AB.
+        // Note that BLAS/LAPACK stores matrix elements column-wise (i.e., all
+        // values in one column, then all in the next, etc.), whereas the
+        // FullMatrix stores them row-wise. We ignore that difference, and
+        // give our row-wise data to BLAS, let BLAS build the product of
+        // transpose matrices, and read the result as if it were row-wise
+        // again. In other words, we calculate (B^T A^T)^T, which is AB.
 
         const int m = src.n();
         const int n = this->m();
@@ -596,12 +583,9 @@ void FullMatrix<number>::mmult (FullMatrix<number2> &dst,
 
   const unsigned int m = this->m(), n = src.n(), l = this->n();
 
-  // arrange the loops in a way that
-  // we keep write operations low,
-  // (writing is usually more costly
-  // than reading), even though we
-  // need to access the data in src
-  // not in a contiguous way.
+  // arrange the loops in a way that we keep write operations low, (writing is
+  // usually more costly than reading), even though we need to access the data
+  // in src not in a contiguous way.
   for (unsigned int i=0; i<m; i++)
     for (unsigned int j=0; j<n; j++)
       {
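The (B^T A^T)^T = AB trick described in the comments above is worth a concrete illustration: an m-by-n array stored row-wise is, byte for byte, an n-by-m array stored column-wise, so a column-major routine sees our A and B as A^T and B^T. Multiplying them in swapped order and reading the result row-wise again yields AB. A minimal self-contained sketch with plain loops standing in for gemm (all names here are illustrative, not deal.II or BLAS API):

    #include <cassert>
    #include <cstddef>

    int main()
    {
      const double A[2][3] = {{1, 2, 3}, {4, 5, 6}};      // row-major
      const double B[3][2] = {{7, 8}, {9, 10}, {11, 12}}; // row-major

      // Reference product C = A*B, computed directly in row-major terms.
      double C[2][2] = {};
      for (std::size_t i = 0; i < 2; ++i)
        for (std::size_t j = 0; j < 2; ++j)
          for (std::size_t k = 0; k < 3; ++k)
            C[i][j] += A[i][k] * B[k][j];

      // Column-major view of the same memory: B's data reads as B^T (2x3,
      // leading dimension 2), A's data as A^T (3x2, leading dimension 3).
      // Multiply in swapped order, writing D = B^T * A^T column-wise.
      const double *Bt = &B[0][0];
      const double *At = &A[0][0];
      double D[4] = {};
      for (std::size_t j = 0; j < 2; ++j)
        for (std::size_t i = 0; i < 2; ++i)
          for (std::size_t k = 0; k < 3; ++k)
            D[j * 2 + i] += Bt[k * 2 + i] * At[j * 3 + k];

      // Reading D's memory row-wise recovers A*B, since (B^T A^T)^T = AB.
      for (std::size_t i = 0; i < 2; ++i)
        for (std::size_t j = 0; j < 2; ++j)
          assert(D[i * 2 + j] == C[i][j]);
      return 0;
    }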
@@ ... @@ void FullMatrix<number>::Tmmult (FullMatrix<number2> &dst,
   Assert (src.n() == dst.n(), ExcDimensionMismatch(src.n(), dst.n()));
 
-  // see if we can use BLAS algorithms
-  // for this and if the type for 'number'
-  // works for us (it is usually not
-  // efficient to use BLAS for very small
+  // see if we can use BLAS algorithms for this and if the type for 'number'
+  // works for us (it is usually not efficient to use BLAS for very small
   // matrices):
 #if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
   if ((types_are_equal<number,double>::value
@@ -639,28 +621,17 @@ void FullMatrix<number>::Tmmult (FullMatrix<number2> &dst,
       types_are_equal<number,number2>::value)
     if (this->n()*this->m()*src.n() > 300)
       {
-        // In case we have the BLAS
-        // function gemm detected at
-        // configure, we use that algorithm
-        // for matrix-matrix multiplication
-        // since it provides better
-        // performance than the deal.II
-        // native function (it uses cache
-        // and register blocking in order to
-        // access local data).
+        // In case we have the BLAS function gemm detected at configure, we
+        // use that algorithm for matrix-matrix multiplication since it
+        // provides better performance than the deal.II native function (it
+        // uses cache and register blocking in order to access local data).
         //
-        // Note that BLAS/LAPACK stores
-        // matrix elements column-wise (i.e.,
-        // all values in one column, then all
-        // in the next, etc.), whereas the
-        // FullMatrix stores them row-wise.
-        // We ignore that difference, and
-        // give our row-wise data to BLAS,
-        // let BLAS build the product of
-        // transpose matrices, and read the
-        // result as if it were row-wise
-        // again. In other words, we calculate
-        // (B^T A)^T, which is A^T B.
+        // Note that BLAS/LAPACK stores matrix elements column-wise (i.e., all
+        // values in one column, then all in the next, etc.), whereas the
+        // FullMatrix stores them row-wise. We ignore that difference, and
+        // give our row-wise data to BLAS, let BLAS build the product of
+        // transpose matrices, and read the result as if it were row-wise
+        // again. In other words, we calculate (B^T A)^T, which is A^T B.
 
         const int m = src.n();
         const int n = this->n();
@@ -671,8 +642,7 @@ void FullMatrix<number>::Tmmult (FullMatrix<number2> &dst,
         const number alpha = 1.;
         const number beta = (adding == true) ? 1. : 0.;
 
-        // Use the BLAS function gemm for
-        // calculating the matrix-matrix
+        // Use the BLAS function gemm for calculating the matrix-matrix
         // product.
         gemm(notrans, trans, &m, &n, &k, &alpha, &src(0,0), &m,
              &this->values[0], &n, &beta, &dst(0,0), &m);
@@ -684,24 +654,37 @@ void FullMatrix<number>::Tmmult (FullMatrix<number2> &dst,
 
   const unsigned int m = n(), n = src.n(), l = this->m();
 
-  // arrange the loops in a way that
-  // we keep write operations low,
-  // (writing is usually more costly
-  // than reading), even though we
-  // need to access the data in src
-  // not in a contiguous way. However,
-  // we should usually end up in the
-  // optimized gemm operation in case
-  // the matrix is big, so this
-  // shouldn't be too bad.
-  for (unsigned int i=0; i<m; i++)
-    for (unsigned int j=0; j<n; j++)
-      {
-        number2 add_value = adding ? dst(i,j) : 0.;
-        for (unsigned int k=0; k<l; k++)
-          add_value += (number2)(*this)(k,i) * (number2)(src(k,j));
-        dst(i,j) = add_value;
-      }
+  // in case the matrix is multiplied with itself, the result is symmetric
+  if (PointerComparison::equal(this, &src))
+    for (unsigned int i=0; i<m; ++i)
+      for (unsigned int j=i; j<m; ++j)
+        {
+          number2 add_value = 0.;
+          for (unsigned int k=0; k<l; ++k)
+            add_value += (number2)(*this)(k,i) * (number2)(*this)(k,j);
+          if (adding)
+            {
+              dst(i,j) += add_value;
+              if (i<j)
+                dst(j,i) += add_value;
+            }
+          else
+            dst(i,j) = dst(j,i) = add_value;
+        }
+  else
+    // arrange the loops in a way that we keep write operations low, (writing
+    // is usually more costly than reading), even though we need to access
+    // the data in src not in a contiguous way. However, we should usually
+    // end up in the optimized gemm operation in case the matrix is big, so
+    // this shouldn't be too bad.
+    for (unsigned int i=0; i<m; i++)
+      for (unsigned int j=0; j<n; j++)
+        {
+          number2 add_value = adding ? dst(i,j) : 0.;
+          for (unsigned int k=0; k<l; k++)
+            add_value += (number2)(*this)(k,i) * (number2)(src(k,j));
+          dst(i,j) = add_value;
+        }
 }
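The payoff of the same-matrix branch is easy to see in isolation: for C = A^T A, entry C(i,j) is the scalar product of columns i and j of A, and C(i,j) = C(j,i), so only the upper triangle has to be computed and the rest mirrored, roughly halving the inner products. A stand-alone sketch of the idea (illustrative code, not the deal.II implementation; all names are made up):

    #include <cstddef>
    #include <vector>

    // Compute C = A^T * A for a row-major m x n matrix A, exploiting the
    // symmetry of the result: only entries with j >= i are computed, and
    // the lower triangle is filled in by mirroring.
    void tmmult_same_matrix(const std::vector<double> &A, // m*n entries
                            std::vector<double>       &C, // n*n entries
                            const std::size_t          m,
                            const std::size_t          n)
    {
      for (std::size_t i = 0; i < n; ++i)
        for (std::size_t j = i; j < n; ++j)
          {
            double sum = 0.;
            for (std::size_t k = 0; k < m; ++k)
              sum += A[k * n + i] * A[k * n + j]; // columns i and j of A
            C[i * n + j] = C[j * n + i] = sum;    // write entry and mirror
          }
    }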
@@ ... @@ void FullMatrix<number>::mTmult (FullMatrix<number2> &dst,
   Assert (dst.n() == src.m(), ExcDimensionMismatch(dst.n(), src.m()));
   Assert (dst.m() == m(), ExcDimensionMismatch(m(), dst.m()));
 
-  // see if we can use BLAS algorithms
-  // for this and if the type for 'number'
-  // works for us (it is usually not
-  // efficient to use BLAS for very small
+  // see if we can use BLAS algorithms for this and if the type for 'number'
+  // works for us (it is usually not efficient to use BLAS for very small
   // matrices):
 #if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
   if ((types_are_equal<number,double>::value
@@ -730,28 +711,17 @@ void FullMatrix<number>::mTmult (FullMatrix<number2> &dst,
       types_are_equal<number,number2>::value)
     if (this->n()*this->m()*src.m() > 300)
       {
-        // In case we have the BLAS
-        // function gemm detected at
-        // configure, we use that algorithm
-        // for matrix-matrix multiplication
-        // since it provides better
-        // performance than the deal.II
-        // native function (it uses cache
-        // and register blocking in order to
-        // access local data).
+        // In case we have the BLAS function gemm detected at configure, we
+        // use that algorithm for matrix-matrix multiplication since it
+        // provides better performance than the deal.II native function (it
+        // uses cache and register blocking in order to access local data).
         //
-        // Note that BLAS/LAPACK stores
-        // matrix elements column-wise (i.e.,
-        // all values in one column, then all
-        // in the next, etc.), whereas the
-        // FullMatrix stores them row-wise.
-        // We ignore that difference, and
-        // give our row-wise data to BLAS,
-        // let BLAS build the product of
-        // transpose matrices, and read the
-        // result as if it were row-wise
-        // again. In other words, we calculate
-        // (B A^T)^T, which is AB^T.
+        // Note that BLAS/LAPACK stores matrix elements column-wise (i.e., all
+        // values in one column, then all in the next, etc.), whereas the
+        // FullMatrix stores them row-wise. We ignore that difference, and
+        // give our row-wise data to BLAS, let BLAS build the product of
+        // transpose matrices, and read the result as if it were row-wise
+        // again. In other words, we calculate (B A^T)^T, which is AB^T.
 
         const int m = src.m();
         const int n = this->m();
@@ -762,8 +732,7 @@ void FullMatrix<number>::mTmult (FullMatrix<number2> &dst,
         const number alpha = 1.;
         const number beta = (adding == true) ? 1. : 0.;
 
-        // Use the BLAS function gemm for
-        // calculating the matrix-matrix
+        // Use the BLAS function gemm for calculating the matrix-matrix
        // product.
         gemm(trans, notrans, &m, &n, &k, &alpha, &src(0,0), &k,
              &this->values[0], &k, &beta, &dst(0,0), &m);
@@ -775,18 +744,34 @@ void FullMatrix<number>::mTmult (FullMatrix<number2> &dst,
 
   const unsigned int m = this->m(), n = src.m(), l = this->n();
 
-  // arrange the loops in a way that
-  // we keep write operations low,
-  // (writing is usually more costly
-  // than reading).
-  for (unsigned int i=0; i<m; i++)
-    for (unsigned int j=0; j<n; j++)
-      {
-        number2 add_value = adding ? dst(i,j) : 0.;
-        for (unsigned int k=0; k<l; k++)
-          add_value += (number2)(*this)(i,k) * (number2)(src(j,k));
-        dst(i,j) = add_value;
-      }
+  // in case the matrix is multiplied with itself, the result is symmetric
+  if (PointerComparison::equal(this, &src))
+    for (unsigned int i=0; i<m; ++i)
+      for (unsigned int j=i; j<m; ++j)
+        {
+          number2 add_value = 0.;
+          for (unsigned int k=0; k<l; ++k)
+            add_value += (number2)(*this)(i,k) * (number2)(*this)(j,k);
+          if (adding)
+            {
+              dst(i,j) += add_value;
+              if (i<j)
+                dst(j,i) += add_value;
+            }
+          else
+            dst(i,j) = dst(j,i) = add_value;
+        }
+  else
+    // arrange the loops in a way that we keep write operations low, (writing
+    // is usually more costly than reading).
+    for (unsigned int i=0; i<m; i++)
+      for (unsigned int j=0; j<n; j++)
+        {
+          number2 add_value = adding ? dst(i,j) : 0.;
+          for (unsigned int k=0; k<l; k++)
+            add_value += (number2)(*this)(i,k) * (number2)(src(j,k));
+          dst(i,j) = add_value;
+        }
 }
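One detail worth noting in all four functions: the adding flag becomes gemm's beta, so the product either overwrites dst (beta = 0) or is accumulated into it (beta = 1), and the native loops mirror this by starting add_value from dst(i,j). A usage sketch (matrix sizes chosen arbitrarily, fill code omitted):

    #include <deal.II/lac/full_matrix.h>

    void example()
    {
      dealii::FullMatrix<double> A(3,4), B(3,4), C(3,3);
      // ... fill A and B ...
      A.mTmult(C, B);        // C  = A * B^T  (adding defaults to false, beta = 0)
      A.mTmult(C, B, true);  // C += A * B^T  (adding == true, beta = 1)
    }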
@@ ... @@ void FullMatrix<number>::TmTmult (FullMatrix<number2> &dst,
   Assert (src.m() == dst.n(), ExcDimensionMismatch(src.m(), dst.n()));
 
-  // see if we can use BLAS algorithms
-  // for this and if the type for 'number'
-  // works for us (it is usually not
-  // efficient to use BLAS for very small
+  // see if we can use BLAS algorithms for this and if the type for 'number'
+  // works for us (it is usually not efficient to use BLAS for very small
   // matrices):
 #if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
   if ((types_are_equal<number,double>::value
@@ -816,28 +799,17 @@ void FullMatrix<number>::TmTmult (FullMatrix<number2> &dst,
       types_are_equal<number,number2>::value)
     if (this->n()*this->m()*src.m() > 300)
       {
-        // In case we have the BLAS
-        // function gemm detected at
-        // configure, we use that algorithm
-        // for matrix-matrix multiplication
-        // since it provides better
-        // performance than the deal.II
-        // native function (it uses cache
-        // and register blocking in order to
-        // access local data).
+        // In case we have the BLAS function gemm detected at configure, we
+        // use that algorithm for matrix-matrix multiplication since it
+        // provides better performance than the deal.II native function (it
+        // uses cache and register blocking in order to access local data).
         //
-        // Note that BLAS/LAPACK stores
-        // matrix elements column-wise (i.e.,
-        // all values in one column, then all
-        // in the next, etc.), whereas the
-        // FullMatrix stores them row-wise.
-        // We ignore that difference, and
-        // give our row-wise data to BLAS,
-        // let BLAS build the product of
-        // transpose matrices, and read the
-        // result as if it were row-wise
-        // again. In other words, we calculate
-        // (B A)^T, which is A^T B^T.
+        // Note that BLAS/LAPACK stores matrix elements column-wise (i.e., all
+        // values in one column, then all in the next, etc.), whereas the
+        // FullMatrix stores them row-wise. We ignore that difference, and
+        // give our row-wise data to BLAS, let BLAS build the product of
+        // transpose matrices, and read the result as if it were row-wise
+        // again. In other words, we calculate (B A)^T, which is A^T B^T.
 
         const int m = src.n();
         const int n = this->n();
@@ -847,8 +819,7 @@ void FullMatrix<number>::TmTmult (FullMatrix<number2> &dst,
         const number alpha = 1.;
         const number beta = (adding == true) ? 1. : 0.;
 
-        // Use the BLAS function gemm for
-        // calculating the matrix-matrix
+        // Use the BLAS function gemm for calculating the matrix-matrix
         // product.
         gemm(trans, trans, &m, &n, &k, &alpha, &src(0,0), &k,
              &this->values[0], &n, &beta, &dst(0,0), &m);
@@ -860,17 +831,11 @@ void FullMatrix<number>::TmTmult (FullMatrix<number2> &dst,
 
   const unsigned int m = n(), n = src.m(), l = this->m();
 
-  // arrange the loops in a way that
-  // we keep write operations low,
-  // (writing is usually more costly
-  // than reading), even though we
-  // need to access the data in the
-  // calling matrix not in a
-  // contiguous way. However, we
-  // should usually end up in the
-  // optimized gemm operation in case
-  // the matrix is big, so this
-  // shouldn't be too bad.
+  // arrange the loops in a way that we keep write operations low, (writing is
+  // usually more costly than reading), even though we need to access the data
+  // in the calling matrix in a non-contiguous way, possibly leading to cache
+  // misses. However, we should usually end up in the optimized gemm operation
+  // in case the matrix is big, so this shouldn't be too bad.
   for (unsigned int i=0; i<m; i++)
     for (unsigned int j=0; j<n; j++)
       {
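For reference, the expected values hard-coded in the two new tests below follow directly from the 3x3 matrix A used there:

    A = \begin{pmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \end{pmatrix},
    \qquad
    A A^T = \begin{pmatrix} 14 & 32 & 50 \\ 32 & 77 & 122 \\ 50 & 122 & 194 \end{pmatrix},
    \qquad
    A^T A = \begin{pmatrix} 66 & 78 & 90 \\ 78 & 93 & 108 \\ 90 & 108 & 126 \end{pmatrix}.

Both products are symmetric, which is exactly the property the new same-matrix branches in mTmult and Tmmult exploit.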
diff --git a/tests/lac/full_matrix_07.cc b/tests/lac/full_matrix_07.cc
new file mode 100644
--- /dev/null
+++ b/tests/lac/full_matrix_07.cc
@@ -0,0 +1,50 @@
+//---------------------------- full_matrix_07.cc,v ---------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2013 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------- full_matrix_07.cc,v ---------------------------
+
+//check method mTmult of FullMatrix, symmetric case
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/full_matrix.h>
+#include <fstream>
+#include <iostream>
+
+#include <cmath>
+#include <cstdlib>
+
+const double entries_A[9] = { 1,2,3,4,5,6,7,8,9 };
+const double compare[9] = { 14,32,50,32,77,122,50,122,194 };
+
+int
+main ()
+{
+  std::ofstream logfile("full_matrix_07/output");
+  deallog << std::fixed;
+  deallog << std::setprecision(3);
+  deallog.attach(logfile);
+  deallog.depth_console(0);
+  deallog.threshold_double(1.e-10);
+
+  FullMatrix<double> A(3,3,entries_A);
+  FullMatrix<double> C(3,3);
+  FullMatrix<double> D(3,3,compare);
+
+  //compute C= A*A^T
+  A.mTmult(C,A);
+
+  C.add(-1., D);
+  Assert(C.frobenius_norm() < 1e-12, ExcInternalError());
+
+  deallog << "OK" << std::endl;
+}
diff --git a/tests/lac/full_matrix_07/cmp/generic b/tests/lac/full_matrix_07/cmp/generic
new file mode 100644
index 0000000000..0fd8fc12f0
--- /dev/null
+++ b/tests/lac/full_matrix_07/cmp/generic
@@ -0,0 +1,2 @@
+
+DEAL::OK
diff --git a/tests/lac/full_matrix_08.cc b/tests/lac/full_matrix_08.cc
new file mode 100644
index 0000000000..e830b5f6ff
--- /dev/null
+++ b/tests/lac/full_matrix_08.cc
@@ -0,0 +1,50 @@
+//---------------------------- full_matrix_08.cc,v ---------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2013 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------- full_matrix_08.cc,v ---------------------------
+
+//check method Tmmult of FullMatrix, symmetric case
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/full_matrix.h>
+#include <fstream>
+#include <iostream>
+
+#include <cmath>
+#include <cstdlib>
+
+const double entries_A[9] = { 1,2,3,4,5,6,7,8,9 };
+const double compare[9] = { 66,78,90,78,93,108,90,108,126 };
+
+int
+main ()
+{
+  std::ofstream logfile("full_matrix_08/output");
+  deallog << std::fixed;
+  deallog << std::setprecision(3);
+  deallog.attach(logfile);
+  deallog.depth_console(0);
+  deallog.threshold_double(1.e-10);
+
+  FullMatrix<double> A(3,3,entries_A);
+  FullMatrix<double> C(3,3);
+  FullMatrix<double> D(3,3,compare);
+
+  //compute C= A^T*A
+  A.Tmmult(C,A);
+
+  C.add(-1., D);
+  Assert(C.frobenius_norm() < 1e-12, ExcInternalError());
+
+  deallog << "OK" << std::endl;
+}
diff --git a/tests/lac/full_matrix_08/cmp/generic b/tests/lac/full_matrix_08/cmp/generic
new file mode 100644
index 0000000000..0fd8fc12f0
--- /dev/null
+++ b/tests/lac/full_matrix_08/cmp/generic
@@ -0,0 +1,2 @@
+
+DEAL::OK