From: Martin Kronbichler
Date: Tue, 31 Mar 2009 15:34:35 +0000 (+0000)
Subject: Always insert something into the diagonal with distribute_local_to_global. Use BLAS...
X-Git-Tag: v8.0.0~7924
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=bae39951f9472e711af479ed865d7b9ba10aac4f;p=dealii.git

Always insert something into the diagonal with distribute_local_to_global. Use BLAS
function GEMM for mmult if matrix is sufficiently big (> 25 cols).

git-svn-id: https://svn.dealii.org/trunk@18539 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/base/include/base/config.h.in b/deal.II/base/include/base/config.h.in
index 97b4bb871d..6263f63874 100644
--- a/deal.II/base/include/base/config.h.in
+++ b/deal.II/base/include/base/config.h.in
@@ -247,6 +247,9 @@
 /* Define to 1 if you have the `dgeev_' function. */
 #undef HAVE_DGEEV_
 
+/* Define to 1 if you have the `dgemm_' function. */
+#undef HAVE_DGEMM_
+
 /* Define to 1 if you have the `dgemv_' function. */
 #undef HAVE_DGEMV_
 
@@ -354,6 +357,9 @@
 /* Define to 1 if you have the `sgeev_' function. */
 #undef HAVE_SGEEV_
 
+/* Define to 1 if you have the `sgemm_' function. */
+#undef HAVE_SGEMM_
+
 /* Define to 1 if you have the `sgemv_' function. */
 #undef HAVE_SGEMV_
 
diff --git a/deal.II/configure b/deal.II/configure
index e397ac6307..783843e852 100755
--- a/deal.II/configure
+++ b/deal.II/configure
@@ -16109,7 +16109,9 @@ done
 
 
 
-for ac_func in dgesvd_ sgesvd_ dgetrf_ sgetrf_ dgetri_ sgetri_
+
+
+for ac_func in dgesvd_ sgesvd_ dgemm_ sgemm_ dgetrf_ sgetrf_ dgetri_ sgetri_
 do
 as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
 { echo "$as_me:$LINENO: checking for $ac_func" >&5
diff --git a/deal.II/configure.in b/deal.II/configure.in
index 59135b6ef5..aa9dccfa0f 100644
--- a/deal.II/configure.in
+++ b/deal.II/configure.in
@@ -551,7 +551,7 @@ if test "x$with_blas" != "x" -a "x$with_blas" != "xno" ; then
 fi
 
 AC_CHECK_FUNCS([daxpy_ saxpy_ dgemv_ sgemv_ dgeev_ sgeev_ dgeevx_ sgeevx_])
-AC_CHECK_FUNCS([dgesvd_ sgesvd_ dgetrf_ sgetrf_ dgetri_ sgetri_])
+AC_CHECK_FUNCS([dgesvd_ sgesvd_ dgemm_ sgemm_ dgetrf_ sgetrf_ dgetri_ sgetri_])
 AC_CHECK_FUNCS([dgetrs_ sgetrs_ dstev_ sstev_])
 
 dnl -------------------------------------------------------------
diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h
index 5fb76095ed..56de0b588b 100644
--- a/deal.II/lac/include/lac/constraint_matrix.templates.h
+++ b/deal.II/lac/include/lac/constraint_matrix.templates.h
@@ -1048,7 +1048,7 @@ namespace internals
   list_shellsort (std::vector<std::pair<unsigned int,unsigned int> > &my_indices)
   {
                         // now sort the actual dofs using a shell
-                        // sort (which is very fast in case the
+                        // sort which is very fast in case the
                         // indices are already sorted (which is
                         // the usual case with DG elements)
     unsigned int i, j, j2, temp, templ, istep;
@@ -1121,6 +1121,11 @@ distribute_local_to_global (const FullMatrix<double>     &local_matrix,
 
   const unsigned int n_local_dofs = local_dof_indices.size();
 
+  double average_diagonal = 0;
+  for (unsigned int i=0; i<n_local_dofs; ++i)
+    average_diagonal += std::fabs (local_matrix(i,i));
+  average_diagonal /= n_local_dofs;
+
@@ ... @@ distribute_local_to_global (const FullMatrix<double>     &local_matrix,
                         // hanging nodes in 3d). however, in the
                         // line below, we do actually do
                         // something with this dof
+          const double new_diagonal = std::fabs(local_matrix(local_row,local_row)) != 0 ?
+                                      std::fabs(local_matrix(local_row,local_row)) : average_diagonal;
           Threads::ThreadMutex::ScopedLock lock(mutex);
-          global_matrix.add(global_row,global_row,
-                            std::fabs(local_matrix(local_row,local_row)));
+          global_matrix.add(global_row, global_row, new_diagonal);
         }
 
   const unsigned int n_actual_dofs = my_indices.size();
@@ -1460,6 +1466,11 @@ distribute_local_to_global (const FullMatrix<double>     &local_matrix,
   const unsigned int n_local_dofs = local_dof_indices.size();
   const unsigned int num_blocks   = global_matrix.n_block_rows();
 
+  double average_diagonal = 0;
+  for (unsigned int i=0; i<n_local_dofs; ++i)
+    average_diagonal += std::fabs (local_matrix(i,i));
+  average_diagonal /= n_local_dofs;
+
   std::vector<std::pair<unsigned int,unsigned int> > my_indices (n_local_dofs);
   std::vector > constraint_lines;
   constraint_lines.reserve(n_local_dofs);
@@ -1512,9 +1523,10 @@ distribute_local_to_global (const FullMatrix<double>     &local_matrix,
                                            (local_row, position->entries[q].second));
             }
 
+          const double new_diagonal = std::fabs(local_matrix(local_row,local_row)) != 0 ?
+                                      std::fabs(local_matrix(local_row,local_row)) : average_diagonal;
           Threads::ThreadMutex::ScopedLock lock(mutex);
-          global_matrix.add(global_row,global_row,
-                            std::fabs(local_matrix(local_row,local_row)));
+          global_matrix.add(global_row, global_row, new_diagonal);
         }
 
   const unsigned int n_actual_dofs = my_indices.size();
diff --git a/deal.II/lac/include/lac/full_matrix.templates.h b/deal.II/lac/include/lac/full_matrix.templates.h
index 9598f0fbaf..9efb96c49c 100644
--- a/deal.II/lac/include/lac/full_matrix.templates.h
+++ b/deal.II/lac/include/lac/full_matrix.templates.h
@@ -545,6 +545,37 @@ FullMatrix<number>::equ (const number a,
 
 
 
+namespace internal
+{
+                        // a namespace into which we import
+                        // the global definition of the
+                        // LAPACK function gemm for
+                        // various data types and add
+                        // general templates for all other
+                        // types. this allows us to call the
+                        // gemm function for all data
+                        // types but generates an exception
+                        // if something is called for a
+                        // data type not supported by
+                        // LAPACK.
+  namespace gemm_switch
+  {
+    using ::gemm;
+
+    template <typename T, typename T2>
+    void
+    gemm (const char*, const char*, const int*, const int*, const int*,
+          const T*, const T2*, const int*, const T*, const int*,
+          const T*, T2*, const int*)
+    {
+      Assert (false, LAPACKSupport::ExcMissing("dgemm for this data type"));
+    }
+
+  }
+}
+
+
+
 template <typename number>
 template <typename number2>
 void FullMatrix<number>::mmult (FullMatrix<number2>       &dst,
@@ -556,6 +587,60 @@ void FullMatrix<number>::mmult (FullMatrix<number2>       &dst,
   Assert (dst.n() == src.n(), ExcDimensionMismatch(dst.n(), src.n()));
   Assert (dst.m() == m(), ExcDimensionMismatch(m(), dst.m()));
 
+                        // see if we can use Lapack algorithms
+                        // for this and if the type for 'number'
+                        // works for us (it is usually not
+                        // efficient to use Lapack for very small
+                        // matrices):
+#if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
+  if ((types_are_equal<number,double>::value
+       ||
+       types_are_equal<number,float>::value)
+      &&
+      types_are_equal<number,number2>::value)
+    if (this->n_cols() > 25)
+      {
+                        // In case we have the LAPACK
+                        // function gemm detected at
+                        // configure, we use that algorithm
+                        // for matrix-matrix multiplication
+                        // since it provides better
+                        // performance than the deal.II
+                        // native functions (it uses cache
+                        // and register blocking in order to
+                        // access local data).
+                        //
+                        // Note that BLAS/LAPACK stores
+                        // matrix elements column-wise (i.e.,
+                        // all values in one column, then all
+                        // in the next, etc.), whereas the
+                        // FullMatrix stores them row-wise.
+                        // We ignore that difference, and
+                        // give our row-wise data to LAPACK,
+                        // let LAPACK build the product of
+                        // transpose matrices, and read the
+                        // result as if it were row-wise
+                        // again. In other words, we calculate
+                        // (B^T A^T)^T, which is AB.
+
+        const int m = src.n();
+        const int n = this->m();
+        const int k = this->n();
+        const char *notrans = "n";
+
+        const number alpha = 1.;
+        const number beta = (adding == true) ? 1. : 0.;
+
+                        // Use the BLAS function gemm for
+                        // computing the matrix-matrix product.
+        internal::gemm_switch::gemm(notrans, notrans, &m, &n, &k, &alpha, &src(0,0), &m,
+                                    this->val, &k, &beta, &dst(0,0), &m);
+
+        return;
+      }
+
+#endif
+
 
   if (!adding)
     for (unsigned int i=0; i<m(); ++i)
@@ ... @@ void FullMatrix<number>::Tmmult (FullMatrix<number2>       &dst,
   Assert (n() == dst.m(), ExcDimensionMismatch(n(), dst.m()));
   Assert (src.n() == dst.n(), ExcDimensionMismatch(src.n(), dst.n()));
 
+
+                        // see if we can use Lapack algorithms
+                        // for this and if the type for 'number'
+                        // works for us (it is usually not
+                        // efficient to use Lapack for very small
+                        // matrices):
+#if defined(HAVE_DGEMM_) && defined (HAVE_SGEMM_)
+  if ((types_are_equal<number,double>::value
+       ||
+       types_are_equal<number,float>::value)
+      &&
+      types_are_equal<number,number2>::value)
+    if (this->n_cols() > 25)
+      {
+                        // In case we have the LAPACK
+                        // function gemm detected at
+                        // configure, we use that algorithm
+                        // for matrix-matrix multiplication
+                        // since it provides better
+                        // performance than the deal.II
+                        // native functions (it uses cache
+                        // and register blocking in order to
+                        // access local data).
+                        //
+                        // Note that BLAS/LAPACK stores
+                        // matrix elements column-wise (i.e.,
+                        // all values in one column, then all
+                        // in the next, etc.), whereas the
+                        // FullMatrix stores them row-wise.
+                        // We ignore that difference, and
+                        // give our row-wise data to LAPACK,
+                        // let LAPACK build the product of
+                        // transpose matrices, and read the
+                        // result as if it were row-wise
+                        // again. In other words, we calculate
+                        // (B^T A^T)^T, which is AB.
+
+        const int m = src.n();
+        const int n = this->n();
+        const int k = this->m();
+        const char *trans = "t";
+        const char *notrans = "n";
+
+        const number alpha = 1.;
+        const number beta = (adding == true) ? 1. : 0.;
+
+                        // Use the BLAS function gemm for
+                        // computing the matrix-matrix product.
+        internal::gemm_switch::gemm(notrans, trans, &m, &n, &k, &alpha, &src(0,0), &m,
+                                    this->val, &n, &beta, &dst(0,0), &m);
+
+        return;
+      }
+
+#endif
+
+
   if (!adding)
     for (unsigned int i=0; i<n(); ++i)
@@ ... @@ void FullMatrix<number>::gauss_jordan ()
   Assert (!this->empty(), ExcEmptyMatrix());
   Assert (this->n_cols() == this->n_rows(), ExcNotQuadratic());
 
-                        // see if we can use Lapack
-                        // algorithms for this and if the
-                        // type for 'number' works for us:
+                        // see if we can use Lapack algorithms
+                        // for this and if the type for 'number'
+                        // works for us (it is usually not
+                        // efficient to use Lapack for very small
+                        // matrices):
 #if defined(HAVE_DGETRF_) && defined (HAVE_SGETRF_) && \
     defined(HAVE_DGETRI_) && defined (HAVE_SGETRI_)
   if (types_are_equal<number,double>::value
       ||
       types_are_equal<number,float>::value)
+    if (this->n_cols() > 15)
     {
                         // In case we have the LAPACK functions
                         // getrf and getri detected at configure,
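
For readers who want to try the (B^T A^T)^T = AB trick from the mmult comment outside of
deal.II, here is a small self-contained sketch. It is my own illustration, not part of the
patch: the helper name row_major_mmult and the example matrices are made up, and it assumes
a standard Fortran BLAS with 32-bit integers is linked (e.g. with -lblas). It calls dgemm_
directly on row-major data, in the same spirit as the gemm call added to FullMatrix::mmult.

#include <cassert>
#include <cstdio>
#include <vector>

// Fortran BLAS symbol; the trailing underscore and plain-int arguments are an
// assumption about the linked BLAS (the usual LP64 interface).
extern "C" void dgemm_(const char *transa, const char *transb,
                       const int *m, const int *n, const int *k,
                       const double *alpha, const double *a, const int *lda,
                       const double *b, const int *ldb,
                       const double *beta, double *c, const int *ldc);

// Compute C = A * B for row-major A (ar x ac), B (ac x bc), C (ar x bc).
void row_major_mmult(const std::vector<double> &A,
                     const std::vector<double> &B,
                     std::vector<double>       &C,
                     const int ar, const int ac, const int bc)
{
  assert(static_cast<int>(A.size()) == ar * ac);
  assert(static_cast<int>(B.size()) == ac * bc);
  C.assign(ar * bc, 0.);

  // Column-major BLAS interprets our row-major buffers as the transposed
  // matrices. Handing it the operands in swapped order therefore makes it
  // compute B^T A^T = (A B)^T, and the column-major result it writes back is
  // exactly A B when the same buffer is read row-major again.
  const int    m = bc, n = ar, k = ac;
  const double alpha = 1., beta = 0.;
  const char   notrans = 'n';
  dgemm_(&notrans, &notrans, &m, &n, &k,
         &alpha, B.data(), &m,
         A.data(), &k,
         &beta, C.data(), &m);
}

int main()
{
  // A is 2x3, B is 3x2, so C = A*B is the 2x2 matrix [[58, 64], [139, 154]].
  const std::vector<double> A = {1, 2, 3,
                                 4, 5, 6};
  const std::vector<double> B = { 7,  8,
                                  9, 10,
                                 11, 12};
  std::vector<double> C;
  row_major_mmult(A, B, C, 2, 3, 2);
  std::printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);
  return 0;
}

Because the operands are passed to dgemm_ in swapped order with the matching leading
dimensions, no data ever has to be copied or transposed explicitly; this is the same
reasoning the patch follows when it passes &src(0,0) before this->val.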