From e8fa58c71a28742e9256194893617da152ba3bcf Mon Sep 17 00:00:00 2001
From: wolf
Date: Fri, 10 Sep 1999 09:15:22 +0000
Subject: [PATCH] Merge in the changes made for multithreaded linear algebra
 into the main branch. Since they are guarded by #if ... #endif's, they
 should not harm.

git-svn-id: https://svn.dealii.org/trunk@1736 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/lac/Todo                              |   7 +-
 deal.II/lac/include/lac/sparse_matrix.h       |  52 +++-
 .../lac/include/lac/sparse_matrix.templates.h | 276 +++++++++++++++++-
 3 files changed, 324 insertions(+), 11 deletions(-)

diff --git a/deal.II/lac/Todo b/deal.II/lac/Todo
index fa889abe22..77a2b39e12 100644
--- a/deal.II/lac/Todo
+++ b/deal.II/lac/Todo
@@ -37,4 +37,9 @@ Use the commented-out version in PreconditionBlock::invert_diagblocks
 
 Comment on the modification of the right hand side in the Vanka smoother
   (which is done to yield a homogeneous boundary value problem on each patch)
-  
\ No newline at end of file
+
+
+Someone should take a look at the handling of the sparsity variable
+  in the sparse matrix: it is subscribed to twice (explicitly and
+  through the subscriptor); the explicit subscription is redundant,
+  but I don't have the time to look into that now.
diff --git a/deal.II/lac/include/lac/sparse_matrix.h b/deal.II/lac/include/lac/sparse_matrix.h
index 6e47f26412..2a295495a7 100644
--- a/deal.II/lac/include/lac/sparse_matrix.h
+++ b/deal.II/lac/include/lac/sparse_matrix.h
@@ -15,6 +15,8 @@
 #include 
 #include 
+#include 
+
 
 //forward declarations
 template <typename number> class Vector;
 
@@ -885,7 +887,12 @@ class SparseMatrix : public Subscriptor
      * function equals the matrix norm with
      * respect to the mass matrix of the vector
      * representing the nodal values of the
-     * finite element function.
+     * finite element function. Note that
+     * even though the function's name might
+     * suggest something different, for historic
+     * reasons not the norm but its square
+     * is returned, as defined above by
+     * the scalar product.
      *
      * Note the order in which the matrix
      * appears. For non-symmetric matrices
@@ -1137,7 +1144,48 @@ class SparseMatrix : public Subscriptor
      * function.
      */
     unsigned int max_len;
-    
+
+    /**
+     * Version of #vmult# which only performs
+     * its actions on the region defined by
+     * #[begin_row,end_row)#. This function
+     * is called by #vmult# in the case
+     * of enabled multithreading.
+     */
+    template <typename somenumber>
+    void * threaded_vmult (Vector<somenumber>       &dst,
+                           const Vector<somenumber> &src,
+                           const unsigned int        begin_row,
+                           const unsigned int        end_row) const;
+
+    /**
+     * Version of #matrix_norm# which only
+     * performs its actions on the region
+     * defined by #[begin_row,end_row)#. This
+     * function is called by #matrix_norm# in
+     * the case of enabled multithreading.
+     */
+    template <typename somenumber>
+    void * threaded_matrix_norm (const Vector<somenumber> &v,
+                                 const unsigned int        begin_row,
+                                 const unsigned int        end_row,
+                                 somenumber               *partial_sum) const;
+
+    /**
+     * Version of #residual# which only
+     * performs its actions on the region
+     * defined by #[begin_row,end_row)#. This
+     * function is called by #residual# in
+     * the case of enabled multithreading.
+     */
+    template <typename somenumber>
+    void * threaded_residual (Vector<somenumber>       &dst,
+                              const Vector<somenumber> &u,
+                              const Vector<somenumber> &b,
+                              const unsigned int        begin_row,
+                              const unsigned int        end_row,
+                              somenumber               *partial_norm) const;
+
 
     // make all other sparse matrices
     // friends
diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h
index a8c60cc9de..ec663eee86 100644
--- a/deal.II/lac/include/lac/sparse_matrix.templates.h
+++ b/deal.II/lac/include/lac/sparse_matrix.templates.h
@@ -14,6 +14,16 @@
 #include 
 #include 
+#ifdef DEAL_II_USE_MT
+#  include 
+#  include 
+
+#  include 
+
+#  define NTHREADS 4
+#endif
+
+
 
 
 
 
@@ -69,6 +79,7 @@ SparseMatrix<number>::~SparseMatrix ()
 {
   if (cols != 0)
     cols->unsubscribe();
+  cols = 0;
 
   if (val != 0)
     delete[] val;
@@ -123,6 +134,8 @@ template <typename number>
 void
 SparseMatrix<number>::clear ()
 {
+  if (cols != 0)
+    cols->unsubscribe ();
   cols = 0;
   if (val) delete[] val;
   val = 0;
@@ -199,8 +212,53 @@ SparseMatrix<number>::vmult (Vector<somenumber>& dst, const Vector<somenumber>& src) const
   Assert (val != 0, ExcMatrixNotInitialized());
   Assert(m() == dst.size(), ExcDimensionsDontMatch(m(),dst.size()));
   Assert(n() == src.size(), ExcDimensionsDontMatch(n(),src.size()));
-  
+  const unsigned int n_rows = m();
+
+#ifdef DEAL_II_USE_MT
+                                   // in MT mode: start new threads only
+                                   // if the matrix is sufficiently large.
+                                   // the limit is mostly artificial
+  if (n_rows/NTHREADS > 2000)
+    {
+      const unsigned int n_threads = NTHREADS;
+
+      ThreadManager thread_manager;
+
+      const ThreadManager::Mem_Fun_Data4<const SparseMatrix<number>,
+                                         Vector<somenumber> &,
+                                         const Vector<somenumber> &,
+                                         unsigned int,
+                                         unsigned int>
+        mem_fun_data_all (this, dst, src, 0, 0,
+                          &SparseMatrix<number>::template threaded_vmult<somenumber>);
+      vector<ThreadManager::Mem_Fun_Data4<const SparseMatrix<number>,
+                                          Vector<somenumber> &,
+                                          const Vector<somenumber> &,
+                                          unsigned int,
+                                          unsigned int> >
+        mem_fun_data(n_threads, mem_fun_data_all);
+
+                                       // spawn some jobs...
+      for (unsigned int i=0; i<n_threads; ++i)
 
   const number *val_ptr    = &val[cols->rowstart[0]];
   const int *colnum_ptr    = &cols->colnums[cols->rowstart[0]];
   somenumber *dst_ptr      = &dst(0);
@@ -215,6 +273,38 @@ SparseMatrix<number>::vmult (Vector<somenumber>& dst, const Vector<somenumber>& src) const
 };
 
 
+
+template <typename number>
+template <typename somenumber>
+void *
+SparseMatrix<number>::threaded_vmult (Vector<somenumber>       &dst,
+                                      const Vector<somenumber> &src,
+                                      const unsigned int        begin_row,
+                                      const unsigned int        end_row) const
+{
+#ifdef DEAL_II_USE_MT
+  const number *val_ptr    = &val[cols->rowstart[begin_row]];
+  const int *colnum_ptr    = &cols->colnums[cols->rowstart[begin_row]];
+  somenumber *dst_ptr      = &dst(begin_row);
+  for (unsigned int row=begin_row; row<end_row; ++row)
+    {
+      somenumber s = 0.;
+      const number *const val_end_of_row = &val[cols->rowstart[row+1]];
+      while (val_ptr != val_end_of_row)
+        s += *val_ptr++ * src(*colnum_ptr++);
+      *dst_ptr++ = s;
+    };
+#else
+                                   // this function should not be called
+                                   // when not in parallel mode.
+  Assert (false, ExcInternalError());
+#endif
+
+  return 0;
+};
+
+
+
 template <typename number>
 template <typename somenumber>
 void
@@ -296,8 +386,58 @@ SparseMatrix<number>::matrix_norm (const Vector<somenumber>& v) const
   Assert(m() == v.size(), ExcDimensionsDontMatch(m(),v.size()));
   Assert(n() == v.size(), ExcDimensionsDontMatch(n(),v.size()));
 
-  somenumber sum = 0.;
   const unsigned int n_rows = m();
+#ifdef DEAL_II_USE_MT
+                                   // if in MT mode and size sufficiently
+                                   // large: do it in parallel; the limit
+                                   // is mostly artificial
+  if (n_rows/NTHREADS > 2000)
+    {
+      const unsigned int n_threads = NTHREADS;
+
+      ThreadManager thread_manager;
+
+      const ThreadManager::Mem_Fun_Data4<const SparseMatrix<number>,
+                                         const Vector<somenumber> &,
+                                         unsigned int,
+                                         unsigned int,
+                                         somenumber *>
+        mem_fun_data_all (this, v, 0, 0, 0,
+                          &SparseMatrix<number>::template threaded_matrix_norm<somenumber>);
+      vector<ThreadManager::Mem_Fun_Data4<const SparseMatrix<number>,
+                                          const Vector<somenumber> &,
+                                          unsigned int,
+                                          unsigned int,
+                                          somenumber *> >
+        mem_fun_data(n_threads, mem_fun_data_all);
+
+                                       // space for the norms of
+                                       // the different parts
+      vector<somenumber> partial_sums (n_threads, 0);
+
+                                       // spawn some jobs...
+      for (unsigned int i=0; i<n_threads; ++i)
 
   const number *val_ptr    = &val[cols->rowstart[0]];
   const int *colnum_ptr    = &cols->colnums[cols->rowstart[0]];
   for (unsigned int row=0; row<n_rows; ++row)
@@ ... @@ SparseMatrix<number>::matrix_norm (const Vector<somenumber>& v) const
 };
 
+
+template <typename number>
+template <typename somenumber>
+void *
+SparseMatrix<number>::threaded_matrix_norm (const Vector<somenumber> &v,
+                                            const unsigned int        begin_row,
+                                            const unsigned int        end_row,
+                                            somenumber               *partial_sum) const
+{
+#ifdef DEAL_II_USE_MT
+  somenumber sum = 0.;
+  const number *val_ptr    = &val[cols->rowstart[begin_row]];
+  const int *colnum_ptr    = &cols->colnums[cols->rowstart[begin_row]];
+  for (unsigned int row=begin_row; row<end_row; ++row)
+    {
+      somenumber s = 0.;
+      const number *const val_end_of_row = &val[cols->rowstart[row+1]];
+      while (val_ptr != val_end_of_row)
+        s += *val_ptr++ * v(*colnum_ptr++);
+
+      sum += s* v(row);
+    };
+  *partial_sum = sum;
+
+#else
+                                   // function should not have been called
+  Assert (false, ExcInternalError());
+#endif
+  return 0;
+};
+
+
 template <typename number>
 number SparseMatrix<number>::l1_norm () const
 {
@@ -356,9 +528,9 @@ number SparseMatrix<number>::linfty_norm () const
 template <typename number>
 template <typename somenumber>
 somenumber
-SparseMatrix<number>::residual (Vector<somenumber>& dst,
-                                const Vector<somenumber>& u,
-                                const Vector<somenumber>& b) const
+SparseMatrix<number>::residual (Vector<somenumber>       &dst,
+                                const Vector<somenumber> &u,
+                                const Vector<somenumber> &b) const
 {
   Assert (cols != 0, ExcMatrixNotInitialized());
   Assert (val != 0, ExcMatrixNotInitialized());
@@ -366,11 +538,65 @@ SparseMatrix<number>::residual (Vector<somenumber>& dst,
   Assert(m() == b.size(), ExcDimensionsDontMatch(m(),b.size()));
   Assert(n() == u.size(), ExcDimensionsDontMatch(n(),u.size()));
 
-  somenumber s,norm=0.;
+  const unsigned int n_rows = m();
+#ifdef DEAL_II_USE_MT
+                                   // if in MT mode and size sufficiently
+                                   // large: do it in parallel; the limit
+                                   // is mostly artificial
+  if (n_rows/NTHREADS > 2000)
+    {
+      const unsigned int n_threads = NTHREADS;
+
+      ThreadManager thread_manager;
+
+      const ThreadManager::Mem_Fun_Data6<const SparseMatrix<number>,
+                                         Vector<somenumber> &,        // dst
+                                         const Vector<somenumber> &,  // u
+                                         const Vector<somenumber> &,  // b
+                                         unsigned int,                // begin_row
+                                         unsigned int,                // end_row
+                                         somenumber *>                // partial norm
+        mem_fun_data_all (this, dst, u, b, 0, 0, 0,
+                          &SparseMatrix<number>::template threaded_residual<somenumber>);
+      vector<ThreadManager::Mem_Fun_Data6<const SparseMatrix<number>,
+                                          Vector<somenumber> &,
+                                          const Vector<somenumber> &,
+                                          const Vector<somenumber> &,
+                                          unsigned int,
+                                          unsigned int,
+                                          somenumber *> >
+        mem_fun_data(n_threads, mem_fun_data_all);
+
+                                       // space for the square norms of
+                                       // the different parts
+      vector<somenumber> partial_norms (n_threads, 0);
+
+                                       // spawn some jobs...
+      for (unsigned int i=0; i<n_threads; ++i)
 
       for (unsigned int j=cols->rowstart[i]; j<cols->rowstart[i+1] ;j++)
         {
           int p = cols->colnums[j];
           s -= val[j] * u(p);
         }
@@ -383,6 +609,40 @@ SparseMatrix<number>::residual (Vector<somenumber>& dst,
 }
 
 
+template <typename number>
+template <typename somenumber>
+void *
+SparseMatrix<number>::threaded_residual (Vector<somenumber>       &dst,
+                                         const Vector<somenumber> &u,
+                                         const Vector<somenumber> &b,
+                                         const unsigned int        begin_row,
+                                         const unsigned int        end_row,
+                                         somenumber               *partial_norm) const
+{
+#ifdef DEAL_II_USE_MT
+  somenumber norm=0.;
+
+  for (unsigned int i=begin_row; i<end_row; ++i)
+    {
+      somenumber s = b(i);
+      for (unsigned int j=cols->rowstart[i]; j<cols->rowstart[i+1] ;j++)
+        {
+          int p = cols->colnums[j];
+          s -= val[j] * u(p);
+        }
+      dst(i) = s;
+      norm += dst(i)*dst(i);
+    };
+
+  *partial_norm = norm;
+#else
+  Assert (false, ExcInternalError());
+#endif
+
+  return 0;
+};
+
+
 template <typename number>
 template <typename somenumber>
-- 
2.39.5
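
Note (not part of the patch): the additions above all follow the same pattern — split the row range [0,n_rows) into NTHREADS contiguous chunks, hand each chunk to a threaded_* worker that writes only its slice of the destination vector and reports a partial sum through a pointer, then wait for all workers and reduce the partial results. The following minimal sketch illustrates that pattern for the residual case. It is an assumption-laden stand-in, not deal.II code: it uses std::thread instead of the ThreadManager/Mem_Fun_Data objects the patch relies on, and CSRMatrix, rowstart, colnums and val are made-up names for a compressed-row matrix.

```cpp
#include <cstddef>
#include <functional>
#include <numeric>
#include <thread>
#include <vector>

// Hypothetical compressed-row storage standing in for the sparsity
// pattern and value array used by SparseMatrix in the patch.
struct CSRMatrix
{
  std::vector<std::size_t> rowstart;   // size n_rows()+1
  std::vector<std::size_t> colnums;    // column index of each nonzero
  std::vector<double>      val;        // value of each nonzero
  std::size_t n_rows () const { return rowstart.size () - 1; }
};

// Analogue of threaded_residual: compute dst = b - A*u on the row range
// [begin_row, end_row) only, and report the squared norm of that slice
// through *partial_norm.
void threaded_residual (const CSRMatrix &A,
                        std::vector<double> &dst,
                        const std::vector<double> &u,
                        const std::vector<double> &b,
                        const std::size_t begin_row,
                        const std::size_t end_row,
                        double *partial_norm)
{
  double norm = 0.;
  for (std::size_t i = begin_row; i < end_row; ++i)
    {
      double s = b[i];
      for (std::size_t j = A.rowstart[i]; j < A.rowstart[i + 1]; ++j)
        s -= A.val[j] * u[A.colnums[j]];
      dst[i] = s;
      norm += s * s;
    }
  *partial_norm = norm;
}

// Analogue of the multithreaded branch of residual(): chop the row range
// into n_threads contiguous chunks, run one worker per chunk, wait for all
// of them, and reduce the per-thread partial results. dst must already be
// sized to A.n_rows(); the return value is the squared norm of dst.
double residual (const CSRMatrix &A,
                 std::vector<double> &dst,
                 const std::vector<double> &u,
                 const std::vector<double> &b,
                 const unsigned int n_threads = 4)   // plays the role of NTHREADS
{
  const std::size_t n_rows = A.n_rows ();
  std::vector<double> partial_norms (n_threads, 0.);
  std::vector<std::thread> workers;

  for (unsigned int i = 0; i < n_threads; ++i)       // spawn some jobs...
    workers.emplace_back (threaded_residual,
                          std::cref (A), std::ref (dst),
                          std::cref (u), std::cref (b),
                          n_rows * i / n_threads,
                          n_rows * (i + 1) / n_threads,
                          &partial_norms[i]);

  for (std::thread &t : workers)                     // ...and wait until they
    t.join ();                                       // are all finished

  return std::accumulate (partial_norms.begin (), partial_norms.end (), 0.);
}
```

Because each worker owns a disjoint, contiguous block of rows, the writes to dst never overlap, and keeping one partial-sum slot per thread avoids any locking during the reduction — the same reasoning that lets the patch get away without synchronization primitives beyond joining the spawned jobs.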