From: kronbichler
Date: Mon, 11 Jul 2011 12:12:03 +0000 (+0000)
Subject: Do operator= in parallel just as matrix-vector products. This gives considerably...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c2d1f732b913760f5b0bb4ec5217dd89f9b4f5e7;p=dealii-svn.git

Do operator= in parallel just as matrix-vector products. This gives considerably
better speed of parallel mat-vecs on some NUMA systems where the first access to
a memory location determines which memory bank will hold the data.

git-svn-id: https://svn.dealii.org/trunk@23938 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/lac/sparse_matrix.templates.h b/deal.II/include/deal.II/lac/sparse_matrix.templates.h
index fa6b64b018..75db988f29 100644
--- a/deal.II/include/deal.II/lac/sparse_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/sparse_matrix.templates.h
@@ -125,6 +125,22 @@ SparseMatrix<number>::~SparseMatrix ()
 
 
 
+namespace internal
+{
+  namespace SparseMatrix
+  {
+    template <typename T>
+    void zero_subrange (const unsigned int begin,
+                        const unsigned int end,
+                        T                 *dst)
+    {
+      std::memset (dst+begin, 0, (end-begin)*sizeof(T));
+    }
+  }
+}
+
+
+
 template <typename number>
 SparseMatrix<number> &
 SparseMatrix<number>::operator = (const double d)
@@ -134,8 +150,31 @@ SparseMatrix<number>::operator = (const double d)
   Assert (cols != 0, ExcNotInitialized());
   Assert (cols->compressed || cols->empty(), SparsityPattern::ExcNotCompressed());
 
-  if (val != 0)
-    memset (&val[0], 0, cols->n_nonzero_elements()*sizeof(number));
+                                   // do initial zeroing of elements in
+                                   // parallel. Try to achieve a similar layout
+                                   // as when doing matrix-vector products, as on
+                                   // some NUMA systems, a memory block is
+                                   // assigned to memory banks where the first
+                                   // access is generated. For sparse matrices,
+                                   // the first operation is usually the
+                                   // operator=. The grain size is chosen to
+                                   // reflect the number of rows in
+                                   // minimum_parallel_grain_size, weighted by
+                                   // the number of nonzero entries per row on
+                                   // average.
+  const unsigned int matrix_size = cols->n_nonzero_elements();
+  const unsigned int grain_size =
+    internal::SparseMatrix::minimum_parallel_grain_size *
+    (cols->n_nonzero_elements()+m()) / m();
+  if (matrix_size>grain_size)
+    parallel::apply_to_subranges (0U, matrix_size,
+                                  std_cxx1x::bind(&internal::SparseMatrix::template
+                                                  zero_subrange<number>,
+                                                  std_cxx1x::_1, std_cxx1x::_2,
+                                                  val),
+                                  grain_size);
+  else if (matrix_size > 0)
+    memset (&val[0], 0, matrix_size*sizeof(number));
 
   return *this;
 }
@@ -1494,7 +1533,7 @@ SparseMatrix<number>::SOR (Vector<somenumber>& dst,
           if (col < row)
             s -= val[j] * dst(col);
         }
-      
+
       Assert(val[cols->rowstart[row]]!= 0., ExcDivideByZero());
       dst(row) = s * om / val[cols->rowstart[row]];
     }
@@ -1721,7 +1760,7 @@ SparseMatrix<number>::SSOR (Vector<somenumber>& dst,
 {
 //TODO: Is this called anywhere? If so, multiplication with om(2-om)D is missing
   Assert(false, ExcNotImplemented());
-  
+
   Assert (cols != 0, ExcNotInitialized());
   Assert (val != 0, ExcNotInitialized());
   Assert (cols->optimize_diagonal(),
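
The gain here relies on the first-touch page placement policy of NUMA machines: a page
is placed on the memory bank of the core that writes it first, so the matrix values
should be zeroed with the same per-thread subrange layout that the later matrix-vector
products will traverse. The following standalone sketch illustrates that idea in plain
C++11 with std::thread; it is not deal.II code (the patch uses parallel::apply_to_subranges
on top of TBB), and the array size, thread count, and helper names are illustrative
assumptions only.

    #include <algorithm>
    #include <cstring>
    #include <memory>
    #include <thread>
    #include <vector>

    // Zero the half-open subrange [begin, end) of dst -- the analogue of
    // internal::SparseMatrix::zero_subrange in the patch above.
    void zero_subrange (std::size_t begin, std::size_t end, double *dst)
    {
      std::memset (dst + begin, 0, (end - begin) * sizeof(double));
    }

    int main ()
    {
      // Stand-in for cols->n_nonzero_elements(); an assumed size.
      const std::size_t n_nonzeros = std::size_t(1) << 24;

      // Allocate without touching the data: the pages are not yet bound to a
      // memory bank (a std::vector<double> would already zero -- and thus
      // place -- everything on the allocating thread).
      std::unique_ptr<double[]> val (new double[n_nonzeros]);

      // One contiguous chunk per thread, mirroring how the subranges are
      // handed out for the zeroing and, later, the mat-vec sweeps.
      const unsigned int n_threads =
        std::max (1u, std::thread::hardware_concurrency());
      const std::size_t chunk = (n_nonzeros + n_threads - 1) / n_threads;

      std::vector<std::thread> workers;
      for (unsigned int t = 0; t < n_threads; ++t)
        {
          const std::size_t begin = std::min (std::size_t(t) * chunk, n_nonzeros);
          const std::size_t end   = std::min (begin + chunk, n_nonzeros);
          // First touch: the thread that zeroes a chunk determines which
          // NUMA node the corresponding pages end up on.
          workers.emplace_back (zero_subrange, begin, end, val.get());
        }
      for (std::thread &w : workers)
        w.join();

      // Later sweeps over val (e.g. the mat-vec kernels) should use the same
      // partitioning so each thread mostly reads from its local memory bank.
      return 0;
    }

The grain-size heuristic in the patch follows the same spirit: minimum_parallel_grain_size
counts rows, so multiplying it by the average number of nonzeros per row,
(n_nonzero_elements()+m())/m(), converts it into a grain size in matrix entries, and the
serial memset is kept as a fallback when the whole matrix is smaller than one grain.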