From 2341a5fe9c40fdb080abd5258aff5f0aaa00ab1f Mon Sep 17 00:00:00 2001
From: wolf
Date: Wed, 8 May 2002 08:05:35 +0000
Subject: [PATCH] Help Intel's icc compiler when passing the address of a
 member template function of a template class to the Threads::encapsulate
 functions, by putting that address into a variable first. icc would error
 out otherwise in MT mode.

git-svn-id: https://svn.dealii.org/trunk@5833 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../lac/include/lac/sparse_matrix.templates.h | 96 +++++++++++++++++--
 .../lac/include/lac/sparse_vanka.templates.h  | 22 ++++-
 deal.II/lac/include/lac/sparsity_pattern.h    |  4 +-
 3 files changed, 108 insertions(+), 14 deletions(-)

diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h
index 3ead4bbe90..110702d024 100644
--- a/deal.II/lac/include/lac/sparse_matrix.templates.h
+++ b/deal.II/lac/include/lac/sparse_matrix.templates.h
@@ -295,11 +295,30 @@ SparseMatrix<number>::vmult (Vector<somenumber>& dst, const Vector<somenumber>&
 {
   const unsigned int n_threads = multithread_info.n_default_threads;
 
+                                   // then spawn threads. since
+                                   // some compilers have trouble
+                                   // finding out which
+                                   // 'encapsulate' function to
+                                   // take of all those possible
+                                   // ones if we simply drop in
+                                   // the address of an overloaded
+                                   // template member function,
+                                   // make it simpler for the
+                                   // compiler by giving it the
+                                   // correct type right away:
+  typedef
+    void (SparseMatrix<number>::*mem_fun_p)
+    (Vector<somenumber> &,
+     const Vector<somenumber> &,
+     const unsigned int        ,
+     const unsigned int) const;
+  const mem_fun_p comp
+    = &(SparseMatrix<number>::
+        template threaded_vmult<somenumber>);
   Threads::ThreadManager thread_manager;
   for (unsigned int i=0; i<n_threads; ++i)
     Threads::spawn (thread_manager,
-                    Threads::encapsulate (&SparseMatrix<number>::
-                                          template threaded_vmult<somenumber>)
+                    Threads::encapsulate (comp)
                     .collect_args (this, dst, src,
                                    n_rows * i / n_threads,
                                    n_rows * (i+1) / n_threads));
@@ -445,11 +464,29 @@ SparseMatrix<number>::matrix_norm_square (const Vector<somenumber>& v) const
                                    // the different parts
   std::vector<somenumber> partial_sums (n_threads, 0);
   Threads::ThreadManager thread_manager;
-                                   // spawn some jobs...
+                                   // then spawn threads. since
+                                   // some compilers have trouble
+                                   // finding out which
+                                   // 'encapsulate' function to
+                                   // take of all those possible
+                                   // ones if we simply drop in
+                                   // the address of an overloaded
+                                   // template member function,
+                                   // make it simpler for the
+                                   // compiler by giving it the
+                                   // correct type right away:
+  typedef
+    void (SparseMatrix<number>::*mem_fun_p)
+    (const Vector<somenumber> &,
+     const unsigned int        ,
+     const unsigned int        ,
+     somenumber *) const;
+  const mem_fun_p comp
+    = &(SparseMatrix<number>::
+        template threaded_matrix_norm_square<somenumber>);
   for (unsigned int i=0; i<n_threads; ++i)
     Threads::spawn (thread_manager,
-                    Threads::encapsulate (&SparseMatrix<number>::
-                                          template threaded_matrix_norm_square<somenumber>)
+                    Threads::encapsulate (comp)
                     .collect_args (this, v,
                                    n_rows * i / n_threads,
                                    n_rows * (i+1) / n_threads,
@@ -538,11 +575,30 @@ SparseMatrix<number>::matrix_scalar_product (const Vector<somenumber>& u,
                                    // the different parts
   std::vector<somenumber> partial_sums (n_threads, 0);
   Threads::ThreadManager thread_manager;
-                                   // spawn some jobs...
+                                   // then spawn threads. since
+                                   // some compilers have trouble
+                                   // finding out which
+                                   // 'encapsulate' function to
+                                   // take of all those possible
+                                   // ones if we simply drop in
+                                   // the address of an overloaded
+                                   // template member function,
+                                   // make it simpler for the
+                                   // compiler by giving it the
+                                   // correct type right away:
+  typedef
+    void (SparseMatrix<number>::*mem_fun_p)
+    (const Vector<somenumber> &,
+     const Vector<somenumber> &,
+     const unsigned int        ,
+     const unsigned int        ,
+     somenumber *) const;
+  const mem_fun_p comp
+    = &(SparseMatrix<number>::
+        template threaded_matrix_scalar_product<somenumber>);
   for (unsigned int i=0; i<n_threads; ++i)
     Threads::spawn (thread_manager,
-                    Threads::encapsulate (&SparseMatrix<number>::
-                                          template threaded_matrix_scalar_product<somenumber>)
+                    Threads::encapsulate (comp)
                     .collect_args (this, u, v,
                                    n_rows * i / n_threads,
                                    n_rows * (i+1) / n_threads,
@@ -674,11 +730,31 @@ SparseMatrix<number>::residual (Vector<somenumber> &dst,
                                    // space for the square norms of
                                    // the different parts
   std::vector<somenumber> partial_norms (n_threads, 0);
+
+                                   // then spawn threads. since
+                                   // some compilers have trouble
+                                   // finding out which
+                                   // 'encapsulate' function to
+                                   // take of all those possible
+                                   // ones if we simply drop in
+                                   // the address of an overloaded
+                                   // template member function,
+                                   // make it simpler for the
+                                   // compiler by giving it the
+                                   // correct type right away:
+  typedef
+    void (SparseMatrix<number>::*mem_fun_p)
+    (Vector<somenumber> &,
+     const Vector<somenumber> &,
+     const Vector<somenumber> &,
+     const std::pair<unsigned int,unsigned int>,
+     somenumber *) const;
+  const mem_fun_p comp_residual = &SparseMatrix<number>::
+                                  template threaded_residual<somenumber>;
   Threads::ThreadManager thread_manager;
   for (unsigned int i=0; i<n_threads; ++i)
     Threads::spawn (thread_manager,
-                    Threads::encapsulate (&SparseMatrix<number>::
-                                          template threaded_residual<somenumber>)
+                    Threads::encapsulate (comp_residual)
                     .collect_args (this, dst, u, b,
                                    std::pair<unsigned int,unsigned int>
                                    (n_rows * i / n_threads,
diff --git a/deal.II/lac/include/lac/sparse_vanka.templates.h b/deal.II/lac/include/lac/sparse_vanka.templates.h
index 10b2ce093a..143d1a8885 100644
--- a/deal.II/lac/include/lac/sparse_vanka.templates.h
+++ b/deal.II/lac/include/lac/sparse_vanka.templates.h
@@ -582,11 +582,29 @@ void SparseBlockVanka<number>::vmult (Vector<number2> &dst,
                                    // otherwise: blocking requested
     {
 #ifdef DEAL_II_USE_MT
+                                   // spawn threads. since
+                                   // some compilers have trouble
+                                   // finding out which
+                                   // 'encapsulate' function to
+                                   // take of all those possible
+                                   // ones if we simply drop in
+                                   // the address of an overloaded
+                                   // template member function,
+                                   // make it simpler for the
+                                   // compiler by giving it the
+                                   // correct type right away:
+      typedef
+        void (SparseVanka<number>::*mem_fun_p)
+        (Vector<number2> &,
+         const Vector<number2> &,
+         const std::vector<bool> *) const;
+      const mem_fun_p comp
+        = &(SparseVanka<number>::
+            template apply_preconditioner<number2>);
       Threads::ThreadManager thread_manager;
       for (unsigned int block=0; block<n_blocks; ++block)
         Threads::spawn (thread_manager,
-                        Threads::encapsulate (&SparseVanka<number>::
-                                              template apply_preconditioner<number2>)
+                        Threads::encapsulate (comp)
                         .collect_args (this, dst, src,
                                        &dof_masks[block]));
       thread_manager.wait ();
diff --git a/deal.II/lac/include/lac/sparsity_pattern.h b/deal.II/lac/include/lac/sparsity_pattern.h
index f917b9ca7e..0ca2d58a46 100644
--- a/deal.II/lac/include/lac/sparsity_pattern.h
+++ b/deal.II/lac/include/lac/sparsity_pattern.h
@@ -941,7 +941,7 @@ class SparsityPattern : public Subscriptor
                                      * lengths.
                                      */
     static
-    const unsigned int * const
+    const unsigned int *
    optimized_lower_bound (const unsigned int *first,
                           const unsigned int *last,
                           const unsigned int &val);
@@ -994,7 +994,7 @@ class SparsityPattern : public Subscriptor
 
 
 inline
-const unsigned int * const
+const unsigned int *
 SparsityPattern::optimized_lower_bound (const unsigned int *first,
                                         const unsigned int *last,
                                         const unsigned int &val)
-- 
2.39.5
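
Note (not part of the patch above): the following is a minimal, self-contained sketch of the
workaround this patch applies. The names Matrix, Vector and spawn are hypothetical stand-ins
for deal.II's SparseMatrix, Vector and Threads::encapsulate/Threads::spawn; only the pattern
is the same. When a class template provides several overloaded member function templates,
handing '&Class::member' directly to a function template leaves overload selection entangled
with template argument deduction, which is what icc rejected in MT mode; storing the address
in a variable of the exact pointer-to-member type commits to one overload before the call.

#include <iostream>
#include <utility>
#include <vector>

template <typename T>
struct Vector
{
  std::vector<T> values;
};

template <typename number>
struct Matrix
{
                                   // two overloaded member function
                                   // templates; a plain
                                   // '&Matrix<number>::apply' leaves
                                   // the choice between them open
  template <typename somenumber>
  void apply (Vector<somenumber> &,
              const Vector<somenumber> &) const
  {
    std::cout << "two-argument overload" << std::endl;
  }

  template <typename somenumber>
  void apply (Vector<somenumber> &,
              const Vector<somenumber> &,
              const unsigned int begin,
              const unsigned int end) const
  {
    std::cout << "four-argument overload, rows "
              << begin << ".." << end << std::endl;
  }
};

                                   // a generic forwarding function in the
                                   // spirit of Threads::encapsulate: it can
                                   // only deduce MemFun once the caller has
                                   // committed to one concrete overload
template <typename Class, typename MemFun, typename... Args>
void spawn (const Class &object, MemFun mem_fun, Args&&... args)
{
  (object.*mem_fun) (std::forward<Args>(args)...);
}

int main ()
{
  Matrix<double> matrix;
  Vector<float>  dst, src;

                                   // the pattern used in the patch: first
                                   // give the address a concrete
                                   // pointer-to-member type...
  typedef void (Matrix<double>::*mem_fun_p) (Vector<float> &,
                                             const Vector<float> &,
                                             const unsigned int,
                                             const unsigned int) const;
  const mem_fun_p comp = &Matrix<double>::apply<float>;

                                   // ...and only then hand it on to the
                                   // generic forwarding function
  spawn (matrix, comp, dst, src, 0u, 10u);
}

Because 'comp' already has a concrete type, the compiler resolves the overload set against
that target type at the point of initialization, and the forwarding function never sees an
unresolved overload set; that is the same effect the mem_fun_p typedefs have in the patch.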