From: wolf
Date: Wed, 2 Feb 2000 20:14:38 +0000 (+0000)
Subject: Implement another clever method to block the DoFs.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=f5aba6810cb3c0da936d46e24e8539e6a9cac14b;p=dealii-svn.git

Implement another clever method to block the DoFs.

git-svn-id: https://svn.dealii.org/trunk@2332 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/lac/include/lac/sparse_vanka.h b/deal.II/lac/include/lac/sparse_vanka.h
index 7da845f9e5..201e542650 100644
--- a/deal.II/lac/include/lac/sparse_vanka.h
+++ b/deal.II/lac/include/lac/sparse_vanka.h
@@ -185,13 +185,6 @@ class SparseVanka
     /**
      * Exception
      */
-    DeclException2 (ExcInvalidRange,
-                    unsigned int, unsigned int,
-                    << "The bounds [" << arg1 << ',' << arg2
-                    << ") do not form a valid range.");
-    /**
-     * Exception
-     */
     DeclException2 (ExcInvalidVectorSize,
                     unsigned int, unsigned int,
                     << "The dimensions of vectors and matrices, "
@@ -199,45 +192,50 @@ class SparseVanka
   protected:
     /**
-     * Apply the inverses in the
-     * range #[begin,end)# to the
+     * Apply the inverses
+     * corresponding to those degrees
+     * of freedom that have a #true#
+     * value in #dof_mask#, to the
      * #src# vector and move the
      * result into #dst#. Actually,
-     * only values of #src# from
-     * within the range are taken
-     * (all others are set to zero),
-     * and only values inside the
-     * range are written to #dst#, so
-     * the application of this
+     * only values for allowed
+     * indices are written to #dst#,
+     * so the application of this
      * function only does what is
      * announced in the general
      * documentation if the given
-     * range is the whole interval.
+     * mask is set to #true# for all
+     * indices.
      *
      * The reason for providing the
-     * interval anyway is that in
-     * derived classes we may want to
-     * apply the preconditioner to
-     * blocks of the matrix only, in
-     * order to parallelize the
+     * mask anyway is that in derived
+     * classes we may want to apply
+     * the preconditioner to parts of
+     * the matrix only, in order to
+     * parallelize the
      * application. Then, it is
      * important to only write to
-     * some slices of #dst# and only
-     * takes values from similar
-     * slices of #src#, in order to
-     * eliminate the dependencies of
-     * threads of each other.
+     * some slices of #dst#, in order
+     * to eliminate the dependencies
+     * of threads on each other.
+     *
+     * If a null pointer is passed
+     * instead of a pointer to the
+     * #dof_mask# (as is the default
+     * value), then it is assumed
+     * that we shall work on all
+     * degrees of freedom. This is
+     * then equivalent to calling the
+     * function with a
+     * #vector<bool>(n_dofs,true)#.
      *
      * The #operator()# of this class
      * of course calls this function
-     * with the whole interval
-     * #[begin,end)=[0,matrix.m())#.
+     * with a null pointer.
      */
     template <typename number2>
     void apply_preconditioner (Vector<number2>       &dst,
                                const Vector<number2> &src,
-                               const unsigned int     begin,
-                               const unsigned int     end) const;
+                               const vector<bool>    *dof_mask = 0) const;

   private:
     /**
@@ -318,17 +316,9 @@ class SparseVanka
  * divides the matrix into blocks and works on the diagonal blocks
  * only, which of course reduces the efficiency as preconditioner, but
  * is perfectly parallelizable. The constructor takes a parameter into
- * how many diagonal blocks the matrix shall be subdivided and then
- * lets the underlying class do the work.
- *
- * Division of the matrix is done in a way such that the blocks are
- * not necessarily of equal size, but such that the number of selected
- * degrees of freedom for which a local system is to be solved is
- * equal between blocks. The reason for this strategy to subdivision
- * is load-balancing for multithreading, but it is necessary to note
- * that this almost renders the capability as precondition useless if
- * the degrees of freedom are numbered by component, i.e. all Lagrange
- * multipliers en bloc.
+ * how many blocks the matrix shall be subdivided and then lets the
+ * underlying class do the work. Division of the matrix is done in
+ * several ways, which are described in detail below.
  *
  * This class is probably useless if you don't have a multiprocessor
  * system, since then the amount of work per preconditioning step is
@@ -356,6 +346,81 @@ class SparseVanka
  * mentioned, this reduces the preconditioning properties.
  *
  *
+ * \subsection{Splitting the matrix into blocks}
+ *
+ * Splitting the matrix into blocks is always done in a way such that
+ * the blocks are not necessarily of equal size, but such that the
+ * number of selected degrees of freedom for which a local system is
+ * to be solved is equal between blocks. The reason for this
+ * subdivision strategy is load-balancing for multithreading. There
+ * are several possibilities to actually split the matrix into blocks,
+ * which are selected by the flag #blocking_strategy# that is passed
+ * to the constructor. By a block, we will in the sequel denote a list
+ * of indices of degrees of freedom; the algorithm will work on each
+ * block separately, i.e. the solutions of the local systems
+ * corresponding to a degree of freedom of one block will only be used
+ * to update the degrees of freedom belonging to the same block, but
+ * never to update degrees of freedom of other blocks. A block can be
+ * a consecutive list of indices, as in the first alternative below,
+ * or a nonconsecutive list of indices. Of course, we assume that the
+ * intersection of each two blocks is empty and that the union of all
+ * blocks equals the interval #[0,N)#, where #N# is the number of
+ * degrees of freedom of the system of equations.
+ *
+ * \begin{itemize}
+ * \item #index_intervals#:
+ *   Here, we choose the blocks to be intervals #[a_i,a_{i+1})#,
+ *   i.e. consecutive degrees of freedom are usually also within the
+ *   same block. This is a reasonable strategy if the degrees of
+ *   freedom have, for example, been re-numbered using the
+ *   Cuthill-McKee algorithm, in which spatially neighboring degrees
+ *   of freedom have neighboring indices. In that case, coupling in
+ *   the matrix is usually restricted to the vicinity of the diagonal
+ *   as well, and we can simply cut the matrix into blocks.
+ *
+ *   The bounds of the intervals, i.e. the #a_i# above, are chosen
+ *   such that the number of degrees of freedom on which we shall
+ *   work (i.e. usually the degrees of freedom corresponding to
+ *   Lagrange multipliers) is about the same in each block; this does
+ *   not mean, however, that the sizes of the blocks are equal, since
+ *   the blocks also comprise the other degrees of freedom for which
+ *   no local system is solved. As an extreme case, consider that
+ *   all Lagrange multipliers are sorted to the end of the range of
+ *   DoF indices; then the first block would be very large, since it
+ *   comprises all other DoFs and some Lagrange multipliers, while
+ *   all other blocks are rather small and comprise only Lagrange
+ *   multipliers. This strategy therefore depends not only on the
+ *   order in which the Lagrange DoFs are sorted, but also on the
+ *   order in which the other DoFs are sorted. It is therefore
+ *   worth noting that this strategy is almost useless as a
+ *   preconditioner if the degrees of freedom are numbered by
+ *   component, i.e. all Lagrange multipliers en bloc.
+ *
+ * \item #adaptive#: This strategy is a bit more clever in cases where
+ *   the Lagrange DoFs are clustered, as in the example above. It
+ *   works as follows: it first groups the Lagrange DoFs into blocks,
+ *   using the same strategy as above. However, instead of grouping
+ *   the other DoFs into the blocks of Lagrange DoFs with nearest DoF
+ *   index, it decides for each non-Lagrange DoF to put it into the
+ *   block of those Lagrange DoFs which write to this non-Lagrange
+ *   DoF most often. This makes it possible to sort even the Lagrange
+ *   DoFs to the end and still associate spatially neighboring
+ *   non-Lagrange DoFs with the same blocks where the respective
+ *   Lagrange DoFs are, since they couple to each other while
+ *   spatially distant DoFs don't couple.
+ *
+ *   The additional computational effort of sorting the non-Lagrange
+ *   DoFs is not very large compared with the inversion of the local
+ *   systems and applying the preconditioner, so this strategy might
+ *   be reasonable if you want to sort your degrees of freedom by
+ *   component. If the degrees of freedom are not sorted by
+ *   component, the results of the two strategies outlined above
+ *   do not differ much. However, unlike the first strategy, the
+ *   performance of the second strategy does not deteriorate if the
+ *   DoFs are renumbered by component.
+ * \end{itemize}
+ *
+ *
  * \subsection{Typical results}
  *
  * As a prototypical test case, we use a nonlinear problem from
  * optimization, which leads to a series of saddle point problems,
  * each of which is solved using GMRES with Vanka as
  * preconditioner. The equation had approx. 850 degrees of
  * freedom. With the non-blocked version #SparseVanka# (or
  * #SparseBlockVanka# with #n_blocks==1#), the following numbers of
  * iterations are needed to solve the linear system in each nonlinear
- * step: \begin{verbatim} 101 68 64 53 35 21 \end{verbatim} With four
- * blocks, we need the following numbers of iterations
- * \begin{verbatim} 124 88 83 66 44 28 \end{verbatim} As can be seen,
- * more iterations are needed. However, in terms of computing time,
- * the first version needs 72 seconds wall time (and 79 seconds CPU
- * time, which is more than wall time since some other parts of the
- * program were parallelized as well), while the second version needed
- * 53 second wall time (and 110 seconds CPU time) on a four processor
- * machine. The total time is in both cases dominated by the linear
- * solvers. In this case, it is therefore worth while using the
- * blocked version of the preconditioner if wall time is more
- * important than CPU time.
+ * step:
+ * \begin{verbatim}
+ *   101 68 64 53 35 21
+ * \end{verbatim}
+ *
+ * With four blocks, we need the following numbers of iterations
+ * \begin{verbatim}
+ *   124 88 83 66 44 28
+ * \end{verbatim}
+ * As can be seen, more iterations are needed. However, in terms of
+ * computing time, the first version needs 72 seconds wall time (and
+ * 79 seconds CPU time, which is more than wall time since some other
+ * parts of the program were parallelized as well), while the second
+ * version needs 53 seconds wall time (and 110 seconds CPU time) on a
+ * four processor machine. The total time is in both cases dominated
+ * by the linear solvers. In this case, it is therefore worthwhile to
+ * use the blocked version of the preconditioner if wall time is
+ * more important than CPU time.
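+ *
+ * As a minimal usage sketch (only the constructor call of this class
+ * is shown; a matrix #M# and a mask #selected# of the Lagrange
+ * multiplier rows are assumed to have been set up elsewhere, and the
+ * choice of four blocks, the #adaptive# strategy, and four threads
+ * is arbitrary), the blocked preconditioner used in the experiment
+ * above might be created like this:
+ * \begin{verbatim}
+ *   SparseBlockVanka<double> vanka (M, selected,
+ *                                   4, SparseBlockVanka<double>::adaptive,
+ *                                   false, 4);
+ * \end{verbatim}
+ * The object is then handed to the solver as preconditioner in the
+ * usual way, through its #operator()#.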
+ *
+ * The results with the block version above were obtained with the
+ * first blocking strategy and the degrees of freedom were not
+ * numbered by component. Using the second strategy changes the
+ * numbers of iterations only little (by at most one in each step),
+ * and they also change only little when the degrees of freedom are
+ * sorted by component.
  *
  * @author Wolfgang Bangerth, 2000
  */
@@ -384,6 +462,16 @@ template <typename number>
 class SparseBlockVanka : public SparseVanka<number>
 {
   public:
+    /**
+     * Enumeration of the different
+     * methods by which the DoFs are
+     * distributed to the blocks on
+     * which we are to work.
+     */
+    enum BlockingStrategy {
+      index_intervals, adaptive
+    };
+
     /**
      * Constructor. Pass all
      * arguments except for
      */
     SparseBlockVanka (const SparseMatrix<number> &M,
                       const vector<bool>         &selected,
+                      const unsigned int          n_blocks,
+                      const BlockingStrategy      blocking_strategy,
                       const bool                  conserve_memory = false,
-                      const unsigned int          n_threads = 1,
-                      const unsigned int          n_blocks = 1);
+                      const unsigned int          n_threads = 1);

     /**
      * Apply the preconditioner.
@@ -417,12 +506,13 @@ class SparseBlockVanka : public SparseVanka<number>
      * avoid recomputing each time
      * the preconditioner is called.
      */
-    vector<pair<unsigned int, unsigned int> > intervals;
+    vector<vector<bool> > dof_masks;
 };
+

 /*----------------------------  sparse_vanka.h  ---------------------------*/
 /* end of #ifndef __sparse_vanka_H */
 #endif
diff --git a/deal.II/lac/include/lac/sparse_vanka.templates.h b/deal.II/lac/include/lac/sparse_vanka.templates.h
index 598c2092c5..6e9e408218 100644
--- a/deal.II/lac/include/lac/sparse_vanka.templates.h
+++ b/deal.II/lac/include/lac/sparse_vanka.templates.h
@@ -29,7 +29,7 @@ SparseVanka<number>::SparseVanka(const SparseMatrix<number> &M,
 {
   Assert (M.m() == M.n(), ExcMatrixNotSquare ());
   Assert (M.m() == selected.size(), ExcInvalidVectorSize(M.m(), selected.size()));
-  
+
   if (conserve_mem == false)
     compute_inverses ();
 }
@@ -217,7 +217,7 @@ SparseVanka<number>::operator ()(Vector<number2> &dst,
   dst.clear ();
                                    // then pass on to the function
                                    // that actually does the work
-  apply_preconditioner (dst, src, 0, matrix->m());
+  apply_preconditioner (dst, src);
 };
@@ -228,10 +228,8 @@ template <typename number2>
 void
 SparseVanka<number>::apply_preconditioner (Vector<number2>       &dst,
                                            const Vector<number2> &src,
-                                           const unsigned int     begin,
-                                           const unsigned int     end) const
+                                           const vector<bool>    *const dof_mask) const
 {
-  Assert (begin < end, ExcInvalidRange(begin, end));
   Assert (dst.size() == src.size(),
           ExcInvalidVectorSize(dst.size(), src.size()));
   Assert (dst.size() == matrix->m(),
@@ -249,7 +247,7 @@ SparseVanka<number>::apply_preconditioner (Vector<number2> &dst,
                                    // blocks. this variable is used to
                                    // optimize access to vectors a
                                    // little bit.
-  const bool range_is_restricted = ((begin != 0) || (end != matrix->m()));
+  const bool range_is_restricted = (dof_mask != 0);

                                    // space to be used for local
                                    // systems. allocate as much memory
@@ -266,8 +264,10 @@ SparseVanka<number>::apply_preconditioner (Vector<number2> &dst,
                                    // traverse all rows of the matrix
                                    // which are selected
-  for (unsigned int row=begin; row<end; ++row)
-    if (selected[row] == true)
+  const unsigned int n = matrix->m();
+  for (unsigned int row=0; row<n; ++row)
+    if ((selected[row] == true) &&
+        ((range_is_restricted == false) || ((*dof_mask)[row] == true)))
@@ ... @@ SparseVanka<number>::apply_preconditioner (Vector<number2> &dst,
               if (js == local_index.end())
                 {
                   if (!range_is_restricted ||
-                      ((begin <= col) && (col < end)))
+                      ((*dof_mask)[col] == true))
                     b(i) -= matrix->raw_entry(irow,j) * dst(col);
                 }
               else
@@ -368,7 +368,7 @@ SparseVanka<number>::apply_preconditioner (Vector<number2> &dst,
           const unsigned int i = is->second;
           if (!range_is_restricted ||
-              ((begin <= irow) && (irow < end)))
+              ((*dof_mask)[irow] == true))
             dst(irow) = x(i);
                                    // do nothing if not in
                                    // the range
@@ -388,18 +388,18 @@ template <typename number>
 SparseBlockVanka<number>::SparseBlockVanka (const SparseMatrix<number> &M,
                                             const vector<bool>         &selected,
+                                            const unsigned int          n_blocks,
+                                            const BlockingStrategy      blocking_strategy,
                                             const bool                  conserve_memory,
-                                            const unsigned int          n_threads,
-                                            const unsigned int          n_blocks)
+                                            const unsigned int          n_threads)
                 :
                 SparseVanka<number> (M, selected, conserve_memory, n_threads),
-                n_blocks (n_blocks)
+                n_blocks (n_blocks),
+                dof_masks (n_blocks,
+                           vector<bool>(M.m(), false))
 {
   Assert (n_blocks > 0, ExcInternalError());

-                                   // precompute the splitting points
-  intervals.resize (n_blocks);
-
   const unsigned int n_inverses = count (selected.begin(),
                                          selected.end(),
                                          true);
@@ -407,6 +407,9 @@ SparseBlockVanka<number>::SparseBlockVanka (const SparseMatrix<number> &M,
   const unsigned int n_inverses_per_block = max(n_inverses / n_blocks,
                                                 1U);

+                                   // precompute the splitting points
+  vector<pair<unsigned int, unsigned int> > intervals (n_blocks);
+
                                    // set up start and end index for
                                    // each of the blocks. note that
                                    // we have to work somewhat to get
@@ -421,24 +424,140 @@ SparseBlockVanka<number>::SparseBlockVanka (const SparseMatrix<number> &M,
                                    // consecutive, with other
                                    // consecutive regions where we do
                                    // not have to do something
-  unsigned int c       = 0;
-  unsigned int block   = 0;
-  intervals[0].first   = 0;
-
-  for (unsigned int i=0; (i<M.m()) && (block+1<n_blocks); ++i)
-    {
-      if (selected[i] == true)
-        ++c;
-      if (c == n_inverses_per_block)
-        {
-          intervals[block].second  = i;
-          intervals[block+1].first = i;
-          ++block;
-
-          c = 0;
-        };
-    };
+  {
+    unsigned int c       = 0;
+    unsigned int block   = 0;
+    intervals[0].first   = 0;
+
+    for (unsigned int i=0; (i<M.m()) && (block+1<n_blocks); ++i)
+      {
+        if (selected[i] == true)
+          ++c;
+        if (c == n_inverses_per_block)
+          {
+            intervals[block].second  = i;
+            intervals[block+1].first = i;
+            ++block;
+
+            c = 0;
+          };
+      };
+    intervals[n_blocks-1].second = M.m();
+  };
+
+                                   // now transfer the knowledge on
+                                   // the splitting points into the
+                                   // vector<bool>s that the base
+                                   // class wants to see. the way how
+                                   // we do this depends on the
+                                   // requested blocking strategy
+  switch (blocking_strategy)
+    {
+      case index_intervals:
+      {
+        for (unsigned int block=0; block<n_blocks; ++block)
+          fill_n (dof_masks[block].begin()+intervals[block].first,
+                  intervals[block].second - intervals[block].first,
+                  true);
+        break;
+      };
+
+      case adaptive:
+      {
+                                   // count how often the Lagrange
+                                   // dofs of each block write to
+                                   // each dof
+        vector<vector<unsigned int> > access_count (n_blocks,
+                                                    vector<unsigned int>(M.m(), 0));
+
+                                   // set-up the map that will
+                                   // be used to store the
+                                   // indices each Lagrange dof
+                                   // accesses
+        map<unsigned int, unsigned int> local_index;
+        const SparsityPattern &structure = M.get_sparsity_pattern();
+
+        for (unsigned int row=0; row<M.m(); ++row)
+          if (selected[row] == true)
+            {
+                                   // first find out to which
+                                   // block this dof belongs
+              unsigned int block_number = 0;
+              while (row>=intervals[block_number].second)
+                ++block_number;
+              Assert (block_number < n_blocks, ExcInternalError());
+
+                                   // now traverse the
+                                   // matrix structure to
+                                   // find out to which
+                                   // dofs number the
+                                   // present index wants
+                                   // to write
+              const unsigned int row_length = structure.row_length(row);
+              for (unsigned int i=0; i<row_length; ++i)
+                ++access_count[block_number][structure.column_number(row,i)];
+            };
+
+                                   // now we know how often each
+                                   // block accesses each dof. let
+                                   // each dof belong to the block
+                                   // that accesses it most often;
+                                   // the Lagrange dofs themselves
+                                   // stay within the intervals
+                                   // computed above
+        for (unsigned int row=0; row<M.m(); ++row)
+          if (selected[row] == true)
+            {
+              unsigned int block_number = 0;
+              while (row>=intervals[block_number].second)
+                ++block_number;
+
+              dof_masks[block_number][row] = true;
+            }
+          else
+            {
+                                   // find out which block
+                                   // accesses this dof
+                                   // the most often
+              unsigned int max_accesses     = 0;
+              unsigned int max_access_block = 0;
+              for (unsigned int block=0; block<n_blocks; ++block)
+                if (access_count[block][row] > max_accesses)
+                  {
+                    max_accesses = access_count[block][row];
+                    max_access_block = block;
+                  };
+                                   // each dof should be
+                                   // accessed by at least
+                                   // one block!
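+                                   // (if this assertion fails,
+                                   // then no Lagrange multiplier
+                                   // row writes to this dof at
+                                   // all, so no block has counted
+                                   // an access to it)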
+              Assert (max_accesses > 0, ExcInternalError());
+
+              dof_masks[max_access_block][row] = true;
+            };
+
+        break;
+      };
+
+      default:
+            Assert (false, ExcInternalError());
+    };
-  intervals[n_blocks-1].second = M.m();
 };
@@ -454,25 +573,24 @@ void SparseBlockVanka<number>::operator() (Vector<number2> &dst,
                                    // if no blocking is required, pass
                                    // down to the underlying class
   if (n_blocks == 1)
-    apply_preconditioner (dst, src, 0, dst.size());
+    apply_preconditioner (dst, src);
   else
                                    // otherwise: blocking requested
     {
 #ifdef DEAL_II_USE_MT
-      typedef ThreadManager::Mem_Fun_Data4<const SparseVanka<number>,
-                                           Vector<number2>&, const Vector<number2> &,
-                                           unsigned int, unsigned int> MemFunData;
+      typedef ThreadManager::Mem_Fun_Data3<const SparseVanka<number>,
+                                           Vector<number2>&, const Vector<number2> &,
+                                           const vector<bool> *> MemFunData;
       vector<MemFunData> mem_fun_data (n_blocks,
                                        MemFunData (this,
-                                                   dst, src, 0, 0,
+                                                   dst, src, 0,
                                                    &SparseVanka<number>::template apply_preconditioner<number2>));
-      
+
       ThreadManager thread_manager;
       for (unsigned int block=0; block<n_blocks; ++block)
        {
-         mem_fun_data[block].arg3 = intervals[block].first;
-         mem_fun_data[block].arg4 = intervals[block].second;
+         mem_fun_data[block].arg3 = &dof_masks[block];
          thread_manager.spawn (&mem_fun_data[block],
                                THR_SCOPE_SYSTEM | THR_DETACHED);
        };
      thread_manager.wait ();
@@ ... @@ void SparseBlockVanka<number>::operator() (Vector<number2> &dst,
 #else
       for (unsigned int block=0; block<n_blocks; ++block)
-        apply_preconditioner (dst, src,
-                              intervals[block].first,
-                              intervals[block].second);
+        apply_preconditioner (dst, src, &dof_masks[block]);
 #endif
     };
 };
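
For orientation, here is a sketch of how the new interface might be exercised in user code; it is not part of the commit above. It assumes the solver interfaces of the deal.II library of this period (SolverControl, PrimitiveVectorMemory, SolverGMRES with matrix and vector template arguments), and it presupposes that the matrix, the solution and right hand side vectors, and the mask of Lagrange multiplier rows are set up elsewhere; the block count, thread count, and solver tolerances are arbitrary choices.

    #include <lac/sparse_matrix.h>
    #include <lac/sparse_vanka.h>
    #include <lac/vector.h>
    #include <lac/solver_control.h>
    #include <lac/solver_gmres.h>
    #include <lac/vector_memory.h>
    #include <vector>

    void solve_with_block_vanka (const SparseMatrix<double> &matrix,
                                 Vector<double>             &solution,
                                 const Vector<double>       &rhs,
                                 const vector<bool>         &selected)
    {
                                       // group the dofs into four blocks
                                       // with the new adaptive strategy
                                       // and solve the local systems on
                                       // four threads
      SparseBlockVanka<double> vanka (matrix, selected,
                                      4, SparseBlockVanka<double>::adaptive,
                                      false, 4);

      SolverControl                          control (1000, 1e-10);
      PrimitiveVectorMemory<Vector<double> > memory;
      SolverGMRES<SparseMatrix<double>,Vector<double> > gmres (control, memory);

                                       // vanka's operator() is invoked
                                       // once per GMRES iteration
      gmres.solve (matrix, solution, rhs, vanka);
    }

Whether the wall time gain reported in the documentation above materializes depends, as discussed there, on the number of processors available and on how the degrees of freedom are numbered.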