<h3>lac</h3>
<ol>
+ <li> <p>
+ New: There is now a (private) function <code
+ class="member">SparsityPattern::optimized_lower_bound</code>
+ that is used as an optimized replacement for <code
+ class="member">std::lower_bound</code> when searching in the
+ column number arrays. It unrolls the search for small ranges,
+ and the compiler also seems able to optimize it better since
+ the template parameters have been eliminated, making it about
+ twice as fast as the standard implementation. As a
+ consequence, it also speeds up the SSOR preconditioner, which
+ spent about one third of its time in that function, by
+ approximately 15 per cent. A small illustration of the
+ underlying idea is sketched below.
+ <br>
+ (WB 2001/04/25)
+ </p>
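+ <p>
+ As an editorial illustration only (this sketch is not part of
+ the change itself), the effect of de-templating the search can
+ be reproduced along the following lines; the function name
+ <code>plain_lower_bound</code> and all array contents are made
+ up:
+ </p>
+ <pre>
+ #include &lt;algorithm&gt;
+ #include &lt;cassert&gt;
+
+ // binary search specialized to unsigned int arrays; this is the
+ // core idea behind SparsityPattern::optimized_lower_bound, which
+ // in addition unrolls the search for ranges of fewer than eight
+ // elements
+ const unsigned int *
+ plain_lower_bound (const unsigned int *first,
+                    const unsigned int *last,
+                    const unsigned int  val)
+ {
+   unsigned int len = last - first;
+   while (len > 0)
+     {
+       const unsigned int half = len / 2;
+       const unsigned int * const middle = first + half;
+       if (*middle &lt; val)
+         {
+           first = middle + 1;
+           len -= half + 1;
+         }
+       else
+         len = half;
+     }
+   return first;
+ }
+
+ int main ()
+ {
+   const unsigned int colnums[8] = { 1, 3, 4, 7, 9, 12, 15, 21 };
+   // both searches must agree on the insertion point
+   assert (plain_lower_bound (colnums, colnums+8, 9) ==
+           std::lower_bound (colnums, colnums+8, 9u));
+   return 0;
+ }
+ </pre>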
+
<li> <p>
New: There is now a function <code
class="member">Vector::scale(Vector)</code>
const std::pair<unsigned int,unsigned int> interval,
somenumber *partial_norm) const;
-
// make all other sparse matrices
// friends
template <typename somenumber> friend class SparseMatrix;
/*---------------------- Inline functions -----------------------------------*/
+
template <typename number>
inline
unsigned int SparseMatrix<number>::m () const
#endif
+
+
template <typename number>
SparseMatrix<number>::SparseMatrix () :
cols(0),
// line denotes the diagonal element,
// which we need not check.
const unsigned int first_right_of_diagonal_index
- = (std::lower_bound (&cols->colnums[*rowstart_ptr+1],
- &cols->colnums[*(rowstart_ptr+1)],
- row)
+ = (SparsityPattern::optimized_lower_bound (&cols->colnums[*rowstart_ptr+1],
+ &cols->colnums[*(rowstart_ptr+1)],
+ row)
-
&cols->colnums[0]);
-
+
for (unsigned int j=(*rowstart_ptr)+1; j<first_right_of_diagonal_index; ++j)
*dst_ptr -= om* val[j] * dst(cols->colnums[j]);
for (int row=n-1; row>=0; --row, --rowstart_ptr, --dst_ptr)
{
const unsigned int first_right_of_diagonal_index
- = (std::lower_bound (&cols->colnums[*rowstart_ptr+1],
- &cols->colnums[*(rowstart_ptr+1)],
- static_cast<unsigned int>(row)) -
+ = (SparsityPattern::optimized_lower_bound (&cols->colnums[*rowstart_ptr+1],
+ &cols->colnums[*(rowstart_ptr+1)],
+ static_cast<unsigned int>(row)) -
&cols->colnums[0]);
for (unsigned int j=first_right_of_diagonal_index; j<*(rowstart_ptr+1); ++j)
if (cols->colnums[j] > static_cast<unsigned int>(row))
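

// ---------------------------------------------------------------------
// Editorial aside, not part of the patch: the shape of the lookup the
// two hunks above perform. In this compressed row storage, each row of
// a square pattern stores the diagonal entry first and the remaining
// column indices sorted, so the first entry right of the diagonal is
// found with a lower bound over colnums[rowstart[row]+1, rowstart[row+1]).
// The helper below restates that with generic, made-up names:

#include <algorithm>
#include <vector>

unsigned int
first_right_of_diagonal (const std::vector<unsigned int> &rowstart,
                         const std::vector<unsigned int> &colnums,
                         const unsigned int               row)
{
  // skip the diagonal entry stored at position rowstart[row]; the
  // result is an index into colnums, obtained by subtracting the
  // array's base address as in the code above
  return std::lower_bound (&colnums[0] + rowstart[row] + 1,
                           &colnums[0] + rowstart[row+1],
                           row)
         - &colnums[0];
}

// entries with indices in [rowstart[row]+1, first_right_of_diagonal(...))
// lie left of the diagonal and drive the forward SSOR sweep; the
// remaining entries of the row drive the backward sweep shown above.
// ---------------------------------------------------------------------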
*/
bool compressed;
+ /**
+ * Optimized replacement for
+ * @p{std::lower_bound} for
+ * searching within the range of
+ * column indices. Slashes
+ * execution time by
+ * approximately one half for the
+ * present application, partly
+ * because we have eliminated
+ * templates and the compiler
+ * seems to be able to optimize
+ * better, and partly because the
+ * binary search is replaced by a
+ * linear search for small loop
+ * lengths.
+ */
+ static
+ const unsigned int * const
+ optimized_lower_bound (const unsigned int *first,
+ const unsigned int *last,
+ const unsigned int &val);
+
/**
* Make all sparse matrices
* friends of this class.
/*---------------------- Inline functions -----------------------------------*/
+inline
+const unsigned int * const
+SparsityPattern::optimized_lower_bound (const unsigned int *first,
+ const unsigned int *last,
+ const unsigned int &val)
+{
+ // this function is mostly copied
+ // over from the STL __lower_bound
+ // function, but with template args
+ // replaced by the actual data
+ // types needed here, and above all
+ // with an unrolled search on the
+ // last few (at most seven) elements
+ unsigned int len = last-first;
+
+ if (len==0)
+ return first;
+
+ while (true)
+ {
+ // if the length is 7 or less,
+ // do an unrolled search,
+ // using a switch without
+ // breaks so that control
+ // falls through the
+ // remaining comparisons
+ if (len < 8)
+ {
+ switch (len)
+ {
+ case 7:
+ if (*first >= val)
+ return first;
+ ++first;
+ case 6:
+ if (*first >= val)
+ return first;
+ ++first;
+ case 5:
+ if (*first >= val)
+ return first;
+ ++first;
+ case 4:
+ if (*first >= val)
+ return first;
+ ++first;
+ case 3:
+ if (*first >= val)
+ return first;
+ ++first;
+ case 2:
+ if (*first >= val)
+ return first;
+ ++first;
+ case 1:
+ if (*first >= val)
+ return first;
+ return first+1;
+ default:
+ // the indices do not
+ // seem to be sorted
+ // correctly, or len
+ // somehow became zero;
+ // neither should have
+ // happened
+ Assert (false, ExcInternalError());
+ };
+ };
+
+
+
+ const unsigned int half = len >> 1;
+ const unsigned int * const middle = first + half;
+
+ // if the value is larger than
+ // that pointed to by the
+ // middle pointer, then the
+ // insertion point must be
+ // right of it
+ if (*middle < val)
+ {
+ first = middle + 1;
+ len -= half + 1;
+ }
+ else
+ len = half;
+ }
+}
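+
+// ----------------------------------------------------------------------
+// Editorial illustration, not part of the patch: a randomized agreement
+// check of the algorithm above against std::lower_bound. Because
+// optimized_lower_bound itself is private, this sketch checks a
+// standalone copy of the same algorithm, with the unrolled switch
+// condensed into an equivalent linear scan; all names below are made up.
+
+#include <algorithm>
+#include <cassert>
+#include <cstdlib>
+#include <vector>
+
+const unsigned int *
+unrolled_lower_bound (const unsigned int *first,
+                      const unsigned int *last,
+                      const unsigned int  val)
+{
+  unsigned int len = last-first;
+  if (len==0)
+    return first;
+  while (true)
+    {
+      if (len < 8)
+        {
+          // linear scan over the remaining elements; this is the
+          // loop form of the switch fall-through above
+          for (; len > 1; --len, ++first)
+            if (*first >= val)
+              return first;
+          return (*first >= val ? first : first+1);
+        }
+      const unsigned int half = len >> 1;
+      const unsigned int * const middle = first + half;
+      if (*middle < val)
+        {
+          first = middle + 1;
+          len -= half + 1;
+        }
+      else
+        len = half;
+    }
+}
+
+void cross_check_lower_bound ()
+{
+  // random lengths between 1 and 20 hit both the linear branch
+  // (short arrays) and the bisection branch (longer arrays)
+  for (unsigned int run=0; run<1000; ++run)
+    {
+      std::vector<unsigned int> v (1 + std::rand() % 20);
+      for (unsigned int i=0; i<v.size(); ++i)
+        v[i] = std::rand() % 30;
+      std::sort (v.begin(), v.end());
+
+      const unsigned int val = std::rand() % 30;
+      assert (unrolled_lower_bound (&v[0], &v[0]+v.size(), val)
+              == std::lower_bound (&v[0], &v[0]+v.size(), val));
+    }
+}
+// ----------------------------------------------------------------------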
+
+
+
inline
unsigned int
SparsityPattern::n_rows () const
+inline
unsigned int
SparsityPattern::operator () (const unsigned int i,
const unsigned int j) const
Assert (j<cols, ExcInvalidIndex(j,cols));
Assert (compressed, ExcNotCompressed());
+ // let's see whether there is
+ // something in this line
+ if (rowstart[i] == rowstart[i+1])
+ return invalid_entry;
+
// check first entry separately, since
// for square matrices this is
- // the diagonal entry (check only
- // if a first entry exists)
- if (rowstart[i] != rowstart[i+1])
- {
- if (j == colnums[rowstart[i]])
- return rowstart[i];
- }
- else
- // no first entry exists for this
- // line
- return invalid_entry;
+ // the diagonal entry
+ if ((i==j) && (rows==cols))
+ return rowstart[i];
// all other entries are sorted, so
// we can use a binary search
// algorithm. note that the entries
// are only sorted upon compression,
// which is verified by the assertion
// at the top of this function, so it
// may not be called for noncompressed
// structures.
- const unsigned int * const p = std::lower_bound (&colnums[rowstart[i]+1],
- &colnums[rowstart[i+1]],
- j);
+ const unsigned int * const sorted_region_start = (rows==cols ?
+ &colnums[rowstart[i]+1] :
+ &colnums[rowstart[i]]);
+ const unsigned int * const p = optimized_lower_bound (sorted_region_start,
+ &colnums[rowstart[i+1]],
+ j);
// make sure we are within bounds
// before dereferencing
if ((p != &colnums[rowstart[i+1]]) &&
(*p == j))
return (p - &colnums[0]);
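

// ----------------------------------------------------------------------
// Editorial illustration, not part of the patch: typical use of the
// operator above through the public interface. This assumes the usual
// SparsityPattern members (the constructor, add(), compress(), and the
// static invalid_entry member); the function name and sizes are made up.
void example_sparsity_lookup ()
{
  SparsityPattern sp (4, 4, 3);   // 4x4 pattern, at most 3 entries per row
  sp.add (1, 2);
  sp.compress ();                 // sorts column indices, as operator()
                                  // requires

  // position of the stored entry (1,2) within the index arrays
  const unsigned int position = sp(1,2);

  // diagonal entries of a square pattern are always stored, so this
  // lookup takes the early (i==j) return in operator()
  const unsigned int diagonal = sp(2,2);

  // entry (1,3) was never added, so the lookup reports invalid_entry
  const bool missing = (sp(1,3) == SparsityPattern::invalid_entry);

  (void)position; (void)diagonal; (void)missing;
}
// ----------------------------------------------------------------------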