From 51d384a489d3258f9cd30dd9c52bafcde0138d94 Mon Sep 17 00:00:00 2001 From: kronbichler Date: Wed, 25 Mar 2009 16:55:27 +0000 Subject: [PATCH] Make the local_to_global functions in the ConstraintMatrix even more efficient by pre-sorting all the rows and columns that are going to be written into the sparsity pattern / sparse matrix. This makes the values added for each element really unique, which is faster. Moreover, the matrix and sparsity pattern classes can now take an additional argument sorted when adding many elements, and some of them already make use of the sorted-ness. For block matrices, the sortedness is used in an even more clever way by writing directly into the blocks (even though I had to introduce a lot of duplicate code to be in line with the templates). git-svn-id: https://svn.dealii.org/trunk@18512 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/lac/include/lac/block_matrix_base.h | 86 +- .../lac/compressed_set_sparsity_pattern.h | 6 +- .../lac/compressed_simple_sparsity_pattern.h | 6 +- .../include/lac/compressed_sparsity_pattern.h | 6 +- .../include/lac/constraint_matrix.templates.h | 1728 ++++++++++++----- deal.II/lac/include/lac/petsc_matrix_base.h | 6 +- deal.II/lac/include/lac/sparse_matrix.h | 77 +- deal.II/lac/include/lac/sparse_matrix_ez.h | 6 +- deal.II/lac/include/lac/sparsity_pattern.h | 3 +- .../lac/include/lac/trilinos_sparse_matrix.h | 6 +- .../include/lac/trilinos_sparsity_pattern.h | 6 +- deal.II/lac/source/sparsity_pattern.cc | 12 +- deal.II/lac/source/trilinos_sparse_matrix.cc | 10 +- 13 files changed, 1453 insertions(+), 505 deletions(-) diff --git a/deal.II/lac/include/lac/block_matrix_base.h b/deal.II/lac/include/lac/block_matrix_base.h index 9e28ce9218..6c81977467 100644 --- a/deal.II/lac/include/lac/block_matrix_base.h +++ b/deal.II/lac/include/lac/block_matrix_base.h @@ -674,7 +674,8 @@ class BlockMatrixBase : public Subscriptor const unsigned int n_cols, const unsigned int *col_indices, const number *values, - const bool elide_zero_values = true); + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); /** * Return the value of the entry @@ -883,6 +884,18 @@ class BlockMatrixBase : public Subscriptor */ const_iterator end (const unsigned int r) const; + /** + * Return a reference to the underlying + * BlockIndices data of the rows. + */ + const BlockIndices & get_row_indices () const; + + /** + * Return a reference to the underlying + * BlockIndices data of the rows. + */ + const BlockIndices & get_column_indices () const; + /** @addtogroup Exceptions * @{ */ @@ -2047,11 +2060,59 @@ BlockMatrixBase::add (const unsigned int row, const unsigned int n_cols, const unsigned int *col_indices, const number *values, - const bool elide_zero_values) + const bool elide_zero_values, + const bool col_indices_are_sorted) { // TODO: Look over this to find out // whether we can do that more // efficiently. + if (col_indices_are_sorted == true) + { +#ifdef DEBUG + // check whether indices really are + // sorted. 
+ unsigned int before = col_indices[0]; + for (unsigned int i=1; i + row_index = this->row_block_indices.global_to_local (row); + + if (this->n_block_cols() > 1) + { + const unsigned int * first_block = std::lower_bound (col_indices, + col_indices+n_cols, + this->column_block_indices.block_start(1)); + + const unsigned int n_zero_block_indices = first_block - col_indices; + block(row_index.first, 0).add (row_index.second, + n_zero_block_indices, + col_indices, + values, + elide_zero_values, + col_indices_are_sorted); + + if (n_zero_block_indices < n_cols) + this->add(row, n_cols - n_zero_block_indices, first_block, + values + n_zero_block_indices, elide_zero_values, + false); + } + else + { + block(row_index.first, 0). add (row_index.second, + n_cols, + col_indices, + values, + elide_zero_values, + col_indices_are_sorted); + } + + return; + } // Resize scratch arrays if (column_indices.size() < this->n_block_cols()) @@ -2136,7 +2197,8 @@ BlockMatrixBase::add (const unsigned int row, counter_within_block[block_col], &column_indices[block_col][0], &column_values[block_col][0], - false); + false, + col_indices_are_sorted); } } @@ -2236,6 +2298,24 @@ BlockMatrixBase::operator /= (const value_type factor) +template +const BlockIndices & +BlockMatrixBase::get_row_indices () const +{ + return this->row_block_indices; +} + + + +template +const BlockIndices & +BlockMatrixBase::get_column_indices () const +{ + return this->column_block_indices; +} + + + template template void diff --git a/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h index 112b637a70..946526c24e 100644 --- a/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h @@ -226,7 +226,8 @@ class CompressedSetSparsityPattern : public Subscriptor template void add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end); + ForwardIterator end, + const bool indices_are_sorted = false); /** * Check if a value at a certain @@ -471,7 +472,8 @@ inline void CompressedSetSparsityPattern::add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end) + ForwardIterator end, + const bool /*indices_are_sorted*/) { Assert (row < rows, ExcIndexRange (row, 0, rows)); diff --git a/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h index 57033f93d6..fb8f716127 100644 --- a/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h @@ -195,7 +195,8 @@ class CompressedSimpleSparsityPattern : public Subscriptor template void add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end); + ForwardIterator end, + const bool indices_are_sorted = false); /** * Check if a value at a certain @@ -519,7 +520,8 @@ inline void CompressedSimpleSparsityPattern::add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end) + ForwardIterator end, + const bool /*indices_are_sorted*/) { Assert (row < rows, ExcIndexRange (row, 0, rows)); diff --git a/deal.II/lac/include/lac/compressed_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_sparsity_pattern.h index 2ad866c797..233931e50b 100644 --- a/deal.II/lac/include/lac/compressed_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_sparsity_pattern.h @@ -199,7 +199,8 @@ class CompressedSparsityPattern : public Subscriptor template void add_entries (const 
unsigned int row, ForwardIterator begin, - ForwardIterator end); + ForwardIterator end, + const bool indices_are_sorted = false); /** * Check if a value at a certain @@ -528,7 +529,8 @@ inline void CompressedSparsityPattern::add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end) + ForwardIterator end, + const bool /*indices_are_sorted*/) { Assert (row < rows, ExcIndexRange (row, 0, rows)); diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h index 8d85d1eef0..7775e8db91 100644 --- a/deal.II/lac/include/lac/constraint_matrix.templates.h +++ b/deal.II/lac/include/lac/constraint_matrix.templates.h @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include @@ -743,6 +745,19 @@ distribute_local_to_global (const Vector &local_vector, { for (unsigned int i=0; i &local_vector, // the nodes to which it is // constrained are also // fixed - if ((position == lines.end()) - || - (position->line != local_dof_indices[i])) - global_vector(local_dof_indices[i]) += local_vector(i); - else - { - for (unsigned int j=0; jentries.size(); ++j) - global_vector(position->entries[j].first) - += local_vector(i) * position->entries[j].second; - } + Assert (position->line == local_dof_indices[i], + ExcInternalError()); + for (unsigned int j=0; jentries.size(); ++j) + global_vector(position->entries[j].first) + += local_vector(i) * position->entries[j].second; } } } @@ -812,7 +822,664 @@ distribute_local_to_global (const FullMatrix &local_matrix, -template + // Some helper definitions for the + // local_to_global functions. +namespace internals +{ + struct distributing + { + distributing (const unsigned int local_row = deal_II_numbers::invalid_unsigned_int); + unsigned int local_row; + std::vector > constraints; + }; + distributing::distributing (const unsigned int local_row) : + local_row (local_row) {} + + // a version of std::lower_bound for a + // pair of unsigned int and distributing, + // without taking into account the second + // argument of the pair. + inline + std::vector >::iterator + lower_bound (std::vector >::iterator begin, + std::vector >::iterator end, + unsigned int val) + { + unsigned int length = end - begin; + unsigned int half; + std::vector >::iterator middle; + + while (length > 0) + { + half = length >> 1; + middle = begin + half; + if (middle->first < val) + { + begin = middle; + ++begin; + length = length - half - 1; + } + else + length = half; + } + return begin; + } + + // a function that appends an additional + // row to the list of values, or appends + // a value to an already existing + // row. Similar functionality as for + // std::map, + // but much faster here. 
+ inline + void + insert_index (std::vector > &my_indices, + const unsigned int row, + const unsigned int local_row, + const std::pair constraint + = std::make_pair(0,0)) + { + typedef std::vector >::iterator + index_iterator; + index_iterator pos, pos1; + + if (my_indices.size() == 0 || my_indices.back().first < row) + { + my_indices.push_back(std::make_pair(row,distributing())); + pos1 = my_indices.end()-1; + } + else + { + pos = lower_bound (my_indices.begin(), + my_indices.end(), + row); + + if (pos->first == row) + pos1 = pos; + else + pos1 = my_indices.insert(pos,std::make_pair + (row,distributing())); + } + + if (local_row == deal_II_numbers::invalid_unsigned_int) + pos1->second.constraints.push_back (constraint); + else + pos1->second.local_row = local_row; + } + + + // a lot of functions that cover all the + // different situations in templated + // calls to + // distribute_local_to_global. this is a + // lot of duplicated code, find out + // whether we can do this better + template + inline + void + make_block_indices_local (const MatrixType &mat, + std::vector &indices, + const std::vector &block_end) + { + return; + } + + template + inline + void + make_block_indices_local (const BlockSparseMatrix &mat, + std::vector &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i + inline + void + sum_into_matrix (MatrixType &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &vals, + const std::vector &block_ends) + { + // should get here only if we have not + // modified anything with + // make_block_indices_local + Assert (block_ends.size() == 1, ExcInternalError()); + if (n_elements > 0) + mat.add(row, n_elements, &cols[0], &vals[0], false, true); + } + + // now some efficient versions for block + // matrices. have to replicate code for + // each matrix type to make things work + // (or is there a smarter way for doing + // that?) 
+ template + inline + void + sum_into_matrix (BlockSparseMatrix &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &vals, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add(row_block.second, current_length, + &cols[col_begins], &vals[col_begins], + false, true); + col_begins = block_ends[i]; + } + } + + +#ifdef DEAL_II_USE_PETSC + inline + void + sum_into_matrix (PETScWrappers::BlockSparseMatrix &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &vals, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add(row_block.second, current_length, + &cols[col_begins], &vals[col_begins], + false, true); + col_begins = block_ends[i]; + } + } + + inline + void + sum_into_matrix (PETScWrappers::MPI::BlockSparseMatrix &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &vals, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add(row_block.second, current_length, + &cols[col_begins], &vals[col_begins], + false, true); + col_begins = block_ends[i]; + } + } +#endif + +#ifdef DEAL_II_USE_TRILINOS + inline + void + sum_into_matrix (TrilinosWrappers::BlockSparseMatrix &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &vals, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add(row_block.second, current_length, + &cols[col_begins], &vals[col_begins], + false, true); + col_begins = block_ends[i]; + } + } +#endif + + + + inline + void + make_block_indices_local (const BlockSparsityPattern &mat, + std::vector &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned 
int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i &indices, + std::vector &block_end) + { + typedef std::vector::iterator row_iterator; + const unsigned int num_blocks = mat.n_block_rows(); + Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); + row_iterator col_indices = indices.begin(); + block_end.resize(num_blocks,0); + unsigned int n_cols = indices.size(); + for (unsigned int i=0;i + inline + void + insert_into_sparsity (SparsityType &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &block_ends) + { + // should get here only if we have not + // modified anything with + // make_block_indices_local + Assert (block_ends.size() == 1, ExcInternalError()); + if (n_elements > 0) + mat.add_entries(row, cols.begin(), cols.begin()+n_elements); + } + + // now some efficient versions for block + // sparsity patterns. have to replicate + // code for each sp type to make things + // work (or is there a smarter way for + // doing the template stuff?) 
+ inline + void + insert_into_sparsity (BlockSparsityPattern &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = + mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add_entries(row_block.second, + cols.begin()+col_begins, + cols.begin()+col_begins+current_length); + col_begins = block_ends[i]; + } + } + + inline + void + insert_into_sparsity (BlockCompressedSparsityPattern &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = + mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add_entries(row_block.second, + cols.begin()+col_begins, + cols.begin()+col_begins+current_length); + col_begins = block_ends[i]; + } + } + + inline + void + insert_into_sparsity (BlockCompressedSetSparsityPattern &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = + mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add_entries(row_block.second, + cols.begin()+col_begins, + cols.begin()+col_begins+current_length); + col_begins = block_ends[i]; + } + } + + inline + void + insert_into_sparsity (BlockCompressedSimpleSparsityPattern &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair row_block = + mat.get_row_indices().global_to_local(row); + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add_entries(row_block.second, + cols.begin()+col_begins, + cols.begin()+col_begins+current_length); + col_begins = block_ends[i]; + } + } + + +#ifdef DEAL_II_USE_TRILINOS + inline + void + insert_into_sparsity (TrilinosWrappers::BlockSparsityPattern &mat, + const unsigned int row, + const unsigned int n_elements, + const std::vector &cols, + const std::vector &block_ends) + { + Assert (block_ends.size() == mat.n_block_rows(), + ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); + Assert (block_ends[mat.n_block_rows()-1] == n_elements, + ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); + unsigned int col_begins = 0; + const std::pair 
row_block = + mat.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + mat.block(row_block.first, i).add_entries(row_block.second, + cols.begin()+col_begins, + cols.begin()+col_begins+current_length); + col_begins = block_ends[i]; + } + } +#endif + +} + + + +template void ConstraintMatrix:: distribute_local_to_global (const FullMatrix &local_matrix, @@ -843,264 +1510,353 @@ distribute_local_to_global (const FullMatrix &local_matrix, const unsigned int n_local_dofs = local_dof_indices.size(); - // A lock that allows only one thread at - // time to go on in this function. - Threads::ThreadMutex::ScopedLock lock(mutex); - - // have a special case where there are no - // constraints at all, since then we can be - // a lot faster - if (lines.size() == 0) + // when distributing the local data to + // the global matrix, we can quite + // cheaply sort the indices (obviously, + // this introduces the need for + // allocating some memory on the way, but + // we need to do this only for rows, + // whereas the distribution process + // itself goes over rows and + // columns). This has the advantage that + // when writing into the global matrix, + // we can make use of the sortedness. + + // so the first step is to create a + // sorted list of all row values that are + // possible. these values are either the + // rows from unconstrained dofs, or some + // indices introduced by dofs constrained + // to a combination of some other + // dofs. regarding the data type, choose + // an STL vector of a pair of unsigned + // ints (for global columns) and internal + // data (containing local columns + + // possible jumps from + // constraints). Choosing an STL map + // would be much more expensive here! + typedef std::vector > row_data; + row_data my_indices; + my_indices.reserve(n_local_dofs); + + std::vector > constraint_lines; + if (use_vectors == true) + constraint_lines.reserve (n_local_dofs); + + // cache whether we have no constraints + // at all. + bool have_constrained_dofs = false; + + // so go through all the local dofs, and + // resolve the list of all constraints. + for (unsigned int i=0; i::const_iterator + position = std::lower_bound (lines.begin(), + lines.end(), + index_comparison); + + // should only get here when the row + // actually is constrained + Assert (position->line == local_dof_indices[i], + ExcInternalError()); + + for (unsigned int q=0; qentries.size(); ++q) + { + have_constrained_dofs = true; + internals::insert_index(my_indices, position->entries[q].first, + deal_II_numbers::invalid_unsigned_int, + std::make_pair + (i,position->entries[q].second)); + } + + // to make sure that the global matrix + // remains invertible, we need to do + // something with the diagonal + // elements. add the absolute value of + // the local matrix, so the resulting + // entry will always be positive and + // furthermore be in the same order of + // magnitude as the other elements of the + // matrix + // + // note that this also captures the + // special case that a dof is both + // constrained and fixed (this can happen + // for hanging nodes in 3d that also + // happen to be on the boundary). in that + // case, following the above program + // flow, it is realized that when + // distributing the row and column no + // elements of the matrix are actually + // touched if all the degrees of freedom + // to which this dof is constrained are + // also constrained (the usual case with + // hanging nodes in 3d). 
however, in the + // line below, we do actually do + // something with this dof + Threads::ThreadMutex::ScopedLock lock(mutex); + global_matrix.add(local_dof_indices[i],local_dof_indices[i], + std::fabs(local_matrix(i,i))); + if (use_vectors == true) - for (unsigned int i=0; i(i,&*position)); } - else + + const unsigned int n_actual_dofs = my_indices.size(); + + std::vector localized_row_indices (n_actual_dofs); + for (unsigned int i=0; i block_ends(1, n_actual_dofs); + internals::make_block_indices_local (global_matrix, localized_row_indices, + block_ends); + std::vector actual_block_ends (block_ends.size()); + + // create arrays for the column data + // (indices and values) that will then be + // written into the matrix. + std::vector cols (n_actual_dofs); + std::vector vals (n_actual_dofs); + + // now do the actual job. + for (unsigned int i=0; i - constraint_lines (n_local_dofs, - static_cast(0)); - unsigned int n_max_entries_per_row = 0; - for (unsigned int i=0; i::iterator block_it = block_ends.begin(); + const unsigned int row = my_indices[i].first; + const unsigned int loc_row = my_indices[i].second.local_row; + double val = 0; + + // fast function if there are no indirect + // references to any of the local rows at + // all on this set of dofs (saves a lot + // of checks). the only check we actually + // need to perform is whether the matrix + // element is zero. + if (have_constrained_dofs == false) + { + Assert(loc_row >= 0 && loc_row < n_local_dofs, + ExcInternalError()); - const std::vector::const_iterator - position = std::lower_bound (lines.begin(), - lines.end(), - index_comparison); - - // if this dof is constrained, - // then set the respective entry - // in the array. otherwise leave - // it at the invalid position - if ((position != lines.end()) && - (position->line == local_dof_indices[i])) + for (unsigned int j=0; j < n_actual_dofs; ++j) { - constraint_lines[i] = &*position; - n_max_entries_per_row += position->entries.size(); + // now comes the hack that sets the + // correct end of block in case we work + // with block matrices. + while (j == *block_it) + { + actual_block_ends[block_it-block_ends.begin()] = col_counter; + ++block_it; + Assert ((unsigned int)(block_it - block_ends.begin()) <= block_ends.size(), + ExcInternalError()); + } + + const unsigned int loc_col = my_indices[j].second.local_row; + Assert(loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + + if (local_matrix(loc_row,loc_col) != 0) + { + vals[col_counter] = local_matrix(loc_row,loc_col); + cols[col_counter] = localized_row_indices[j]; + col_counter++; + } + } + // now comes the hack that sets the + // correct end of block in case we work + // with block matrices. + while (n_actual_dofs == *block_it) + { + actual_block_ends[block_it-block_ends.begin()] = col_counter; + ++block_it; + Assert ((unsigned int)(block_it - block_ends.begin()) <= block_ends.size(), + ExcInternalError()); } - } - // We need to add the number of - // entries in the local matrix in - // order to obtain a sufficient size - // for the scratch array. 
- n_max_entries_per_row += n_local_dofs; - if (column_indices.size() < n_max_entries_per_row) - column_indices.resize(n_max_entries_per_row); - if (column_values.size() < n_max_entries_per_row) - column_values.resize(column_indices.size()); - - // now distribute entries row by row - for (unsigned int i=0; iinhomogeneity * + local_matrix(loc_row,constraint_lines[i].first); + } + } - // ok, this row is not constrained, all the - // entries that will be created end up in - // the row given by local_dof_indices[i]. - if (is_constrained_i == false) + // slower functions when there are + // indirect references and when we need + // to do some more checks. + else + { + for (unsigned int j=0; j < n_actual_dofs; ++j) { - unsigned int col_counter = 0; - double inhom_correction = 0; - for (unsigned int j=0; j= 0 && loc_row < n_local_dofs, + ExcInternalError()); - if (is_constrained_j == false) + // case 1a: col has direct contribution + // in local matrix + if (loc_col != deal_II_numbers::invalid_unsigned_int) { - // neither row nor column - // is constrained - column_indices[col_counter] = local_dof_indices[j]; - column_values[col_counter] = local_matrix(i,j); - col_counter++; + Assert (loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + col_val = local_matrix(loc_row,loc_col); } - else - { - // column is - // constrained. This - // creates entries in - // several new column - // entries according to - // the information in the - // constraint line. - for (unsigned int p=0; pentries.size(); ++p) - { - column_indices[col_counter] = position_j->entries[p].first; - column_values[col_counter] = local_matrix(i,j) * - position_j->entries[p].second; - col_counter++; - } + // case 1b: col has no direct + // contribution in local matrix + else + col_val = 0; + + // account for indirect contributions by + // constraints + for (unsigned int p=0; pinhomogeneity; + // case 2: row has no direct contribution in + // local matrix + else + col_val = 0; + + // account for indirect contributions by + // constraints in row, going trough the + // direct and indirect references in the + // given column. + for (unsigned int q=0; q= 0 && loc_col < n_local_dofs, + ExcInternalError()); + add_this = local_matrix(my_indices[i].second.constraints[q].first, + loc_col); } + else + add_this = 0; + + for (unsigned int p=0; p 0) - global_matrix.add(local_dof_indices[i], col_counter, - &column_indices[0], &column_values[0], - false); - if (use_vectors == true) - global_vector(local_dof_indices[i]) += local_vector(i) - inhom_correction; + + + // if we got some nontrivial value, + // append it to the array of values. + if (col_val != 0) + { + cols[col_counter] = localized_row_indices[j]; + vals[col_counter] = col_val; + col_counter++; + } } - // ok, row is - // constrained. this - // means that we will get - // entries to several - // rows. For each such - // row, we can use the - // scratch array. We will - // go through the - // different lines - // individually, and for - // each one proceed as - // above. - else + // now comes the hack that sets the + // correct end of block in case we work + // with block matrices. 
+ while (n_actual_dofs == *block_it) { - for (unsigned int q=0; qentries.size(); ++q) - { - unsigned int col_counter = 0; - const unsigned int row = position_i->entries[q].first; - const double weight = position_i->entries[q].second; - double inhom_correction = 0; - - for (unsigned int j=0; jentries.size(); ++p) - { - column_indices[col_counter] = position_j->entries[p].first; - column_values[col_counter] = local_matrix(i,j) * weight * - position_j->entries[p].second; - col_counter++; - } - inhom_correction += local_matrix(i,j) * - position_j->inhomogeneity; - } - } - - // write the scratch array into the - // sparse matrix - Assert (col_counter <= n_max_entries_per_row, ExcInternalError()); - if (col_counter > 0) - global_matrix.add(row, col_counter, - &column_indices[0], &column_values[0], - false); + actual_block_ends[block_it-block_ends.begin()] = col_counter; + ++block_it; + Assert ((unsigned int)(block_it - block_ends.begin()) <= block_ends.size(), + ExcInternalError()); + } - // And we take care of the vector - if (use_vectors == true) - global_vector(row) += (local_vector(i) - inhom_correction) * weight; + // now to the vectors. besides doing the + // same job as we did above (i.e., + // distribute the content of the local + // vector into the global one), need to + // account for inhomogeneities here: thie + // corresponds to eliminating the + // respective column in the local matrix + // with value on the right hand side. + if (use_vectors == true) + { + if (loc_row != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_row >= 0 && loc_row < n_local_dofs, + ExcInternalError()); + val = local_vector(loc_row); + for (unsigned int i=0; iinhomogeneity * + local_matrix(loc_row,constraint_lines[i].first); } - // to make sure that the - // global matrix remains - // invertible, we need to - // do something with the - // diagonal elements. add - // the absolute value of - // the local matrix, so - // the resulting entry - // will always be - // positive and - // furthermore be in the - // same order of - // magnitude as the other - // elements of the matrix - // - // note that this also - // captures the special - // case that a dof is - // both constrained and - // fixed (this can happen - // for hanging nodes in - // 3d that also happen to - // be on the - // boundary). in that - // case, following the - // above program flow, it - // is realized that when - // distributing the row - // and column no elements - // of the matrix are - // actually touched if - // all the degrees of - // freedom to which this - // dof is constrained are - // also constrained (the - // usual case with - // hanging nodes in - // 3d). 
however, in the - // line below, we do - // actually do something - // with this dof - global_matrix.add(local_dof_indices[i],local_dof_indices[i], - std::fabs(local_matrix(i,i))); + for (unsigned int q=0; q < my_indices[i].second.constraints.size(); ++q) + { + const unsigned int loc_row_q = my_indices[i].second.constraints[q].first; + double add_this = local_vector (loc_row_q); + for (unsigned int k=0; kinhomogeneity * + local_matrix(loc_row_q,constraint_lines[k].first); + val += add_this * my_indices[i].second.constraints[q].second; + } } } + + // finally, write all the information + // that accumulated under the given + // process into the global matrix row and + // into the vector + Threads::ThreadMutex::ScopedLock lock(mutex); + internals::sum_into_matrix (global_matrix, row, col_counter, + cols, vals, actual_block_ends); + if (val != 0) + global_vector(row) += val; } } @@ -1125,262 +1881,284 @@ add_entries_local_to_global (const std::vector &local_dof_indices, ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs)); } - // A lock that allows only one thread at - // time to go on in this function. - Threads::ThreadMutex::ScopedLock lock(mutex); - if (lines.size() == 0) - { - if (column_indices.size() < local_dof_indices.size()) - column_indices.resize(local_dof_indices.size()); - - // if the dof_mask is not active, we can - // just submit the local_dof_indices - // array for one row after the other. - if (dof_mask_is_active == false) - for (unsigned int i=0; i > row_data; + row_data my_indices; + my_indices.reserve(n_local_dofs); + + bool have_constrained_dofs = false; + + for (unsigned int i=0; i - constraint_lines (n_local_dofs, - static_cast(0)); - for (unsigned int i=0; i::const_iterator - position = std::lower_bound (lines.begin(), - lines.end(), - index_comparison); + if (constraint_line_exists[local_dof_indices[i]] == false) + { + internals::insert_index(my_indices, local_dof_indices[i], i); + continue; + } + + ConstraintLine index_comparison; + index_comparison.line = local_dof_indices[i]; + + const std::vector::const_iterator + position = std::lower_bound (lines.begin(), + lines.end(), + index_comparison); + + Assert (position->line == local_dof_indices[i], + ExcInternalError()); - if ((position != lines.end()) && - (position->line == local_dof_indices[i])) + for (unsigned int q=0; qentries.size(); ++q) + { + have_constrained_dofs = true; + internals::insert_index(my_indices, position->entries[q].first, + deal_II_numbers::invalid_unsigned_int, + std::make_pair (i,0.)); + } + + // need to add the whole row and column + // structure in case we keep constrained + // entries. Unfortunately, we can't use + // the nice matrix structure we use + // elsewhere, so manually add those + // indices. + Threads::ThreadMutex::ScopedLock lock(mutex); + if (keep_constrained_entries == true) + { + for (unsigned int j=0; jentries.size(); + bool add_this_ij = true, add_this_ji = true; + if (dof_mask_is_active == true) + { + if (dof_mask[i][j] == false) + add_this_ij = false; + if (dof_mask[j][i] == false) + add_this_ji = false; + } + if (add_this_ij == true) + sparsity_pattern.add(local_dof_indices[i], + local_dof_indices[j]); + if (add_this_ji == true) + sparsity_pattern.add(local_dof_indices[j], + local_dof_indices[i]); } - } + } + else + // don't keep constrained entries - just + // add the diagonal. 
+ sparsity_pattern.add(local_dof_indices[i],local_dof_indices[i]); + } - n_max_entries_per_row += n_local_dofs; - if (column_indices.size() < n_max_entries_per_row) - column_indices.resize(n_max_entries_per_row); - - // we might add the same row with some - // column entries several times when - // using constraints. To avoid doing so, - // we keep a list of (global) rows that - // have already been inserted, which is - // then used to break the loop. This is - // only useful for the case when we leave - // out constrained dofs. Otherwise, there - // are some more entries that we can't - // keep track of that easily. - std::vector already_inserted; - already_inserted.reserve(n_local_dofs); + const unsigned int n_actual_dofs = my_indices.size(); - for (unsigned int i=0; i localized_row_indices (n_actual_dofs); + for (unsigned int i=0; i block_ends(1, n_actual_dofs); + internals::make_block_indices_local (sparsity_pattern, localized_row_indices, + block_ends); + std::vector actual_block_ends (block_ends.size()); - if (is_constrained_i == false) + // create arrays for the column data + // (indices and values) that will then be + // written into the matrix. + std::vector cols (n_actual_dofs); + + // now do the actual job. + for (unsigned int i=0; i::iterator block_it = block_ends.begin(); + const unsigned int row = my_indices[i].first; + const unsigned int loc_row = my_indices[i].second.local_row; + + // fast function if there are no indirect + // references to any of the local rows at + // all on this set of dofs + if (have_constrained_dofs == false) + { + Assert(loc_row >= 0 && loc_row < n_local_dofs, + ExcInternalError()); + + for (unsigned int j=0; j < n_actual_dofs; ++j) { - // in case we are at a row that has - // already been added, do not need to go - // on. the way how we do it is to first - // find an insertion point and then check - // whether the element is already present - std::vector::iterator inserted_it = - std::lower_bound(already_inserted.begin(), already_inserted.end(), - local_dof_indices[i]); - if (inserted_it != already_inserted.end()) - if (*inserted_it == local_dof_indices[i]) - continue; - - unsigned int col_counter = 0; - for (unsigned int j=0; jentries.size(); ++q) - column_indices[col_counter++] = position_j->entries[q].first; + actual_block_ends[block_it-block_ends.begin()] = col_counter; + ++block_it; + Assert ((unsigned int)(block_it - block_ends.begin()) <= block_ends.size(), + ExcInternalError()); } - // Check whether we did remain within the - // arrays when adding elements into the - // scratch arrays. - Assert (col_counter <= n_max_entries_per_row, ExcInternalError()); - - // Finally, write the scratch array into - // the sparsity pattern. Of course, do it - // only in case there is anything to - // write... - if (col_counter > 0) - sparsity_pattern.add_entries (local_dof_indices[i], - column_indices.begin(), - column_indices.begin()+col_counter); - - // now this row has been visited, so se - if (keep_constrained_entries == false) - already_inserted.insert(inserted_it, local_dof_indices[i]); + + const unsigned int loc_col = my_indices[j].second.local_row; + Assert(loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + + bool add_this = true; + if (dof_mask_is_active == true) + if (dof_mask[loc_row][loc_col] == false) + add_this = false; + + if (add_this == true) + cols[col_counter++] = localized_row_indices[j]; + } - else + + // now comes the hack that sets the + // correct end of block in case we work + // with block matrices. 
+ while (n_actual_dofs == *block_it) { - // in case we keep constrained entries, - // add the whole line. - if (keep_constrained_entries == true) - { - unsigned int col_counter = 0; - for (unsigned int j=0; j 0) - sparsity_pattern.add_entries (local_dof_indices[i], - column_indices.begin(), - column_indices.begin()+col_counter); + // slower functions when there are + // indirect references and when we need + // to do some more checks. + else + { + for (unsigned int j=0; j < n_actual_dofs; ++j) + { + + // now comes the hack that sets the + // correct end of block in case we work + // with block matrices. + while (j == *block_it) + { + actual_block_ends[block_it-block_ends.begin()] = col_counter; + ++block_it; + Assert ((unsigned int)(block_it - block_ends.begin()) <= block_ends.size(), + ExcInternalError()); } - // in case we do not add constrained - // entries, still add the diagonal - else - sparsity_pattern.add (local_dof_indices[i],local_dof_indices[i]); - // now if the i-th row is constrained, - // we get additional entries in other - // rows. The columns are added the same - // way as in the unconstrained rows. - for (unsigned int p=0; pentries.size(); ++p) + const unsigned int loc_col = my_indices[j].second.local_row; + + bool add_this = false; + + // case 1: row has direct contribution in + // local matrix + if (loc_row != deal_II_numbers::invalid_unsigned_int) { - const unsigned int row = position_i->entries[p].first; - - // in case we are at a row that has - // already been added, do not need to go - // on - std::vector::iterator inserted_it = - std::lower_bound(already_inserted.begin(), already_inserted.end(), - row); - if (inserted_it != already_inserted.end()) - if (*inserted_it == row) - continue; - - unsigned int col_counter = 0; - for (unsigned int j=0; j= 0 && loc_row < n_local_dofs, + ExcInternalError()); + + // case 1a: col has direct contribution + // in local matrix + if (loc_col != deal_II_numbers::invalid_unsigned_int) { - // Again, first check whether - // the mask contains any - // information, and decide - // then whether the current - // index should be added. - if (dof_mask_is_active) - if (dof_mask[i][j] != true) - continue; - - const ConstraintLine *position_j = constraint_lines[j]; - const bool is_constrained_j = (position_j != 0); - - if (is_constrained_j == false) - column_indices[col_counter++] = local_dof_indices[j]; + Assert (loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + if (dof_mask_is_active == true) + { + if (dof_mask[loc_row][loc_col] == true) + add_this = true; + } else - for (unsigned int q=0; qentries.size(); ++q) - column_indices[col_counter++] = position_j->entries[q].first; + add_this = true; } - // Check whether we did remain within the - // arrays when adding elements into the - // scratch arrays. - Assert (col_counter <= n_max_entries_per_row, ExcInternalError()); - - // Finally, write the scratch array into - // the sparsity pattern. Of course, do it - // only in case there is anything to - // write... 
- if (col_counter > 0) - sparsity_pattern.add_entries (row, - column_indices.begin(), - column_indices.begin()+col_counter); - - if (keep_constrained_entries == false) - already_inserted.insert(inserted_it, row); + // account for indirect contributions by + // constraints + if (add_this == false) + for (unsigned int p=0; p= 0 && loc_col < n_local_dofs, + ExcInternalError()); + if (dof_mask_is_active == true) + { + if (dof_mask[my_indices[i].second.constraints[q].first][loc_col] == true) + { + add_this = true; + break; + } + } + else + { + add_this = true; + break; + } + } + + for (unsigned int p=0; p::add (const unsigned int row, const unsigned int n_cols, const unsigned int *col_indices, const number2 *values, - const bool elide_zero_values) + const bool elide_zero_values, + const bool col_indices_are_sorted) { Assert (cols != 0, ExcNotInitialized()); + // if we have sufficiently many columns + // and sorted indices it is faster to + // just go through the column indices and + // look whether we found one, rather than + // doing a binary search for every column + if (col_indices_are_sorted == true) + if (n_cols * 8 > cols->row_length(row)) + { + // check whether the given indices are + // really sorted +#ifdef DEBUG + for (unsigned int i=1; i col_indices[i-1], + ExcMessage("Indices are not sorted.")); +#endif + if (cols->optimize_diagonal() == true) + { + const unsigned int * this_cols = + &cols->get_column_numbers()[cols->get_rowstart_indices()[row]]; + number * val_ptr = &val[cols->get_rowstart_indices()[row]]; + Assert (this_cols[0] == row, ExcInternalError()); + unsigned int counter = 1; + for (unsigned int i=0; i= this_cols[counter], ExcInternalError()); + + while (this_cols[counter] < col_indices[i]) + ++counter; + + Assert (this_cols[counter] == col_indices[i] || values[i] == 0, + ExcInvalidIndex(row,col_indices[i])); + + if (values[i] != 0) + val_ptr[counter] += values[i]; + } + Assert (counter < cols->row_length(row), ExcInternalError()); + } + else + { + const unsigned int * this_cols = + &cols->get_column_numbers()[cols->get_rowstart_indices()[row]]; + number * val_ptr = &val[cols->get_rowstart_indices()[row]]; + unsigned int counter = 0; + for (unsigned int i=0; i= this_cols[counter], ExcInternalError()); + + while (this_cols[counter] < col_indices[i]) + ++counter; + + Assert (this_cols[counter] == col_indices[i] || values[i] == 0, + ExcInvalidIndex(row,col_indices[i])); + + if (values[i] != 0) + val_ptr[counter] += values[i]; + } + Assert (counter < cols->row_length(row), ExcInternalError()); + } + return; + } + unsigned int n_columns = 0; if (global_indices.size() < n_cols) @@ -2261,7 +2332,7 @@ SparseMatrix::add (const unsigned int row, // First, search all the indices to find // out which values we actually need to // add. - // TODO: Could probably made more + // TODO: Could probably be made more // efficient. 
for (unsigned int j=0; j::add (const unsigned int row, const unsigned int n_cols, const unsigned int *col_indices, const number2 *values, - const bool elide_zero_values) + const bool elide_zero_values, + const bool /*col_indices_are_sorted*/) { //TODO: This function can surely be made more efficient for (unsigned int j=0; j void add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end); + ForwardIterator end, + const bool indices_are_sorted = false); /** * Make the sparsity pattern diff --git a/deal.II/lac/include/lac/trilinos_sparse_matrix.h b/deal.II/lac/include/lac/trilinos_sparse_matrix.h index 92728a1855..452f6f6aed 100755 --- a/deal.II/lac/include/lac/trilinos_sparse_matrix.h +++ b/deal.II/lac/include/lac/trilinos_sparse_matrix.h @@ -1104,7 +1104,8 @@ namespace TrilinosWrappers const unsigned int n_cols, const unsigned int *col_indices, const TrilinosScalar *values, - const bool elide_zero_values = true); + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); /** * Multiply the entire matrix @@ -2422,7 +2423,8 @@ namespace TrilinosWrappers const unsigned int n_cols, const unsigned int *col_indices, const TrilinosScalar *values, - const bool elide_zero_values) + const bool elide_zero_values, + const bool /*col_indices_are_sorted*/) { int ierr; if (last_action == Insert) diff --git a/deal.II/lac/include/lac/trilinos_sparsity_pattern.h b/deal.II/lac/include/lac/trilinos_sparsity_pattern.h index 5fcdb2edf2..2b69c70ea8 100755 --- a/deal.II/lac/include/lac/trilinos_sparsity_pattern.h +++ b/deal.II/lac/include/lac/trilinos_sparsity_pattern.h @@ -810,7 +810,8 @@ namespace TrilinosWrappers template void add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end); + ForwardIterator end, + const bool indices_are_sorted = false); //@} /** * @name Access of underlying Trilinos data @@ -1318,7 +1319,8 @@ namespace TrilinosWrappers void SparsityPattern::add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end) + ForwardIterator end, + const bool /*indices_are_sorted*/) { if (begin == end) return; diff --git a/deal.II/lac/source/sparsity_pattern.cc b/deal.II/lac/source/sparsity_pattern.cc index 63b2dfa2cf..af42af2703 100644 --- a/deal.II/lac/source/sparsity_pattern.cc +++ b/deal.II/lac/source/sparsity_pattern.cc @@ -869,7 +869,8 @@ template void SparsityPattern::add_entries (const unsigned int row, ForwardIterator begin, - ForwardIterator end) + ForwardIterator end, + const bool /*indices_are_sorted*/) { // right now, just forward to the other function. 
for (ForwardIterator it = begin; it != end; ++it) @@ -1137,14 +1138,17 @@ template void SparsityPattern::copy_from (const FullMatrix &, bo template void SparsityPattern::add_entries (const unsigned int, const unsigned int*, - const unsigned int*); + const unsigned int*, + const bool); template void SparsityPattern::add_entries::const_iterator> (const unsigned int, std::vector::const_iterator, - std::vector::const_iterator); + std::vector::const_iterator, + const bool); template void SparsityPattern::add_entries::iterator> (const unsigned int, std::vector::iterator, - std::vector::iterator); + std::vector::iterator, + const bool); DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/lac/source/trilinos_sparse_matrix.cc b/deal.II/lac/source/trilinos_sparse_matrix.cc index 496c11b175..7cde3d2772 100755 --- a/deal.II/lac/source/trilinos_sparse_matrix.cc +++ b/deal.II/lac/source/trilinos_sparse_matrix.cc @@ -1081,7 +1081,7 @@ namespace TrilinosWrappers SparseMatrix::l1_norm () const { if (matrix->Filled() == false) - matrix->FillComplete(col_map, row_map, true); + matrix->GlobalAssemble(col_map, row_map, true); TrilinosScalar result = matrix->NormOne(); @@ -1094,7 +1094,7 @@ namespace TrilinosWrappers SparseMatrix::linfty_norm () const { if (matrix->Filled() == false) - matrix->FillComplete(col_map, row_map, true); + matrix->GlobalAssemble(col_map, row_map, true); TrilinosScalar result = matrix->NormInf(); @@ -1107,7 +1107,7 @@ namespace TrilinosWrappers SparseMatrix::frobenius_norm () const { if (matrix->Filled() == false) - matrix->FillComplete(col_map, row_map, true); + matrix->GlobalAssemble(col_map, row_map, true); TrilinosScalar result = matrix->NormFrobenius(); @@ -1149,7 +1149,7 @@ namespace TrilinosWrappers Assert (&src != &dst, ExcSourceEqualsDestination()); if (matrix->Filled() == false) - matrix->FillComplete(col_map, row_map, true); + matrix->GlobalAssemble(col_map, row_map, true); Assert (src.vector_partitioner().SameAs(matrix->DomainMap()) == true, ExcMessage ("Column map of matrix does not fit with vector map!")); @@ -1170,7 +1170,7 @@ namespace TrilinosWrappers Assert (&src != &dst, ExcSourceEqualsDestination()); if (matrix->Filled() == false) - matrix->FillComplete(col_map, row_map, true); + matrix->GlobalAssemble(col_map, row_map, true); Assert (src.vector_partitioner().SameAs(matrix->RangeMap()) == true, ExcMessage ("Column map of matrix does not fit with vector map!")); -- 2.39.5
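
Note: the following is a minimal standalone sketch of the central idea of this patch — sort the global column indices of a row once, then accumulate into the matrix row by a single linear merge instead of doing a binary search for every entry (the fast path added to SparseMatrix::add above). The SimpleCSR structure and all names in it are hypothetical illustrations, not deal.II API.

#include <algorithm>
#include <cassert>
#include <iostream>
#include <utility>
#include <vector>

                                 // toy CSR matrix with a fixed sparsity
                                 // pattern; the column numbers of each row
                                 // are stored in ascending order
struct SimpleCSR
{
  std::vector<unsigned int> row_start;   // length n_rows+1
  std::vector<unsigned int> col_index;   // sorted within each row
  std::vector<double>       value;

                                 // add() for already sorted column indices:
                                 // walk the stored row and the incoming
                                 // indices simultaneously (one linear merge)
                                 // instead of searching for every column
  void add_sorted (const unsigned int  row,
                   const unsigned int  n_cols,
                   const unsigned int *cols,
                   const double       *vals)
  {
    unsigned int pos = row_start[row];
    for (unsigned int i=0; i<n_cols; ++i)
      {
        while (pos < row_start[row+1] && col_index[pos] < cols[i])
          ++pos;
        assert (pos < row_start[row+1] && col_index[pos] == cols[i]);
        value[pos] += vals[i];
      }
  }
};


int main ()
{
                                 // 3x3 pattern: row 1 couples with columns
                                 // {0,1,2}, rows 0 and 2 with two columns each
  SimpleCSR m;
  const unsigned int row_start[] = {0, 2, 5, 7};
  const unsigned int col_index[] = {0,1, 0,1,2, 1,2};
  m.row_start.assign (row_start, row_start+4);
  m.col_index.assign (col_index, col_index+7);
  m.value.assign (7, 0.);

                                 // local contributions to row 1, with the
                                 // global column indices not yet sorted
  std::vector<std::pair<unsigned int,double> > entries;
  entries.push_back (std::make_pair (2u, 3.));
  entries.push_back (std::make_pair (0u, 1.));
  entries.push_back (std::make_pair (1u, 2.));

                                 // sort once by column index, then split into
                                 // the two arrays the add() function expects
  std::sort (entries.begin(), entries.end());
  std::vector<unsigned int> cols (entries.size());
  std::vector<double>       vals (entries.size());
  for (unsigned int i=0; i<entries.size(); ++i)
    {
      cols[i] = entries[i].first;
      vals[i] = entries[i].second;
    }

  m.add_sorted (1, cols.size(), &cols[0], &vals[0]);

  for (unsigned int i=0; i<m.value.size(); ++i)
    std::cout << m.value[i] << ' ';          // prints: 0 0 1 2 3 0 0
  std::cout << std::endl;
  return 0;
}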
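
A second sketch, again with made-up data rather than deal.II objects, of how the sortedness is exploited for block matrices: because the incoming column indices are sorted, a single std::lower_bound on the first column of the next block splits them into one contiguous, still sorted sub-range per block, which can then be forwarded directly to the corresponding block (this mirrors what BlockMatrixBase::add and make_block_indices_local do in the patch; the block sizes and indices below are hypothetical).

#include <algorithm>
#include <iostream>
#include <vector>

int main ()
{
                                 // two column blocks: block 0 owns global
                                 // columns [0,4), block 1 owns [4,10)
  const unsigned int block_1_start = 4;

                                 // sorted global column indices of one row
  unsigned int col_data[] = {1, 3, 4, 7, 9};
  std::vector<unsigned int> cols (col_data, col_data+5);

                                 // the first index belonging to block 1 is
                                 // found by a single binary search; everything
                                 // before it goes into block 0, everything
                                 // from there on into block 1
  std::vector<unsigned int>::iterator first_in_block_1
    = std::lower_bound (cols.begin(), cols.end(), block_1_start);

  std::cout << "block 0 receives " << first_in_block_1 - cols.begin()
            << " columns, block 1 receives " << cols.end() - first_in_block_1
            << " columns" << std::endl;      // prints: 2 and 3

                                 // the indices handed to block 1 are shifted
                                 // to block-local numbering
  for (std::vector<unsigned int>::iterator it = first_in_block_1;
       it != cols.end(); ++it)
    *it -= block_1_start;                    // now {0, 3, 5}
  return 0;
}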