From: Martin Kronbichler Date: Mon, 30 Mar 2009 16:54:57 +0000 (+0000) Subject: Use a smarter way to specialize the local_to_global functions for standard (non-block... X-Git-Tag: v8.0.0~7934 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=92464e5366325c4c6309213864665a680b29a958;p=dealii.git Use a smarter way to specialize the local_to_global functions for standard (non-block) and block matrices. git-svn-id: https://svn.dealii.org/trunk@18528 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/lac/include/lac/block_matrix_base.h b/deal.II/lac/include/lac/block_matrix_base.h index 582b8764f5..861c73df88 100644 --- a/deal.II/lac/include/lac/block_matrix_base.h +++ b/deal.II/lac/include/lac/block_matrix_base.h @@ -29,6 +29,7 @@ DEAL_II_NAMESPACE_OPEN template class MatrixIterator; template class BlockMatrixBase; +template class BlockSparsityPatternBase; template class BlockSparseMatrixEZ; /** @@ -56,13 +57,23 @@ struct IsBlockMatrix /** * Overload returning true if the class - * is derived from BlockMatrixBase, which - * is what block matrices do (with the - * exception of BlockSparseMatrixEZ. + * is derived from BlockMatrixBase, + * which is what block matrices do + * (with the exception of + * BlockSparseMatrixEZ). */ template static yes_type check_for_block_matrix (const BlockMatrixBase *); + /** + * Overload returning true if the class + * is derived from + * BlockSparsityPatternBase, which is + * what block sparsity patterns do. + */ + template + static yes_type check_for_block_matrix (const BlockSparsityPatternBase *); + /** * Overload for BlockSparseMatrixEZ, * which is the only block matrix not diff --git a/deal.II/lac/include/lac/constraint_matrix.h b/deal.II/lac/include/lac/constraint_matrix.h index 9e3c9cb6a5..807d581ed2 100644 --- a/deal.II/lac/include/lac/constraint_matrix.h +++ b/deal.II/lac/include/lac/constraint_matrix.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -1426,6 +1427,60 @@ class ConstraintMatrix : public Subscriptor * time. */ mutable Threads::ThreadMutex mutex; + + /** + * This function actually implements + * the local_to_global function for + * standard (non-block) matrices. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + internal::bool2type) const; + + /** + * This function actually implements + * the local_to_global function for + * block matrices. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + internal::bool2type) const; + + /** + * This function actually implements + * the local_to_global function for + * standard (non-block) sparsity types. + */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + internal::bool2type) const; + + /** + * This function actually implements + * the local_to_global function for + * block sparsity types. 
+ */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + internal::bool2type) const; }; diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h index faa035d712..5fb76095ed 100644 --- a/deal.II/lac/include/lac/constraint_matrix.templates.h +++ b/deal.II/lac/include/lac/constraint_matrix.templates.h @@ -810,13 +810,153 @@ distribute_local_to_global (const FullMatrix &local_matrix, const std::vector &local_dof_indices, MatrixType &global_matrix) const { + // create a dummy and hand on to the + // function actually implementing this + // feature further down. Vector dummy(0); distribute_local_to_global (local_matrix, dummy, local_dof_indices, - global_matrix, dummy); + global_matrix, dummy, + internal::bool2type::value>()); } +template +void +ConstraintMatrix:: +distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector) const +{ + // enter the internal function with the + // respective block information set, the + // actual implementation follows further + // down. + distribute_local_to_global (local_matrix, local_vector, local_dof_indices, + global_matrix, global_vector, + internal::bool2type::value>()); +} + + + +template +void +ConstraintMatrix:: +add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask) const +{ + // enter the internal function with the + // respective block information set, the + // actual implementation follows further + // down. + add_entries_local_to_global (local_dof_indices, sparsity_pattern, + keep_constrained_entries, dof_mask, + internal::bool2type::value>()); +} + + + +template +void +ConstraintMatrix::distribute (const VectorType &condensed, + VectorType &uncondensed) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + Assert (condensed.size()+n_constraints() == uncondensed.size(), + ExcDimensionMismatch(condensed.size()+n_constraints(), + uncondensed.size())); + + // store for each line of the new vector + // its old line number before + // distribution. 
If the shift is + // -1, this line was condensed away + std::vector old_line; + + old_line.reserve (uncondensed.size()); + + std::vector::const_iterator next_constraint = lines.begin(); + unsigned int shift = 0; + unsigned int n_rows = uncondensed.size(); + + if (next_constraint == lines.end()) + // if no constraint is to be handled + for (unsigned int row=0; row!=n_rows; ++row) + old_line.push_back (row); + else + for (unsigned int row=0; row!=n_rows; ++row) + if (row == next_constraint->line) + { + // this line is constrained + old_line.push_back (-1); + // note that @p lines is ordered + ++shift; + ++next_constraint; + if (next_constraint == lines.end()) + // nothing more to do; finish rest + // of loop + { + for (unsigned int i=row+1; iinhomogeneity; + // then add the different + // contributions + for (unsigned int i=0; ientries.size(); ++i) + uncondensed(line) += (condensed(old_line[next_constraint->entries[i].first]) * + next_constraint->entries[i].second); + ++next_constraint; + }; +} + + + +template +void +ConstraintMatrix::distribute (VectorType &vec) const +{ + Assert (sorted == true, ExcMatrixNotClosed()); + + std::vector::const_iterator next_constraint = lines.begin(); + for (; next_constraint != lines.end(); ++next_constraint) + { + // fill entry in line + // next_constraint.line by adding the + // different contributions + double new_value = next_constraint->inhomogeneity; + for (unsigned int i=0; ientries.size(); ++i) + new_value += (vec(next_constraint->entries[i].first) * + next_constraint->entries[i].second); + vec(next_constraint->line) = new_value; + } +} + + // Some helper definitions for the // local_to_global functions. namespace internals @@ -865,7 +1005,8 @@ namespace internals // a value to an already existing // row. Similar functionality as for // std::map, - // but much faster here. + // but here done for a vector of pairs, + // and much faster. inline void insert_index (std::vector > &my_indices, @@ -889,599 +1030,74 @@ namespace internals my_indices.end(), row); - if (pos->first == row) - pos1 = pos; - else - pos1 = my_indices.insert(pos,std::make_pair - (row,distributing())); - } - - if (local_row == deal_II_numbers::invalid_unsigned_int) - pos1->second.constraints.push_back (constraint); - else - pos1->second.local_row = local_row; - } - - - // a lot of functions that cover all the - // different situations in templated - // calls to - // distribute_local_to_global. 
this is a - // lot of duplicated code, find out - // whether we can do this better - template - inline - void - make_block_indices_local (const MatrixType &mat, - std::vector &indices, - const std::vector &block_end) - { - return; - } - - template - inline - void - make_block_indices_local (const BlockSparseMatrix &mat, - std::vector &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i - inline - void - sum_into_matrix (MatrixType &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &vals, - const std::vector &block_ends) - { - // should get here only if we have not - // modified anything with - // make_block_indices_local - Assert (block_ends.size() == 1, ExcInternalError()); - if (n_elements > 0) - mat.add(row, n_elements, &cols[0], &vals[0], false, true); - } - - // now some efficient versions for block - // matrices. have to replicate code for - // each matrix type to make things work - // (or is there a smarter way for doing - // that?) 
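// Sketch (not part of this patch): the question above is what this commit
// answers. Instead of one overload per concrete block type, a single entry
// point can dispatch at compile time on an empty tag struct parametrized by
// a type trait; the real code uses internal::bool2type together with the
// IsBlockMatrix trait and private overloads of the *_local_to_global
// functions. A minimal stand-alone illustration with stand-in names:
namespace sketch
{
  template <bool B> struct bool2type {};   // empty tag type, carries only B

  template <typename T> struct is_block    // stand-in for IsBlockMatrix;
  { static const bool value = false; };    // specialized to true for block types

  template <typename M> void add_impl (M &, bool2type<false>) { /* scalar path */ }
  template <typename M> void add_impl (M &, bool2type<true>)  { /* block path  */ }

  template <typename M> void add (M &m)
  {
    // overload resolution picks the right implementation at compile time;
    // the non-matching path is never instantiated for this matrix type
    add_impl (m, bool2type<is_block<M>::value>());
  }
}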
- template - inline - void - sum_into_matrix (BlockSparseMatrix &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &vals, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add(row_block.second, current_length, - &cols[col_begins], &vals[col_begins], - false, true); - col_begins = block_ends[i]; - } - } - - -#ifdef DEAL_II_USE_PETSC - inline - void - sum_into_matrix (PETScWrappers::BlockSparseMatrix &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &vals, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add(row_block.second, current_length, - &cols[col_begins], &vals[col_begins], - false, true); - col_begins = block_ends[i]; - } - } - - inline - void - sum_into_matrix (PETScWrappers::MPI::BlockSparseMatrix &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &vals, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add(row_block.second, current_length, - &cols[col_begins], &vals[col_begins], - false, true); - col_begins = block_ends[i]; - } - } -#endif - -#ifdef DEAL_II_USE_TRILINOS - inline - void - sum_into_matrix (TrilinosWrappers::BlockSparseMatrix &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &vals, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add(row_block.second, current_length, - &cols[col_begins], &vals[col_begins], - false, true); - col_begins = block_ends[i]; - } - } -#endif - - - - inline - void - make_block_indices_local (const BlockSparsityPattern &mat, - std::vector &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned 
int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i &indices, - std::vector &block_end) - { - typedef std::vector::iterator row_iterator; - const unsigned int num_blocks = mat.n_block_rows(); - Assert (num_blocks == mat.n_block_cols(), ExcNotQuadratic()); - row_iterator col_indices = indices.begin(); - block_end.resize(num_blocks,0); - unsigned int n_cols = indices.size(); - for (unsigned int i=0;i - inline - void - insert_into_sparsity (SparsityType &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &block_ends) - { - // should get here only if we have not - // modified anything with - // make_block_indices_local - Assert (block_ends.size() == 1, ExcInternalError()); - if (n_elements > 0) - mat.add_entries(row, cols.begin(), cols.begin()+n_elements); - } - - // now some efficient versions for block - // sparsity patterns. have to replicate - // code for each sp type to make things - // work (or is there a smarter way for - // doing the template stuff?) 
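// Sketch (not part of this patch): the duplication questioned above goes
// away because one trait now serves all block types. The change to
// block_matrix_base.h at the top of this commit adds a
// check_for_block_matrix() overload for BlockSparsityPatternBase, so
// IsBlockMatrix recognizes block sparsity patterns as well. The trait rests
// on the classic sizeof/overload-resolution trick; a stand-alone
// illustration with stand-in names:
namespace sketch
{
  struct PatternBase {};             // plays the role of BlockSparsityPatternBase
  struct Pattern : PatternBase {};   // plays the role of a concrete block pattern

  template <typename T>
  struct is_block_pattern
  {
    typedef char yes_type;                    // sizeof == 1
    struct no_type { char pad[2]; };          // sizeof  > 1

    static yes_type check (const PatternBase *);  // chosen for derived classes
    static no_type  check (...);                  // fallback for everything else

    // the call is never evaluated; sizeof only asks which overload would win
    static const bool value =
      (sizeof(check(static_cast<T*>(0))) == sizeof(yes_type));
  };

  // is_block_pattern<Pattern>::value == true, is_block_pattern<int>::value == false
}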
- inline - void - insert_into_sparsity (BlockSparsityPattern &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = - mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add_entries(row_block.second, - cols.begin()+col_begins, - cols.begin()+col_begins+current_length); - col_begins = block_ends[i]; - } - } - - inline - void - insert_into_sparsity (BlockCompressedSparsityPattern &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = - mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add_entries(row_block.second, - cols.begin()+col_begins, - cols.begin()+col_begins+current_length); - col_begins = block_ends[i]; - } - } - - inline - void - insert_into_sparsity (BlockCompressedSetSparsityPattern &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = - mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add_entries(row_block.second, - cols.begin()+col_begins, - cols.begin()+col_begins+current_length); - col_begins = block_ends[i]; - } - } - - inline - void - insert_into_sparsity (BlockCompressedSimpleSparsityPattern &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &block_ends) - { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = - mat.get_row_indices().global_to_local(row); - for (unsigned int i=0; i 0) - mat.block(row_block.first, i).add_entries(row_block.second, - cols.begin()+col_begins, - cols.begin()+col_begins+current_length); - col_begins = block_ends[i]; - } - } - + if (pos->first == row) + pos1 = pos; + else + pos1 = my_indices.insert(pos,std::make_pair + (row,distributing())); + } + + if (local_row == deal_II_numbers::invalid_unsigned_int) + pos1->second.constraints.push_back (constraint); + else + pos1->second.local_row = local_row; + } -#ifdef DEAL_II_USE_TRILINOS inline void - insert_into_sparsity (TrilinosWrappers::BlockSparsityPattern &mat, - const unsigned int row, - const unsigned int n_elements, - const std::vector &cols, - const std::vector &block_ends) + list_shellsort 
(std::vector > &my_indices) { - Assert (block_ends.size() == mat.n_block_rows(), - ExcDimensionMismatch (block_ends.size(), mat.n_block_rows())); - Assert (block_ends[mat.n_block_rows()-1] == n_elements, - ExcDimensionMismatch(block_ends[mat.n_block_rows()-1], n_elements)); - unsigned int col_begins = 0; - const std::pair row_block = - mat.get_row_indices().global_to_local(row); - - for (unsigned int i=0; i 0) { - const int current_length = block_ends[i] - col_begins; - if (current_length > 0) - mat.block(row_block.first, i).add_entries(row_block.second, - cols.begin()+col_begins, - cols.begin()+col_begins+current_length); - col_begins = block_ends[i]; + for (i=step; i < length; i++) + { + istep = step; + j = i; + j2 = j-istep; + temp = my_indices[i].first; + templ = my_indices[i].second.local_row; + if (my_indices[j2].first > temp) + { + while ((j >= istep) && (my_indices[j2].first > temp)) + { + my_indices[j].first = my_indices[j2].first; + my_indices[j].second.local_row = my_indices[j2].second.local_row; + j = j2; + j2 -= istep; + } + my_indices[j].first = temp; + my_indices[j].second.local_row = templ; + } + } + step = step>>1; } - } -#endif + } } - + // internal implementation for + // distribute_local_to_global for + // standard (non-block) matrices template +inline void ConstraintMatrix:: distribute_local_to_global (const FullMatrix &local_matrix, const Vector &local_vector, const std::vector &local_dof_indices, MatrixType &global_matrix, - VectorType &global_vector) const + VectorType &global_vector, + internal::bool2type) const { // check whether we work on real vectors // or we just used a dummy when calling @@ -1531,60 +1147,66 @@ distribute_local_to_global (const FullMatrix &local_matrix, // constraints). Choosing an STL map or // anything else I know of would be much // more expensive here! - typedef std::vector > row_data; - row_data my_indices; - my_indices.reserve(n_local_dofs); + std::vector > my_indices (n_local_dofs); std::vector > constraint_lines; - if (use_vectors == true) - constraint_lines.reserve (n_local_dofs); - - // cache whether we have no constraints - // at all. - bool have_constrained_dofs = false; - - // so go through all the local dofs, and - // resolve the list of all constraints. 
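// Sketch (not part of this patch): the removed single-pass loop below is
// replaced further down by a two-phase gather. Phase 1 appends all
// unconstrained rows to my_indices unsorted and merely remembers the
// constrained ones in constraint_lines; phase 2 sorts once with
// list_shellsort() above; phase 3 resolves the remembered constraints into
// the now-sorted list through insert_index(), which can then use a binary
// search. A stand-alone sketch of such a sorted insert, with stand-in types
// (the real code stores a 'distributing' struct as the second member):
#include <algorithm>
#include <utility>
#include <vector>

namespace sketch
{
  typedef std::pair<unsigned int, unsigned int> entry;

  struct first_less
  {
    bool operator() (const entry &a, const entry &b) const
    { return a.first < b.first; }
  };

  inline void insert_sorted (std::vector<entry> &v, const entry &e)
  {
    // binary search for the insertion point: O(log n) compares plus one move
    std::vector<entry>::iterator pos =
      std::lower_bound (v.begin(), v.end(), e, first_less());
    if (pos == v.end() || pos->first != e.first)
      v.insert (pos, e);          // row not present yet, keep v sorted
    else
      pos->second = e.second;     // row already present, only update payload
  }
}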
- for (unsigned int i=0; i::const_iterator - position = std::lower_bound (lines.begin(), - lines.end(), - index_comparison); + const std::vector::const_iterator + position = std::lower_bound (lines.begin(), + lines.end(), + index_comparison); + Assert (position->line == local_dof_indices[i], + ExcInternalError()); - // should only get here when the row - // actually is constrained - Assert (position->line == local_dof_indices[i], - ExcInternalError()); + constraint_lines.push_back (std::make_pair(i,&*position)); + } + Assert (constraint_lines.size() + added_rows == n_local_dofs, + ExcInternalError()); + my_indices.resize (added_rows); + } + internals::list_shellsort (my_indices); + // now in the second step actually + // resolve the constraints + const unsigned int n_constrained_dofs = constraint_lines.size(); + for (unsigned int i=0; ientries.size(); ++q) { - have_constrained_dofs = true; + have_indirect_rows = true; internals::insert_index(my_indices, position->entries[q].first, deal_II_numbers::invalid_unsigned_int, std::make_pair - (i,position->entries[q].second)); + (local_row, position->entries[q].second)); } // to make sure that the global matrix @@ -1613,34 +1235,326 @@ distribute_local_to_global (const FullMatrix &local_matrix, // line below, we do actually do // something with this dof Threads::ThreadMutex::ScopedLock lock(mutex); - global_matrix.add(local_dof_indices[i],local_dof_indices[i], - std::fabs(local_matrix(i,i))); + global_matrix.add(global_row,global_row, + std::fabs(local_matrix(local_row,local_row))); + } + + const unsigned int n_actual_dofs = my_indices.size(); + + // create arrays for the column data + // (indices and values) that will then be + // written into the matrix. + std::vector cols (n_actual_dofs); + std::vector vals (n_actual_dofs); + + // now do the actual job. + for (unsigned int i=0; i= 0 && loc_row < n_local_dofs, + ExcInternalError()); + + for (unsigned int j=0; j < n_actual_dofs; ++j) + { + const unsigned int loc_col = my_indices[j].second.local_row; + Assert(loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + + if (local_matrix(loc_row,loc_col) != 0) + { + vals[col_counter] = local_matrix(loc_row,loc_col); + cols[col_counter] = my_indices[j].first; + col_counter++; + } + } + + if (use_vectors == true) + { + val = local_vector(loc_row); + + // need to account for inhomogeneities + // here: thie corresponds to eliminating + // the respective column in the local + // matrix with value on the right hand + // side. + for (unsigned int i=0; iinhomogeneity * + local_matrix(loc_row,constraint_lines[i].first); + } + } + + // slower functions when there are + // indirect references and when we need + // to do some more checks. 
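// Note (added for clarity): what the constraint resolution computes, written
// out. With constrained dofs eliminated as
//   x_i = sum_k w_ik x_k + b_i,
// the contribution of the local matrix M to the global entry (I,J), where I
// and J are the global rows gathered in my_indices, is
//   val(I,J) =                  M(r,c)        (row and column both direct)
//            + sum_p  w_p     * M(r,c_p)      (column constrained)
//            + sum_q  w_q     * M(r_q,c)      (row constrained)
//            + sum_qp w_q w_p * M(r_q,c_p)    (both constrained),
// with missing direct dofs (local_row == invalid_unsigned_int) contributing
// zero. The fast path above covers the first term only; the slow path below
// accumulates all four.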
+ else + { + for (unsigned int j=0; j < n_actual_dofs; ++j) + { + double col_val; + const unsigned int loc_col = my_indices[j].second.local_row; + + // case 1: row has direct contribution in + // local matrix + if (loc_row != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_row >= 0 && loc_row < n_local_dofs, + ExcInternalError()); + + // case 1a: col has direct contribution + // in local matrix + if (loc_col != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + col_val = local_matrix(loc_row,loc_col); + } + // case 1b: col has no direct + // contribution in local matrix + else + col_val = 0; + + // account for indirect contributions by + // constraints + for (unsigned int p=0; p= 0 && loc_col < n_local_dofs, + ExcInternalError()); + add_this = local_matrix(my_indices[i].second.constraints[q].first, + loc_col); + } + else + add_this = 0; + + for (unsigned int p=0; p= 0 && loc_row < n_local_dofs, + ExcInternalError()); + val = local_vector(loc_row); + for (unsigned int i=0; iinhomogeneity * + local_matrix(loc_row,constraint_lines[i].first); + } + + for (unsigned int q=0; q < my_indices[i].second.constraints.size(); ++q) + { + const unsigned int loc_row_q = my_indices[i].second.constraints[q].first; + double add_this = local_vector (loc_row_q); + for (unsigned int k=0; kinhomogeneity * + local_matrix(loc_row_q,constraint_lines[k].first); + val += add_this * my_indices[i].second.constraints[q].second; + } + } + } + + // finally, write all the information + // that accumulated under the given + // process into the global matrix row and + // into the vector + Threads::ThreadMutex::ScopedLock lock(mutex); + if (col_counter > 0) + global_matrix.add(row, col_counter, &cols[0], &vals[0], false, true); + if (val != 0) + global_vector(row) += val; + } +} + + + +template +inline +void +ConstraintMatrix:: +distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + internal::bool2type) const +{ + // similar function as above, but now + // specialized for block matrices. See + // the other function for additional + // comments. + + const bool use_vectors = (local_vector.size() == 0 && + global_vector.size() == 0) ? 
false : true; + + Assert (local_matrix.n() == local_dof_indices.size(), + ExcDimensionMismatch(local_matrix.n(), local_dof_indices.size())); + Assert (local_matrix.m() == local_dof_indices.size(), + ExcDimensionMismatch(local_matrix.m(), local_dof_indices.size())); + Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); + Assert (global_matrix.n_block_rows() == global_matrix.n_block_cols(), + ExcNotQuadratic()); + if (use_vectors == true) + { + Assert (local_matrix.m() == local_vector.size(), + ExcDimensionMismatch(local_matrix.m(), local_vector.size())); + Assert (global_matrix.m() == global_vector.size(), + ExcDimensionMismatch(global_matrix.m(), global_vector.size())); + } + Assert (sorted == true, ExcMatrixNotClosed()); + + const unsigned int n_local_dofs = local_dof_indices.size(); + const unsigned int num_blocks = global_matrix.n_block_rows(); + + std::vector > my_indices (n_local_dofs); + std::vector > constraint_lines; + constraint_lines.reserve(n_local_dofs); + + bool have_indirect_rows = false; + { + unsigned int added_rows = 0; + for (unsigned int i = 0; i::const_iterator + position = std::lower_bound (lines.begin(), + lines.end(), + index_comparison); + Assert (position->line == local_dof_indices[i], + ExcInternalError()); - if (use_vectors == true) constraint_lines.push_back (std::make_pair(i,&*position)); + } + Assert (constraint_lines.size() + added_rows == n_local_dofs, + ExcInternalError()); + my_indices.resize (added_rows); + } + internals::list_shellsort (my_indices); + + const unsigned int n_constrained_dofs = constraint_lines.size(); + for (unsigned int i=0; ientries.size(); ++q) + { + have_indirect_rows = true; + internals::insert_index(my_indices, position->entries[q].first, + deal_II_numbers::invalid_unsigned_int, + std::make_pair + (local_row, position->entries[q].second)); + } + + Threads::ThreadMutex::ScopedLock lock(mutex); + global_matrix.add(global_row,global_row, + std::fabs(local_matrix(local_row,local_row))); } const unsigned int n_actual_dofs = my_indices.size(); - std::vector localized_row_indices (n_actual_dofs); + std::vector localized_indices (n_actual_dofs); for (unsigned int i=0; i block_ends(1, n_actual_dofs); - internals::make_block_indices_local (global_matrix, localized_row_indices, - block_ends); - std::vector actual_block_ends (block_ends.size()); + std::vector block_ends(num_blocks, n_actual_dofs); + { + typedef std::vector::iterator row_iterator; + row_iterator col_indices = localized_indices.begin(); + unsigned int n_cols = n_actual_dofs; + + // find end of rows. + for (unsigned int i=0;i actual_block_ends (num_blocks); - // create arrays for the column data - // (indices and values) that will then be - // written into the matrix. std::vector cols (n_actual_dofs); std::vector vals (n_actual_dofs); - // now do the actual job. for (unsigned int i=0; i &local_matrix, const unsigned int loc_row = my_indices[i].second.local_row; double val = 0; - // fast function if there are no indirect - // references to any of the local rows at - // all on this set of dofs (saves a lot - // of checks). the only check we actually - // need to perform is whether the matrix - // element is zero. 
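// Note (added for clarity): the payoff of keeping my_indices sorted shows up
// below. A row is first collected into the flat cols/vals arrays and then
// written in long presorted runs, one call per block row chunk in this block
// specialization, using the six-argument form seen throughout this file:
//   mat.add (row, n_cols, &cols[0], &vals[0],
//            false,   // elide_zero_values: zeros were filtered already
//            true);   // col_indices_are_sorted
// so the sparse matrix can merge each run in a single sweep instead of
// performing one search per entry.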
- if (have_constrained_dofs == false) + if (have_indirect_rows == false) { Assert(loc_row >= 0 && loc_row < n_local_dofs, ExcInternalError()); @@ -1680,14 +1588,14 @@ distribute_local_to_global (const FullMatrix &local_matrix, if (local_matrix(loc_row,loc_col) != 0) { vals[col_counter] = local_matrix(loc_row,loc_col); - cols[col_counter] = localized_row_indices[j]; + cols[col_counter] = localized_indices[j]; col_counter++; } } // now comes the hack that sets the // correct end of block in case we work // with block matrices. - while (n_actual_dofs == *block_it) + while (n_actual_dofs == *block_it && block_it != block_ends.end()) { actual_block_ends[block_it-block_ends.begin()] = col_counter; ++block_it; @@ -1699,27 +1607,16 @@ distribute_local_to_global (const FullMatrix &local_matrix, { val = local_vector(loc_row); - // need to account for inhomogeneities - // here: thie corresponds to eliminating - // the respective column in the local - // matrix with value on the right hand - // side. for (unsigned int i=0; iinhomogeneity * local_matrix(loc_row,constraint_lines[i].first); } } - // slower functions when there are - // indirect references and when we need - // to do some more checks. else { for (unsigned int j=0; j < n_actual_dofs; ++j) { - // now comes the hack that sets the - // correct end of block in case we work - // with block matrices. while (j == *block_it) { actual_block_ends[block_it-block_ends.begin()] = col_counter; @@ -1731,28 +1628,20 @@ distribute_local_to_global (const FullMatrix &local_matrix, double col_val; const unsigned int loc_col = my_indices[j].second.local_row; - // case 1: row has direct contribution in - // local matrix if (loc_row != deal_II_numbers::invalid_unsigned_int) { Assert (loc_row >= 0 && loc_row < n_local_dofs, ExcInternalError()); - // case 1a: col has direct contribution - // in local matrix if (loc_col != deal_II_numbers::invalid_unsigned_int) { Assert (loc_col >= 0 && loc_col < n_local_dofs, ExcInternalError()); col_val = local_matrix(loc_row,loc_col); } - // case 1b: col has no direct - // contribution in local matrix else col_val = 0; - // account for indirect contributions by - // constraints for (unsigned int p=0; p &local_matrix, my_indices[j].second.constraints[p].second; } - // case 2: row has no direct contribution in - // local matrix else col_val = 0; - // account for indirect contributions by - // constraints in row, going trough the - // direct and indirect references in the - // given column. for (unsigned int q=0; q &local_matrix, - // if we got some nontrivial value, - // append it to the array of values. if (col_val != 0) { - cols[col_counter] = localized_row_indices[j]; + cols[col_counter] = localized_indices[j]; vals[col_counter] = col_val; col_counter++; } } - // now comes the hack that sets the - // correct end of block in case we work - // with block matrices. - while (n_actual_dofs == *block_it) + while (n_actual_dofs == *block_it && block_it != block_ends.end()) { actual_block_ends[block_it-block_ends.begin()] = col_counter; ++block_it; @@ -1812,34 +1690,300 @@ distribute_local_to_global (const FullMatrix &local_matrix, ExcInternalError()); } - // now to the vectors. besides doing the - // same job as we did above (i.e., - // distribute the content of the local - // vector into the global one), need to - // account for inhomogeneities here: thie - // corresponds to eliminating the - // respective column in the local matrix - // with value on the right hand side. 
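// Note (added for clarity): spelled out, an inhomogeneous constraint
//   x_k = sum_j w_kj x_j + b_k
// is eliminated from the local system  sum_j M(i,j) x_j = f_i  by
// substituting for x_k, which moves the known part b_k to the right hand
// side:
//   f_i -= M(i,k) * b_k   for every constrained local dof k.
// That is exactly the loop over constraint_lines below, where b_k is
// constraint_lines[k].second->inhomogeneity and M(i,k) is
// local_matrix(loc_row, constraint_lines[k].first).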
- if (use_vectors == true) - { - if (loc_row != deal_II_numbers::invalid_unsigned_int) + if (use_vectors == true) + { + if (loc_row != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_row >= 0 && loc_row < n_local_dofs, + ExcInternalError()); + val = local_vector(loc_row); + for (unsigned int i=0; iinhomogeneity * + local_matrix(loc_row,constraint_lines[i].first); + } + + for (unsigned int q=0; q < my_indices[i].second.constraints.size(); ++q) + { + const unsigned int loc_row_q = my_indices[i].second.constraints[q].first; + double add_this = local_vector (loc_row_q); + for (unsigned int k=0; kinhomogeneity * + local_matrix(loc_row_q,constraint_lines[k].first); + val += add_this * my_indices[i].second.constraints[q].second; + } + } + } + + // finally, write all the information + // that accumulated under the given + // process into the global matrix row and + // into the vector. For the block matrix, + // go trough the individual blocks and + // look which entries we need to set. + Threads::ThreadMutex::ScopedLock lock(mutex); + { + unsigned int col_begins = 0; + const std::pair row_block = + global_matrix.get_row_indices().global_to_local(row); + + for (unsigned int k=0; k 0) + global_matrix.block(row_block.first, k).add(row_block.second, current_length, + &cols[col_begins], &vals[col_begins], + false, true); + col_begins = actual_block_ends[k]; + } + } + if (val != 0) + global_vector(row) += val; + } +} + + + +template +inline +void +ConstraintMatrix:: +add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + internal::bool2type ) const +{ + // similar to the function for distributing + // matrix entries. + Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic()); + + const unsigned int n_local_dofs = local_dof_indices.size(); + bool dof_mask_is_active = false; + if (dof_mask.n_rows() == n_local_dofs) + { + dof_mask_is_active = true; + Assert (dof_mask.n_cols() == n_local_dofs, + ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs)); + } + + std::vector > my_indices (n_local_dofs); + + std::vector > constraint_lines; + constraint_lines.reserve(n_local_dofs); + + // cache whether we have to resolve any + // indirect rows generated from resolving + // constrained dofs. + bool have_indirect_rows = false; + { + unsigned int added_rows = 0; + // first add the indices in an unsorted + // way and only keep track of the + // constraints that appear. They are + // resolved in a second step. + for (unsigned int i = 0; i::const_iterator + position = std::lower_bound (lines.begin(), + lines.end(), + index_comparison); + Assert (position->line == local_dof_indices[i], + ExcInternalError()); + + constraint_lines.push_back (std::make_pair(i,&*position)); + } + Assert (constraint_lines.size() + added_rows == n_local_dofs, + ExcInternalError()); + my_indices.resize (added_rows); + } + internals::list_shellsort (my_indices); + + // now in the second step actually + // resolve the constraints + const unsigned int n_constrained_dofs = constraint_lines.size(); + for (unsigned int i=0; ientries.size(); ++q) + { + have_indirect_rows = true; + internals::insert_index(my_indices, position->entries[q].first, + deal_II_numbers::invalid_unsigned_int, + std::make_pair + (local_row, position->entries[q].second)); + } + + Threads::ThreadMutex::ScopedLock lock(mutex); + + // need to add the whole row and column + // structure in case we keep constrained + // entries. 
Unfortunately, we can't use + // the nice matrix structure we use + // elsewhere, so manually add those + // indices one by one. + if (keep_constrained_entries == true) + { + for (unsigned int j=0; j cols (n_actual_dofs); + + // now do the actual job. + for (unsigned int i=0; i= 0 && loc_row < n_local_dofs, + ExcInternalError()); + + for (unsigned int j=0; j < n_actual_dofs; ++j) + { + const unsigned int loc_col = my_indices[j].second.local_row; + Assert(loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + + bool add_this = true; + if (dof_mask_is_active == true) + if (dof_mask[loc_row][loc_col] == false) + add_this = false; + + if (add_this == true) + cols[col_counter++] = my_indices[j].first; + + } + } + + // slower functions when there are + // indirect references and when we need + // to do some more checks. + else + { + for (unsigned int j=0; j < n_actual_dofs; ++j) + { + const unsigned int loc_col = my_indices[j].second.local_row; + + bool add_this = false; + + // case 1: row has direct contribution in + // local matrix + if (loc_row != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_row >= 0 && loc_row < n_local_dofs, + ExcInternalError()); + + // case 1a: col has direct contribution + // in local matrix + if (loc_col != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + if (dof_mask_is_active == true) + { + if (dof_mask[loc_row][loc_col] == true) + goto add_this_index; + } + else + goto add_this_index; + } + + // account for indirect contributions by + // constraints + for (unsigned int p=0; p= 0 && loc_row < n_local_dofs, - ExcInternalError()); - val = local_vector(loc_row); - for (unsigned int i=0; iinhomogeneity * - local_matrix(loc_row,constraint_lines[i].first); - } + if (loc_col != deal_II_numbers::invalid_unsigned_int) + { + Assert (loc_col >= 0 && loc_col < n_local_dofs, + ExcInternalError()); + if (dof_mask_is_active == true) + { + if (dof_mask[my_indices[i].second.constraints[q].first][loc_col] == true) + goto add_this_index; + } + else + goto add_this_index; + } + + for (unsigned int p=0; pinhomogeneity * - local_matrix(loc_row_q,constraint_lines[k].first); - val += add_this * my_indices[i].second.constraints[q].second; + add_this_index: + cols[col_counter++] = my_indices[j].first; } } } @@ -1849,26 +1993,35 @@ distribute_local_to_global (const FullMatrix &local_matrix, // process into the global matrix row and // into the vector Threads::ThreadMutex::ScopedLock lock(mutex); - internals::sum_into_matrix (global_matrix, row, col_counter, - cols, vals, actual_block_ends); - if (val != 0) - global_vector(row) += val; + if (col_counter > 0) + sparsity_pattern.add_entries(row, cols.begin(), cols.begin()+col_counter); } } + template +inline void ConstraintMatrix:: add_entries_local_to_global (const std::vector &local_dof_indices, SparsityType &sparsity_pattern, const bool keep_constrained_entries, - const Table<2,bool> &dof_mask) const + const Table<2,bool> &dof_mask, + internal::bool2type ) const { - // similar to the function for distributing - // matrix entries. + // just as the other + // add_entries_local_to_global function, + // but now specialized for block + // matrices. 
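// Note (added for clarity): as in the block-matrix case above, the sorted
// global indices are translated into per-block ranges before writing. The
// key tool is BlockIndices::global_to_local(), which maps a global index to
// a (block number, index within block) pair; with two blocks of sizes 6 and
// 4, for instance, global index 3 maps to (0,3) and global index 7 to (1,1).
// Because the gathered column indices are sorted, each block's columns form
// one contiguous chunk of the array, so it suffices to record where every
// chunk ends (block_ends / actual_block_ends below) and to hand each chunk
// to its block in a single add_entries() call.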
+ Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic()); + Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(), + ExcNotQuadratic()); + const unsigned int n_local_dofs = local_dof_indices.size(); + const unsigned int num_blocks = sparsity_pattern.n_block_rows(); + bool dof_mask_is_active = false; if (dof_mask.n_rows() == n_local_dofs) { @@ -1877,52 +2030,76 @@ add_entries_local_to_global (const std::vector &local_dof_indices, ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs)); } - typedef std::vector > row_data; - row_data my_indices; - my_indices.reserve(n_local_dofs); + std::vector > my_indices (n_local_dofs); - bool have_constrained_dofs = false; - - for (unsigned int i=0; i > constraint_lines; + constraint_lines.reserve(n_local_dofs); - if (constraint_line_exists[local_dof_indices[i]] == false) - { - internals::insert_index(my_indices, local_dof_indices[i], i); - continue; - } + // cache whether we have to resolve any + // indirect rows generated from resolving + // constrained dofs. + bool have_indirect_rows = false; + { + unsigned int added_rows = 0; + // first add the indices in an unsorted + // way and only keep track of the + // constraints that appear. They are + // resolved in a second step. + for (unsigned int i = 0; i::const_iterator - position = std::lower_bound (lines.begin(), - lines.end(), - index_comparison); + const std::vector::const_iterator + position = std::lower_bound (lines.begin(), + lines.end(), + index_comparison); + Assert (position->line == local_dof_indices[i], + ExcInternalError()); - Assert (position->line == local_dof_indices[i], - ExcInternalError()); + constraint_lines.push_back (std::make_pair(i,&*position)); + } + Assert (constraint_lines.size() + added_rows == n_local_dofs, + ExcInternalError()); + my_indices.resize (added_rows); + } + internals::list_shellsort (my_indices); + // now in the second step actually + // resolve the constraints + const unsigned int n_constrained_dofs = constraint_lines.size(); + for (unsigned int i=0; ientries.size(); ++q) { - have_constrained_dofs = true; + have_indirect_rows = true; internals::insert_index(my_indices, position->entries[q].first, deal_II_numbers::invalid_unsigned_int, - std::make_pair (i,0.)); + std::make_pair + (local_row, position->entries[q].second)); } - // need to add the whole row and column - // structure in case we keep constrained - // entries. Unfortunately, we can't use - // the nice matrix structure we use - // elsewhere, so manually add those - // indices. Threads::ThreadMutex::ScopedLock lock(mutex); + + // need to add the whole row and column + // structure in case we keep constrained + // entries. Unfortunately, we can't use + // the nice matrix structure we use + // elsewhere, so manually add those + // indices one by one. if (keep_constrained_entries == true) { for (unsigned int j=0; j &local_dof_indices, bool add_this_ij = true, add_this_ji = true; if (dof_mask_is_active == true) { - if (dof_mask[i][j] == false) + if (dof_mask[local_row][j] == false) add_this_ij = false; - if (dof_mask[j][i] == false) + if (dof_mask[j][local_row] == false) add_this_ji = false; } if (add_this_ij == true) - sparsity_pattern.add(local_dof_indices[i], + sparsity_pattern.add(global_row, local_dof_indices[j]); if (add_this_ji == true) sparsity_pattern.add(local_dof_indices[j], - local_dof_indices[i]); + global_row); } } else // don't keep constrained entries - just // add the diagonal. 
- sparsity_pattern.add(local_dof_indices[i],local_dof_indices[i]); + sparsity_pattern.add(global_row,global_row); } + const unsigned int n_actual_dofs = my_indices.size(); - std::vector localized_row_indices (n_actual_dofs); + std::vector localized_indices (n_actual_dofs); for (unsigned int i=0; i block_ends(1, n_actual_dofs); - internals::make_block_indices_local (sparsity_pattern, localized_row_indices, - block_ends); - std::vector actual_block_ends (block_ends.size()); + std::vector block_ends(num_blocks, n_actual_dofs); + { + typedef std::vector::iterator row_iterator; + row_iterator col_indices = localized_indices.begin(); + unsigned int n_cols = n_actual_dofs; + + // find end of rows. + for (unsigned int i=0;i actual_block_ends (num_blocks); - // create arrays for the column data - // (indices and values) that will then be - // written into the matrix. std::vector cols (n_actual_dofs); - // now do the actual job. for (unsigned int i=0; i &local_dof_indices, const unsigned int row = my_indices[i].first; const unsigned int loc_row = my_indices[i].second.local_row; - // fast function if there are no indirect - // references to any of the local rows at - // all on this set of dofs - if (have_constrained_dofs == false) + if (have_indirect_rows == false) { Assert(loc_row >= 0 && loc_row < n_local_dofs, ExcInternalError()); @@ -2006,14 +2198,13 @@ add_entries_local_to_global (const std::vector &local_dof_indices, add_this = false; if (add_this == true) - cols[col_counter++] = localized_row_indices[j]; - + cols[col_counter++] = localized_indices[j]; } // now comes the hack that sets the // correct end of block in case we work // with block matrices. - while (n_actual_dofs == *block_it) + while (n_actual_dofs == *block_it && block_it != block_ends.end()) { actual_block_ends[block_it-block_ends.begin()] = col_counter; ++block_it; @@ -2022,9 +2213,8 @@ add_entries_local_to_global (const std::vector &local_dof_indices, } } - // slower functions when there are - // indirect references and when we need - // to do some more checks. 
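// Note (added for clarity): in the rewritten slow path that follows, the
// 'bool add_this' bookkeeping of the old code is replaced by a forward jump.
// The first direct or constraint-mediated coupling found between row and
// column jumps straight to a label that records the column and moves on;
// schematically:
//
//   for (unsigned int j=0; j<n_actual_dofs; ++j)
//     {
//       if (/* direct coupling */)                      goto add_this_index;
//       if (/* coupling through a constraint entry */)  goto add_this_index;
//       continue;                // no coupling at all: try the next column
//     add_this_index:
//       cols[col_counter++] = localized_indices[j];
//     }
//
// For a sparsity pattern only the existence of a coupling matters, not its
// value, so the search can stop at the first hit.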
+ // have indirect references by + // constraints, resolve them else { for (unsigned int j=0; j < n_actual_dofs; ++j) @@ -2045,15 +2235,11 @@ add_entries_local_to_global (const std::vector &local_dof_indices, bool add_this = false; - // case 1: row has direct contribution in - // local matrix if (loc_row != deal_II_numbers::invalid_unsigned_int) { Assert (loc_row >= 0 && loc_row < n_local_dofs, ExcInternalError()); - // case 1a: col has direct contribution - // in local matrix if (loc_col != deal_II_numbers::invalid_unsigned_int) { Assert (loc_col >= 0 && loc_col < n_local_dofs, @@ -2061,85 +2247,60 @@ add_entries_local_to_global (const std::vector &local_dof_indices, if (dof_mask_is_active == true) { if (dof_mask[loc_row][loc_col] == true) - add_this = true; + goto add_this_index; } else - add_this = true; + goto add_this_index; } - // account for indirect contributions by - // constraints - if (add_this == false) - for (unsigned int p=0; p= 0 && loc_col < n_local_dofs, - ExcInternalError()); - if (dof_mask_is_active == true) - { - if (dof_mask[my_indices[i].second.constraints[q].first][loc_col] == true) - { - add_this = true; - break; - } - } - else - { - add_this = true; - break; - } - } - - for (unsigned int p=0; p= 0 && loc_col < n_local_dofs, + ExcInternalError()); + if (dof_mask_is_active == true) { - add_this = true; - break; + if (dof_mask[my_indices[i].second.constraints[q].first][loc_col] == true) + goto add_this_index; } - } + else + goto add_this_index; + } + + for (unsigned int p=0; p &local_dof_indices, // process into the global matrix row and // into the vector Threads::ThreadMutex::ScopedLock lock(mutex); - internals::insert_into_sparsity (sparsity_pattern, row, col_counter, - cols, actual_block_ends); - } -} - - - -template -void -ConstraintMatrix::distribute (const VectorType &condensed, - VectorType &uncondensed) const -{ - Assert (sorted == true, ExcMatrixNotClosed()); - Assert (condensed.size()+n_constraints() == uncondensed.size(), - ExcDimensionMismatch(condensed.size()+n_constraints(), - uncondensed.size())); - - // store for each line of the new vector - // its old line number before - // distribution. 
If the shift is - // -1, this line was condensed away - std::vector old_line; - - old_line.reserve (uncondensed.size()); - - std::vector::const_iterator next_constraint = lines.begin(); - unsigned int shift = 0; - unsigned int n_rows = uncondensed.size(); - - if (next_constraint == lines.end()) - // if no constraint is to be handled - for (unsigned int row=0; row!=n_rows; ++row) - old_line.push_back (row); - else - for (unsigned int row=0; row!=n_rows; ++row) - if (row == next_constraint->line) - { - // this line is constrained - old_line.push_back (-1); - // note that @p lines is ordered - ++shift; - ++next_constraint; - if (next_constraint == lines.end()) - // nothing more to do; finish rest - // of loop - { - for (unsigned int i=row+1; iinhomogeneity; - // then add the different - // contributions - for (unsigned int i=0; ientries.size(); ++i) - uncondensed(line) += (condensed(old_line[next_constraint->entries[i].first]) * - next_constraint->entries[i].second); - ++next_constraint; - }; -} - - - -template -void -ConstraintMatrix::distribute (VectorType &vec) const -{ - Assert (sorted == true, ExcMatrixNotClosed()); - - std::vector::const_iterator next_constraint = lines.begin(); - for (; next_constraint != lines.end(); ++next_constraint) - { - // fill entry in line - // next_constraint.line by adding the - // different contributions - double new_value = next_constraint->inhomogeneity; - for (unsigned int i=0; ientries.size(); ++i) - new_value += (vec(next_constraint->entries[i].first) * - next_constraint->entries[i].second); - vec(next_constraint->line) = new_value; + unsigned int col_begins = 0; + const std::pair row_block = + sparsity_pattern.get_row_indices().global_to_local(row); + + for (unsigned int i=0; i 0) + sparsity_pattern.block(row_block.first, i).add_entries(row_block.second, + cols.begin()+col_begins, + cols.begin()+col_begins+current_length); + col_begins = actual_block_ends[i]; + } + } } } diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h index 7361a2e1cd..0816f00ec3 100644 --- a/deal.II/lac/include/lac/sparse_matrix.templates.h +++ b/deal.II/lac/include/lac/sparse_matrix.templates.h @@ -1451,7 +1451,9 @@ template unsigned int SparseMatrix::memory_consumption () const { - return sizeof(*this) + max_len*sizeof(number); + return sizeof(*this) + max_len*sizeof(number) + + global_indices.size()*sizeof(unsigned int) + + column_values.size()*sizeof(number); }
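// Note on the final hunk (added for clarity): memory_consumption() now also
// counts the two member arrays global_indices and column_values, which, as
// far as this patch shows, act as scratch space for the row-wise add() path
// used above. The accounting follows the usual pattern for dynamic members,
// approximating each std::vector's footprint by size()*sizeof(element):
//
//   return sizeof(*this)                                    // fixed-size part
//          + max_len * sizeof(number)                       // matrix values
//          + global_indices.size() * sizeof(unsigned int)   // scratch columns
//          + column_values.size()  * sizeof(number);        // scratch values
//
// (vector::capacity() would be the tighter measure, since a vector may
// overallocate; size() is the approximation the patch chooses.)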