From: Matthias Maier Date: Thu, 24 May 2018 20:28:30 +0000 (-0500) Subject: lac: add number template parameter to affine_constraints.templates.h X-Git-Tag: v9.1.0-rc1~1067^2~37 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=759770c847c7a6d1034e950771d47ed79bea1ac1;p=dealii.git lac: add number template parameter to affine_constraints.templates.h --- diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h index db9c1e204e..03542e1481 100644 --- a/include/deal.II/lac/affine_constraints.templates.h +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -13,25 +13,22 @@ // // --------------------------------------------------------------------- - #ifndef dealii_affine_constraints_templates_h #define dealii_affine_constraints_templates_h - -#include - #include #include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include +#include +#include #include +#include +#include #include #include @@ -39,33 +36,30 @@ DEAL_II_NAMESPACE_OPEN - template void -AffineConstraints::condense (SparseMatrix &uncondensed) const +AffineConstraints::condense(SparseMatrix &uncondensed) const { - Vector dummy (0); - condense (uncondensed, dummy); + Vector dummy(0); + condense(uncondensed, dummy); } - - template void -AffineConstraints::condense (BlockSparseMatrix &uncondensed) const +AffineConstraints::condense( + BlockSparseMatrix &uncondensed) const { - BlockVector dummy (0); - condense (uncondensed, dummy); + BlockVector dummy(0); + condense(uncondensed, dummy); } - - +template template void -AffineConstraints::condense (const VectorType &vec_ghosted, - VectorType &vec) const +AffineConstraints::condense(const VectorType &vec_ghosted, + VectorType & vec) const { - Assert (sorted == true, ExcMatrixNotClosed()); + Assert(sorted == true, ExcMatrixNotClosed()); // if this is called with different arguments, we need to copy the data over: if (&vec != &vec_ghosted) @@ -76,70 +70,69 @@ AffineConstraints::condense (const VectorType &vec_ghosted, // and in the second one we need to set elements to zero. for // parallel vectors, this can only work if we can put a compress() // in between, but we don't want to call compress() twice per entry - for (std::vector::const_iterator - constraint_line = lines.begin(); - constraint_line!=lines.end(); ++constraint_line) + for (std::vector::const_iterator constraint_line = + lines.begin(); + constraint_line != lines.end(); + ++constraint_line) { // in case the constraint is // inhomogeneous, this function is not // appropriate. Throw an exception. 
- Assert (constraint_line->inhomogeneity == 0., - ExcMessage ("Inhomogeneous constraint cannot be condensed " - "without any matrix specified.")); + Assert(constraint_line->inhomogeneity == 0., + ExcMessage("Inhomogeneous constraint cannot be condensed " + "without any matrix specified.")); - const typename VectorType::value_type old_value = vec_ghosted(constraint_line->index); - for (size_type q=0; q!=constraint_line->entries.size(); ++q) + const typename VectorType::value_type old_value = + vec_ghosted(constraint_line->index); + for (size_type q = 0; q != constraint_line->entries.size(); ++q) if (vec.in_local_range(constraint_line->entries[q].first) == true) - vec(constraint_line->entries[q].first) - += (static_cast - (old_value) * - constraint_line->entries[q].second); + vec(constraint_line->entries[q].first) += + (static_cast(old_value) * + constraint_line->entries[q].second); } vec.compress(VectorOperation::add); - for (std::vector::const_iterator - constraint_line = lines.begin(); - constraint_line!=lines.end(); ++constraint_line) + for (std::vector::const_iterator constraint_line = + lines.begin(); + constraint_line != lines.end(); + ++constraint_line) if (vec.in_local_range(constraint_line->index) == true) vec(constraint_line->index) = 0.; vec.compress(VectorOperation::insert); } - - +template template void -AffineConstraints::condense (VectorType &vec) const +AffineConstraints::condense(VectorType &vec) const { condense(vec, vec); } - - -template +template +template void -AffineConstraints::condense (SparseMatrix &uncondensed, - VectorType &vec) const +AffineConstraints::condense(SparseMatrix &uncondensed, + VectorType & vec) const { // check whether we work on real vectors // or we just used a dummy when calling // the other function above. const bool use_vectors = vec.size() == 0 ? false : true; - const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern (); + const SparsityPattern &sparsity = uncondensed.get_sparsity_pattern(); - Assert (sorted == true, ExcMatrixNotClosed()); - Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed()); - Assert (sparsity.n_rows() == sparsity.n_cols(), - ExcNotQuadratic()); + Assert(sorted == true, ExcMatrixNotClosed()); + Assert(sparsity.is_compressed() == true, ExcMatrixNotClosed()); + Assert(sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic()); if (use_vectors == true) - AssertDimension (vec.size(), sparsity.n_rows()); + AssertDimension(vec.size(), sparsity.n_rows()); double average_diagonal = 0; - for (size_type i=0; i &uncondensed, // necessary. otherwise, the number states // which line in the constraint matrix // handles this index - std::vector distribute (sparsity.n_rows(), - numbers::invalid_size_type); + std::vector distribute(sparsity.n_rows(), + numbers::invalid_size_type); - for (size_type c=0; c::iterator - entry = uncondensed.begin(row); - entry != uncondensed.end(row); ++entry) + for (typename SparseMatrix::iterator entry = + uncondensed.begin(row); + entry != uncondensed.end(row); + ++entry) { const size_type column = entry->column(); @@ -171,8 +165,8 @@ AffineConstraints::condense (SparseMatrix &uncondensed, // happen, since we only // operate on compressed // matrices! 
- Assert (column != SparsityPattern::invalid_entry, - ExcMatrixNotClosed()); + Assert(column != SparsityPattern::invalid_entry, + ExcMatrixNotClosed()); if (distribute[column] != numbers::invalid_size_type) // distribute entry at @@ -182,16 +176,18 @@ AffineConstraints::condense (SparseMatrix &uncondensed, // set old entry to // zero { - for (size_type q=0; - q!=lines[distribute[column]].entries.size(); ++q) + for (size_type q = 0; + q != lines[distribute[column]].entries.size(); + ++q) { // need a temporary variable to avoid errors like - // no known conversion from 'complex::type>' to 'const complex' for 3rd argument + // no known conversion from 'complex::type>' to 'const + // complex' for 3rd argument number v = static_cast(entry->value()); - v *=lines[distribute[column]].entries[q].second; - uncondensed.add (row, - lines[distribute[column]].entries[q].first, - v); + v *= lines[distribute[column]].entries[q].second; + uncondensed.add( + row, lines[distribute[column]].entries[q].first, v); } // need to subtract this element from the @@ -200,8 +196,8 @@ AffineConstraints::condense (SparseMatrix &uncondensed, // row of the inhomogeneous constraint in // the matrix with Gauss elimination if (use_vectors == true) - vec(row) -= - static_cast(entry->value()) * lines[distribute[column]].inhomogeneity; + vec(row) -= static_cast(entry->value()) * + lines[distribute[column]].inhomogeneity; // set old value to zero entry->value() = 0.; @@ -211,9 +207,10 @@ AffineConstraints::condense (SparseMatrix &uncondensed, else // row must be distributed { - for (typename SparseMatrix::iterator - entry = uncondensed.begin(row); - entry != uncondensed.end(row); ++entry) + for (typename SparseMatrix::iterator entry = + uncondensed.begin(row); + entry != uncondensed.end(row); + ++entry) { const size_type column = entry->column(); @@ -222,8 +219,8 @@ AffineConstraints::condense (SparseMatrix &uncondensed, // happen, since we only // operate on compressed // matrices! - Assert (column != SparsityPattern::invalid_entry, - ExcMatrixNotClosed()); + Assert(column != SparsityPattern::invalid_entry, + ExcMatrixNotClosed()); if (distribute[column] == numbers::invalid_size_type) // distribute entry at @@ -233,16 +230,18 @@ AffineConstraints::condense (SparseMatrix &uncondensed, // column. 
set // old entry to zero { - for (size_type q=0; - q!=lines[distribute[row]].entries.size(); ++q) + for (size_type q = 0; + q != lines[distribute[row]].entries.size(); + ++q) { // need a temporary variable to avoid errors like - // no known conversion from 'complex::type>' to 'const complex' for 3rd argument + // no known conversion from 'complex::type>' to 'const + // complex' for 3rd argument number v = static_cast(entry->value()); v *= lines[distribute[row]].entries[q].second; - uncondensed.add (lines[distribute[row]].entries[q].first, - column, - v); + uncondensed.add( + lines[distribute[row]].entries[q].first, column, v); } // set old entry to zero @@ -256,39 +255,47 @@ AffineConstraints::condense (SparseMatrix &uncondensed, // to one on main // diagonal, zero otherwise { - for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p) + for (size_type p = 0; + p != lines[distribute[row]].entries.size(); + ++p) { - for (size_type q=0; - q!=lines[distribute[column]].entries.size(); ++q) + for (size_type q = 0; + q != lines[distribute[column]].entries.size(); + ++q) { // need a temporary variable to avoid errors like - // no known conversion from 'complex::type>' to 'const complex' for 3rd argument + // no known conversion from 'complex::type>' to 'const + // complex' for 3rd argument number v = static_cast(entry->value()); v *= lines[distribute[row]].entries[p].second * lines[distribute[column]].entries[q].second; - uncondensed.add (lines[distribute[row]].entries[p].first, - lines[distribute[column]].entries[q].first, - v); + uncondensed.add( + lines[distribute[row]].entries[p].first, + lines[distribute[column]].entries[q].first, + v); } if (use_vectors == true) vec(lines[distribute[row]].entries[p].first) -= - static_cast(entry->value()) * lines[distribute[row]].entries[p].second * + static_cast(entry->value()) * + lines[distribute[row]].entries[p].second * lines[distribute[column]].inhomogeneity; } // set old entry to correct // value - entry->value() = (row == column ? average_diagonal : 0. ); + entry->value() = (row == column ? 
average_diagonal : 0.); } } // take care of vector if (use_vectors == true) { - for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q) - vec(lines[distribute[row]].entries[q].first) - += (vec(row) * lines[distribute[row]].entries[q].second); + for (size_type q = 0; q != lines[distribute[row]].entries.size(); + ++q) + vec(lines[distribute[row]].entries[q].first) += + (vec(row) * lines[distribute[row]].entries[q].second); vec(lines[distribute[row]].index) = 0.; } @@ -296,12 +303,11 @@ AffineConstraints::condense (SparseMatrix &uncondensed, } } - - -template +template +template void -AffineConstraints::condense (BlockSparseMatrix &uncondensed, - BlockVectorType &vec) const +AffineConstraints::condense(BlockSparseMatrix &uncondensed, + BlockVectorType & vec) const { // check whether we work on real vectors // or we just used a dummy when calling @@ -310,34 +316,29 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, const size_type blocks = uncondensed.n_block_rows(); - const BlockSparsityPattern & - sparsity = uncondensed.get_sparsity_pattern (); + const BlockSparsityPattern &sparsity = uncondensed.get_sparsity_pattern(); - Assert (sorted == true, ExcMatrixNotClosed()); - Assert (sparsity.is_compressed() == true, ExcMatrixNotClosed()); - Assert (sparsity.n_rows() == sparsity.n_cols(), - ExcNotQuadratic()); - Assert (sparsity.n_block_rows() == sparsity.n_block_cols(), - ExcNotQuadratic()); - Assert (sparsity.n_block_rows() == sparsity.n_block_cols(), - ExcNotQuadratic()); - Assert (sparsity.get_column_indices() == sparsity.get_row_indices(), - ExcNotQuadratic()); + Assert(sorted == true, ExcMatrixNotClosed()); + Assert(sparsity.is_compressed() == true, ExcMatrixNotClosed()); + Assert(sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic()); + Assert(sparsity.n_block_rows() == sparsity.n_block_cols(), ExcNotQuadratic()); + Assert(sparsity.n_block_rows() == sparsity.n_block_cols(), ExcNotQuadratic()); + Assert(sparsity.get_column_indices() == sparsity.get_row_indices(), + ExcNotQuadratic()); if (use_vectors == true) { - AssertDimension (vec.size(), sparsity.n_rows()); - AssertDimension (vec.n_blocks(), sparsity.n_block_rows()); + AssertDimension(vec.size(), sparsity.n_rows()); + AssertDimension(vec.n_blocks(), sparsity.n_block_rows()); } double average_diagonal = 0; - for (size_type b=0; b &uncondensed, // otherwise, the number states which line // in the constraint matrix handles this // index - std::vector distribute (sparsity.n_rows(), - numbers::invalid_size_type); + std::vector distribute(sparsity.n_rows(), + numbers::invalid_size_type); - for (size_type c=0; c - block_index = index_mapping.global_to_local(row); + const std::pair block_index = + index_mapping.global_to_local(row); const size_type block_row = block_index.first; if (distribute[row] == numbers::invalid_size_type) @@ -367,22 +368,23 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, // whether this column must // be distributed { - // to loop over all entries // in this row, we have to // loop over all blocks in // this blockrow and the // corresponding row // therein - for (size_type block_col=0; block_col::iterator - entry = uncondensed.block(block_row, block_col).begin(block_index.second); - entry != uncondensed.block(block_row, block_col).end(block_index.second); + for (typename SparseMatrix::iterator entry = + uncondensed.block(block_row, block_col) + .begin(block_index.second); + entry != uncondensed.block(block_row, block_col) + .end(block_index.second); ++entry) { - const size_type 
global_col - = index_mapping.local_to_global(block_col,entry->column()); + const size_type global_col = + index_mapping.local_to_global(block_col, entry->column()); if (distribute[global_col] != numbers::invalid_size_type) // distribute entry at @@ -391,14 +393,16 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, // global_col; set old // entry to zero { - const double old_value = entry->value (); + const double old_value = entry->value(); - for (size_type q=0; - q!=lines[distribute[global_col]].entries.size(); ++q) - uncondensed.add (row, - lines[distribute[global_col]].entries[q].first, - old_value * - lines[distribute[global_col]].entries[q].second); + for (size_type q = 0; + q != lines[distribute[global_col]].entries.size(); + ++q) + uncondensed.add( + row, + lines[distribute[global_col]].entries[q].first, + old_value * + lines[distribute[global_col]].entries[q].second); // need to subtract this element from the // vector. this corresponds to an @@ -421,18 +425,19 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, // whole row into the // chunks defined by the // blocks - for (size_type block_col=0; block_col::iterator - entry = uncondensed.block(block_row, block_col).begin(block_index.second); - entry != uncondensed.block(block_row, block_col).end(block_index.second); + for (typename SparseMatrix::iterator entry = + uncondensed.block(block_row, block_col) + .begin(block_index.second); + entry != uncondensed.block(block_row, block_col) + .end(block_index.second); ++entry) { - const size_type global_col - = index_mapping.local_to_global (block_col, entry->column()); + const size_type global_col = + index_mapping.local_to_global(block_col, entry->column()); - if (distribute[global_col] == - numbers::invalid_size_type) + if (distribute[global_col] == numbers::invalid_size_type) // distribute // entry at // irregular @@ -445,12 +450,13 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, { const double old_value = entry->value(); - for (size_type q=0; - q!=lines[distribute[row]].entries.size(); ++q) - uncondensed.add (lines[distribute[row]].entries[q].first, - global_col, - old_value * - lines[distribute[row]].entries[q].second); + for (size_type q = 0; + q != lines[distribute[row]].entries.size(); + ++q) + uncondensed.add( + lines[distribute[row]].entries[q].first, + global_col, + old_value * lines[distribute[row]].entries[q].second); entry->value() = 0.; } @@ -463,24 +469,34 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, // main diagonal, zero // otherwise { - const double old_value = entry->value (); + const double old_value = entry->value(); - for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p) + for (size_type p = 0; + p != lines[distribute[row]].entries.size(); + ++p) { - for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q) - uncondensed.add (lines[distribute[row]].entries[p].first, - lines[distribute[global_col]].entries[q].first, - old_value * - lines[distribute[row]].entries[p].second * - lines[distribute[global_col]].entries[q].second); + for (size_type q = 0; + q != + lines[distribute[global_col]].entries.size(); + ++q) + uncondensed.add( + lines[distribute[row]].entries[p].first, + lines[distribute[global_col]].entries[q].first, + old_value * + lines[distribute[row]].entries[p].second * + lines[distribute[global_col]] + .entries[q] + .second); if (use_vectors == true) vec(lines[distribute[row]].entries[p].first) -= - old_value * lines[distribute[row]].entries[p].second * + old_value * + 
lines[distribute[row]].entries[p].second * lines[distribute[global_col]].inhomogeneity; } - entry->value() = (row == global_col ? average_diagonal : 0. ); + entry->value() = + (row == global_col ? average_diagonal : 0.); } } } @@ -488,9 +504,10 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, // take care of vector if (use_vectors == true) { - for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q) - vec(lines[distribute[row]].entries[q].first) - += (vec(row) * lines[distribute[row]].entries[q].second); + for (size_type q = 0; q != lines[distribute[row]].entries.size(); + ++q) + vec(lines[distribute[row]].entries[q].first) += + (vec(row) * lines[distribute[row]].entries[q].second); vec(lines[distribute[row]].index) = 0.; } @@ -498,11 +515,10 @@ AffineConstraints::condense (BlockSparseMatrix &uncondensed, } } - -//TODO: I'm sure the following could be made more elegant by using a bit of -//introspection using static member variables of the various vector -//classes to dispatch between the different functions, rather than using -//knowledge of the individual types +// TODO: I'm sure the following could be made more elegant by using a bit of +// introspection using static member variables of the various vector +// classes to dispatch between the different functions, rather than using +// knowledge of the individual types // number of functions to select the right implementation for set_zero(). namespace internal @@ -514,40 +530,46 @@ namespace internal typedef types::global_dof_index size_type; template - void set_zero_parallel(const std::vector &cm, - VectorType &vec, - size_type shift = 0) + void + set_zero_parallel(const std::vector &cm, + VectorType & vec, + size_type shift = 0) { Assert(!vec.has_ghost_elements(), ExcInternalError()); IndexSet locally_owned = vec.locally_owned_elements(); for (typename std::vector::const_iterator it = cm.begin(); - it != cm.end(); ++it) + it != cm.end(); + ++it) { // If shift>0 then we are working on a part of a BlockVector // so vec(i) is actually the global entry i+shift. // We first make sure the line falls into the range of vec, // then check if is part of the local part of the vector, before // finally setting it to 0. - if ((*it)::set(0., idx, vec); } } - template - void set_zero_parallel(const std::vector &cm, LinearAlgebra::distributed::Vector &vec, size_type shift = 0) + template + void + set_zero_parallel(const std::vector & cm, + LinearAlgebra::distributed::Vector &vec, + size_type shift = 0) { for (typename std::vector::const_iterator it = cm.begin(); - it != cm.end(); ++it) + it != cm.end(); + ++it) { // If shift>0 then we are working on a part of a BlockVector // so vec(i) is actually the global entry i+shift. // We first make sure the line falls into the range of vec, // then check if is part of the local part of the vector, before // finally setting it to 0. 
- if ((*it) - void set_zero_in_parallel(const std::vector &cm, - VectorType &vec, - std::integral_constant) + void + set_zero_in_parallel(const std::vector &cm, + VectorType & vec, + std::integral_constant) { set_zero_parallel(cm, vec, 0); } // in parallel for BlockVectors template - void set_zero_in_parallel(const std::vector &cm, - VectorType &vec, - std::integral_constant) + void + set_zero_in_parallel(const std::vector &cm, + VectorType & vec, + std::integral_constant) { size_type start_shift = 0; - for (size_type j=0; j - void set_zero_serial(const std::vector &cm, - VectorType &vec) + void + set_zero_serial(const std::vector &cm, VectorType &vec) { for (typename std::vector::const_iterator it = cm.begin(); - it != cm.end(); ++it) + it != cm.end(); + ++it) vec(*it) = 0.; } template - void set_zero_all(const std::vector &cm, - VectorType &vec) + void + set_zero_all(const std::vector &cm, VectorType &vec) { - set_zero_in_parallel(cm, vec, std::integral_constant::value>()); + set_zero_in_parallel( + cm, + vec, + std::integral_constant::value>()); vec.compress(VectorOperation::insert); } - template - void set_zero_all(const std::vector &cm, - dealii::Vector &vec) + void + set_zero_all(const std::vector &cm, dealii::Vector &vec) { set_zero_serial(cm, vec); } template - void set_zero_all(const std::vector &cm, - dealii::BlockVector &vec) + void + set_zero_all(const std::vector &cm, + dealii::BlockVector & vec) { set_zero_serial(cm, vec); } - } - } -} - + } // namespace + } // namespace AffineConstraintsImplementation +} // namespace internal +template template void -AffineConstraints::set_zero (VectorType &vec) const +AffineConstraints::set_zero(VectorType &vec) const { // since we lines is a private member, we cannot pass it to the functions // above. 
therefore, copy the content which is cheap std::vector constrained_lines(lines.size()); - for (unsigned int i=0; i template void -AffineConstraints:: -distribute_local_to_global (const Vector &local_vector, - const std::vector &local_dof_indices, - VectorType &global_vector, - const FullMatrix &local_matrix) const +AffineConstraints::distribute_local_to_global( + const Vector & local_vector, + const std::vector &local_dof_indices, + VectorType & global_vector, + const FullMatrix & local_matrix) const { - distribute_local_to_global(local_vector,local_dof_indices,local_dof_indices, global_vector, local_matrix, true); + distribute_local_to_global(local_vector, + local_dof_indices, + local_dof_indices, + global_vector, + local_matrix, + true); } - - +template template void -AffineConstraints:: -distribute_local_to_global (const Vector &local_vector, - const std::vector &local_dof_indices_row, - const std::vector &local_dof_indices_col, - VectorType &global_vector, - const FullMatrix &local_matrix, - bool diagonal) const +AffineConstraints::distribute_local_to_global( + const Vector & local_vector, + const std::vector &local_dof_indices_row, + const std::vector &local_dof_indices_col, + VectorType & global_vector, + const FullMatrix & local_matrix, + bool diagonal) const { - Assert (sorted == true, ExcMatrixNotClosed()); - AssertDimension (local_vector.size(), local_dof_indices_row.size()); - AssertDimension (local_matrix.m(), local_dof_indices_row.size()); - AssertDimension (local_matrix.n(), local_dof_indices_col.size()); + Assert(sorted == true, ExcMatrixNotClosed()); + AssertDimension(local_vector.size(), local_dof_indices_row.size()); + AssertDimension(local_matrix.m(), local_dof_indices_row.size()); + AssertDimension(local_matrix.n(), local_dof_indices_col.size()); // diagonal checks if we have only one index set (if both are equal // diagonal should be set to true). @@ -672,7 +704,7 @@ distribute_local_to_global (const Vector &local_vector, global_vector.add(local_dof_indices_row, local_vector); } else - for (size_type i=0; i &local_vector, // find the constraint line to the given // global dof index - const size_type line_index = calculate_line_index (local_dof_indices_col[i]); - const ConstraintLine *position = - lines_cache.size() <= line_index ? nullptr : &lines[lines_cache[line_index]]; + const size_type line_index = + calculate_line_index(local_dof_indices_col[i]); + const ConstraintLine *position = lines_cache.size() <= line_index ? + nullptr : + &lines[lines_cache[line_index]]; // Gauss elimination of the matrix columns with the inhomogeneity. // Go through them one by one and again check whether they are // constrained. 
If so, distribute the constraint const double val = position->inhomogeneity; if (val != 0) - for (size_type j=0; j &local_vector, // the entries of fixed dofs if (diagonal) { - for (size_type j=0; jentries.size(); ++j) + for (size_type j = 0; j < position->entries.size(); ++j) { - Assert (!(!local_lines.size() - || local_lines.is_element(position->entries[j].first)) - || is_constrained(position->entries[j].first) == false, - ExcMessage ("Tried to distribute to a fixed dof.")); - global_vector(position->entries[j].first) - += local_vector(i) * position->entries[j].second; + Assert(!(!local_lines.size() || + local_lines.is_element(position->entries[j].first)) || + is_constrained(position->entries[j].first) == false, + ExcMessage("Tried to distribute to a fixed dof.")); + global_vector(position->entries[j].first) += + local_vector(i) * position->entries[j].second; } } } } - - namespace internal { namespace @@ -754,104 +789,112 @@ namespace internal // need a few overloads #ifdef DEAL_II_WITH_TRILINOS void - import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec, - const IndexSet &/*locally_owned_elements*/, - const IndexSet &needed_elements, - TrilinosWrappers::MPI::Vector &output, - const std::integral_constant /*is_block_vector*/) + import_vector_with_ghost_elements( + const TrilinosWrappers::MPI::Vector &vec, + const IndexSet & /*locally_owned_elements*/, + const IndexSet & needed_elements, + TrilinosWrappers::MPI::Vector &output, + const std::integral_constant /*is_block_vector*/) { - Assert(!vec.has_ghost_elements(), - ExcGhostsPresent()); -#ifdef DEAL_II_WITH_MPI - const Epetra_MpiComm *mpi_comm - = dynamic_cast(&vec.trilinos_vector().Comm()); - - Assert (mpi_comm != nullptr, ExcInternalError()); - output.reinit (needed_elements, mpi_comm->GetMpiComm()); -#else - output.reinit (needed_elements, MPI_COMM_SELF); -#endif + Assert(!vec.has_ghost_elements(), ExcGhostsPresent()); +# ifdef DEAL_II_WITH_MPI + const Epetra_MpiComm *mpi_comm = + dynamic_cast(&vec.trilinos_vector().Comm()); + + Assert(mpi_comm != nullptr, ExcInternalError()); + output.reinit(needed_elements, mpi_comm->GetMpiComm()); +# else + output.reinit(needed_elements, MPI_COMM_SELF); +# endif output = vec; } #endif #ifdef DEAL_II_WITH_PETSC void - import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec, - const IndexSet &locally_owned_elements, - const IndexSet &needed_elements, - PETScWrappers::MPI::Vector &output, - const std::integral_constant /*is_block_vector*/) + import_vector_with_ghost_elements( + const PETScWrappers::MPI::Vector &vec, + const IndexSet & locally_owned_elements, + const IndexSet & needed_elements, + PETScWrappers::MPI::Vector & output, + const std::integral_constant /*is_block_vector*/) { - output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator()); + output.reinit( + locally_owned_elements, needed_elements, vec.get_mpi_communicator()); output = vec; } #endif template void - import_vector_with_ghost_elements (const LinearAlgebra::distributed::Vector &vec, - const IndexSet &locally_owned_elements, - const IndexSet &needed_elements, - LinearAlgebra::distributed::Vector &output, - const std::integral_constant /*is_block_vector*/) + import_vector_with_ghost_elements( + const LinearAlgebra::distributed::Vector &vec, + const IndexSet & locally_owned_elements, + const IndexSet & needed_elements, + LinearAlgebra::distributed::Vector & output, + const std::integral_constant /*is_block_vector*/) { // TODO: the in vector might already have all 
elements. need to find a // way to efficiently avoid the copy then - const_cast&>(vec).zero_out_ghosts(); - output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator()); + const_cast &>(vec) + .zero_out_ghosts(); + output.reinit( + locally_owned_elements, needed_elements, vec.get_mpi_communicator()); output = vec; output.update_ghost_values(); } - // all other vector non-block vector types are sequential and we should // not have this function called at all -- so throw an exception template void - import_vector_with_ghost_elements (const Vector &/*vec*/, - const IndexSet &/*locally_owned_elements*/, - const IndexSet &/*needed_elements*/, - Vector &/*output*/, - const std::integral_constant /*is_block_vector*/) + import_vector_with_ghost_elements( + const Vector & /*vec*/, + const IndexSet & /*locally_owned_elements*/, + const IndexSet & /*needed_elements*/, + Vector & /*output*/, + const std::integral_constant /*is_block_vector*/) { - Assert (false, ExcMessage ("We shouldn't even get here!")); + Assert(false, ExcMessage("We shouldn't even get here!")); } - // for block vectors, simply dispatch to the individual blocks template void - import_vector_with_ghost_elements (const VectorType &vec, - const IndexSet &locally_owned_elements, - const IndexSet &needed_elements, - VectorType &output, - const std::integral_constant /*is_block_vector*/) + import_vector_with_ghost_elements( + const VectorType &vec, + const IndexSet & locally_owned_elements, + const IndexSet & needed_elements, + VectorType & output, + const std::integral_constant /*is_block_vector*/) { - output.reinit (vec.n_blocks()); + output.reinit(vec.n_blocks()); types::global_dof_index block_start = 0; - for (unsigned int b=0; b()); + import_vector_with_ghost_elements( + vec.block(b), + locally_owned_elements.get_view(block_start, + block_start + vec.block(b).size()), + needed_elements.get_view(block_start, + block_start + vec.block(b).size()), + output.block(b), + std::integral_constant()); block_start += vec.block(b).size(); } - output.collect_sizes (); + output.collect_sizes(); } - } -} - + } // namespace +} // namespace internal +template template void -AffineConstraints::distribute (VectorType &vec) const +AffineConstraints::distribute(VectorType &vec) const { - Assert (sorted==true, ExcMatrixNotClosed()); + Assert(sorted == true, ExcMatrixNotClosed()); // if the vector type supports parallel storage and if the vector actually // does store only part of the vector, distributing is slightly more @@ -865,47 +908,47 @@ AffineConstraints::distribute (VectorType &vec) const // the last else is for the simple case (sequential vector) const IndexSet vec_owned_elements = vec.locally_owned_elements(); - if ( dealii::is_serial_vector< VectorType >::value == false ) + if (dealii::is_serial_vector::value == false) { // This processor owns only part of the vector. one may think that // every processor should be able to simply communicate those elements // it owns and for which it knows that they act as sources to constrained // DoFs to the owner of these DoFs. This would lead to a scheme where all - // we need to do is to add some local elements to (possibly non-local) ones - // and then call compress(). + // we need to do is to add some local elements to (possibly non-local) + // ones and then call compress(). 
// - // Alas, this scheme does not work as evidenced by the disaster of bug #51, - // see http://code.google.com/p/dealii/issues/detail?id=51 and the + // Alas, this scheme does not work as evidenced by the disaster of bug + // #51, see http://code.google.com/p/dealii/issues/detail?id=51 and the // reversion of one attempt that implements this in r29662. Rather, we // need to get a vector that has all the *sources* or constraints we // own locally, possibly as ghost vector elements, then read from them, - // and finally throw away the ghosted vector. Implement this in the following. + // and finally throw away the ghosted vector. Implement this in the + // following. IndexSet needed_elements = vec_owned_elements; typedef std::vector::const_iterator constraint_iterator; - for (constraint_iterator it = lines.begin(); - it != lines.end(); ++it) + for (constraint_iterator it = lines.begin(); it != lines.end(); ++it) if (vec_owned_elements.is_element(it->index)) - for (unsigned int i=0; ientries.size(); ++i) + for (unsigned int i = 0; i < it->entries.size(); ++i) if (!vec_owned_elements.is_element(it->entries[i].first)) needed_elements.add_index(it->entries[i].first); VectorType ghosted_vector; - internal::import_vector_with_ghost_elements (vec, - vec_owned_elements, needed_elements, - ghosted_vector, - std::integral_constant::value>()); - - for (constraint_iterator it = lines.begin(); - it != lines.end(); ++it) + internal::import_vector_with_ghost_elements( + vec, + vec_owned_elements, + needed_elements, + ghosted_vector, + std::integral_constant::value>()); + + for (constraint_iterator it = lines.begin(); it != lines.end(); ++it) if (vec_owned_elements.is_element(it->index)) { - typename VectorType::value_type - new_value = it->inhomogeneity; - for (unsigned int i=0; ientries.size(); ++i) - new_value += (static_cast - (internal::ElementAccess::get( - ghosted_vector, it->entries[i].first)) * + typename VectorType::value_type new_value = it->inhomogeneity; + for (unsigned int i = 0; i < it->entries.size(); ++i) + new_value += (static_cast( + internal::ElementAccess::get( + ghosted_vector, it->entries[i].first)) * it->entries[i].second); AssertIsFinite(new_value); internal::ElementAccess::set(new_value, it->index, vec); @@ -916,35 +959,34 @@ AffineConstraints::distribute (VectorType &vec) const // // this shouldn't be strictly necessary but it probably doesn't // hurt either - vec.compress (VectorOperation::insert); + vec.compress(VectorOperation::insert); } else // purely sequential vector (either because the type doesn't // support anything else or because it's completely stored // locally) { - std::vector::const_iterator next_constraint = lines.begin(); + std::vector::const_iterator next_constraint = + lines.begin(); for (; next_constraint != lines.end(); ++next_constraint) { // fill entry in line // next_constraint.index by adding the // different contributions - typename VectorType::value_type - new_value = next_constraint->inhomogeneity; - for (unsigned int i=0; ientries.size(); ++i) - new_value += (static_cast - (internal::ElementAccess::get( - vec, next_constraint->entries[i].first))* + typename VectorType::value_type new_value = + next_constraint->inhomogeneity; + for (unsigned int i = 0; i < next_constraint->entries.size(); ++i) + new_value += (static_cast( + internal::ElementAccess::get( + vec, next_constraint->entries[i].first)) * next_constraint->entries[i].second); AssertIsFinite(new_value); - internal::ElementAccess::set(new_value, next_constraint->index, - vec); + 
internal::ElementAccess::set( + new_value, next_constraint->index, vec); } } } - - // Some helper definitions for the local_to_global functions. namespace internals { @@ -967,54 +1009,52 @@ namespace internals // constraint. struct Distributing { - Distributing (const size_type global_row = numbers::invalid_size_type, - const size_type local_row = numbers::invalid_size_type); - Distributing (const Distributing &in); - Distributing &operator = (const Distributing &in); - bool operator < (const Distributing &in) const + Distributing(const size_type global_row = numbers::invalid_size_type, + const size_type local_row = numbers::invalid_size_type); + Distributing(const Distributing &in); + Distributing & + operator=(const Distributing &in); + bool + operator<(const Distributing &in) const { - return global_row > >, but tuned so that @@ -1026,75 +1066,74 @@ namespace internals // the GlobalRowsFromLocal. struct DataCache { - DataCache () - : - row_length (8) + DataCache() : row_length(8) {} - void reinit () + void + reinit() { individual_size.resize(0); data.resize(0); } - size_type insert_new_index (const std::pair &pair) + size_type + insert_new_index(const std::pair &pair) { Assert(row_length > 0, ExcInternalError()); const unsigned int index = individual_size.size(); individual_size.push_back(1); - data.resize(individual_size.size()*row_length); - data[index*row_length] = pair; - individual_size[index] = 1; + data.resize(individual_size.size() * row_length); + data[index * row_length] = pair; + individual_size[index] = 1; return index; } - void append_index (const size_type index, - const std::pair &pair) + void + append_index(const size_type index, + const std::pair &pair) { - AssertIndexRange (index, individual_size.size()); + AssertIndexRange(index, individual_size.size()); const size_type my_length = individual_size[index]; if (my_length == row_length) { - AssertDimension(data.size(), individual_size.size()*row_length); + AssertDimension(data.size(), individual_size.size() * row_length); // no space left in this row, need to double row_length and // rearrange the data items. Move all items to the right except the // first one, starting at the back. Since individual_size contains // at least one element when we get here, subtracting 1 works fine. - data.resize(2*data.size()); - for (size_type i=individual_size.size()-1; i>0; --i) + data.resize(2 * data.size()); + for (size_type i = individual_size.size() - 1; i > 0; --i) { const auto ptr = data.data(); - std::move_backward(ptr + i*row_length, - ptr + i*row_length + individual_size[i], - ptr + i*2*row_length + individual_size[i]); + std::move_backward(ptr + i * row_length, + ptr + i * row_length + individual_size[i], + ptr + i * 2 * row_length + individual_size[i]); } row_length *= 2; } - data[index*row_length+my_length] = pair; - individual_size[index] = my_length + 1; + data[index * row_length + my_length] = pair; + individual_size[index] = my_length + 1; } size_type - get_size (const size_type index) const + get_size(const size_type index) const { return individual_size[index]; } - const std::pair * - get_entry (const size_type index) const + const std::pair * + get_entry(const size_type index) const { - return &data[index*row_length]; + return &data[index * row_length]; } size_type row_length; - std::vector > data; + std::vector> data; std::vector individual_size; }; - - // collects all the global rows from a local contribution (cell) and their // origin (direct/constraint). 
this is basically a vector consisting of // "Distributing" structs using access via the DataCache. Provides some @@ -1123,76 +1162,78 @@ namespace internals class GlobalRowsFromLocal { public: - GlobalRowsFromLocal () - : - n_active_rows (0), - n_inhomogeneous_rows (0) + GlobalRowsFromLocal() : n_active_rows(0), n_inhomogeneous_rows(0) {} - void reinit (const size_type n_local_rows) + void + reinit(const size_type n_local_rows) { total_row_indices.resize(n_local_rows); - for (unsigned int i=0; i= n_inhomogeneous_rows, ExcInternalError()); - std::swap (total_row_indices[n_active_rows+i], - total_row_indices[n_active_rows+n_inhomogeneous_rows]); + Assert(i >= n_inhomogeneous_rows, ExcInternalError()); + std::swap(total_row_indices[n_active_rows + i], + total_row_indices[n_active_rows + n_inhomogeneous_rows]); n_inhomogeneous_rows++; } // the local row where constraint number i was detected, to find that row // easily when the GlobalRowsToLocal has been set up - size_type constraint_origin (size_type i) const + size_type + constraint_origin(size_type i) const { - return total_row_indices[n_active_rows+i].local_row; + return total_row_indices[n_active_rows + i].local_row; } // a vector that contains all the global ids and the corresponding local @@ -1287,13 +1340,13 @@ namespace internals private: // holds the actual data from the constraints - DataCache data_cache; + DataCache data_cache; // how many rows there are, constraints disregarded - size_type n_active_rows; + size_type n_active_rows; // the number of rows with inhomogeneous constraints - size_type n_inhomogeneous_rows; + size_type n_inhomogeneous_rows; }; // a function that appends an additional row to the list of values, or @@ -1301,24 +1354,24 @@ namespace internals // std::map, but here done for a // std::vector, much faster for short lists as we have them // here - inline - void - GlobalRowsFromLocal::insert_index (const size_type global_row, - const size_type local_row, - const double constraint_value) + inline void + GlobalRowsFromLocal::insert_index(const size_type global_row, + const size_type local_row, + const double constraint_value) { typedef std::vector::iterator index_iterator; - index_iterator pos, pos1; - Distributing row_value (global_row); - std::pair constraint (local_row, constraint_value); + index_iterator pos, pos1; + Distributing row_value(global_row); + std::pair constraint(local_row, constraint_value); // check whether the list was really sorted before entering here - for (size_type i=1; iglobal_row == global_row) pos1 = pos; else @@ -1328,9 +1381,9 @@ namespace internals } if (pos1->constraint_position == numbers::invalid_size_type) - pos1->constraint_position = data_cache.insert_new_index (constraint); + pos1->constraint_position = data_cache.insert_new_index(constraint); else - data_cache.append_index (pos1->constraint_position, constraint); + data_cache.append_index(pos1->constraint_position, constraint); } // this sort algorithm sorts std::vector, but does not take @@ -1339,9 +1392,8 @@ namespace internals // shellsort, which is very fast in case the indices are already sorted // (which is the usual case with DG elements), and not too slow in other // cases - inline - void - GlobalRowsFromLocal::sort () + inline void + GlobalRowsFromLocal::sort() { size_type i, j, j2, temp, templ, istep; size_type step; @@ -1350,41 +1402,42 @@ namespace internals const size_type length = size(); // make sure that we are in the range of the vector - AssertIndexRange (length, total_row_indices.size()+1); - for (size_type 
i=0; i 0) { - for (i=step; i < length; i++) + for (i = step; i < length; i++) { istep = step; - j = i; - j2 = j-istep; - temp = total_row_indices[i].global_row; + j = i; + j2 = j - istep; + temp = total_row_indices[i].global_row; templ = total_row_indices[i].local_row; if (total_row_indices[j2].global_row > temp) { - while ((j >= istep) && (total_row_indices[j2].global_row > temp)) + while ((j >= istep) && + (total_row_indices[j2].global_row > temp)) { - total_row_indices[j].global_row = total_row_indices[j2].global_row; - total_row_indices[j].local_row = total_row_indices[j2].local_row; + total_row_indices[j].global_row = + total_row_indices[j2].global_row; + total_row_indices[j].local_row = + total_row_indices[j2].local_row; j = j2; j2 -= istep; } total_row_indices[j].global_row = temp; - total_row_indices[j].local_row = templ; + total_row_indices[j].local_row = templ; } } - step = step>>1; + step = step >> 1; } } - - /** * Scratch data that is used during calls to distribute_local_to_global and * add_entries_local_to_global. In order to avoid frequent memory @@ -1411,17 +1464,13 @@ namespace internals /** * Constructor, does nothing. */ - ScratchData () - : - in_use (false) + ScratchData() : in_use(false) {} /** * Copy constructor, does nothing */ - ScratchData (const ScratchData &) - : - in_use (false) + ScratchData(const ScratchData &) : in_use(false) {} /** @@ -1437,7 +1486,7 @@ namespace internals /** * Temporary array for column values */ - std::vector values; + std::vector values; /** * Temporary array for block start indices @@ -1475,13 +1524,13 @@ namespace internals * Constructor. Grabs a scratch data object on the current thread and * mark it as used */ - ScratchDataAccessor() - : + ScratchDataAccessor() : my_scratch_data(&AffineConstraintsData::scratch_data.get()) { Assert(my_scratch_data->in_use == false, - ExcMessage("Access to thread-local scratch data tried, but it is already " - "in use")); + ExcMessage( + "Access to thread-local scratch data tried, but it is already " + "in use")); my_scratch_data->in_use = true; } @@ -1496,7 +1545,7 @@ namespace internals /** * Dereferencing operator. */ - ScratchData &operator* () + ScratchData &operator*() { return *my_scratch_data; } @@ -1504,7 +1553,7 @@ namespace internals /** * Dereferencing operator. */ - ScratchData *operator-> () + ScratchData *operator->() { return my_scratch_data; } @@ -1520,112 +1569,106 @@ namespace internals static Threads::ThreadLocalStorage scratch_data; }; - - // function for block matrices: Find out where in the list of local dofs // (sorted according to global ids) the individual blocks start. Transform // the global indices to block-local indices in order to be able to use // functions like vector.block(1)(block_local_id), instead of // vector(global_id). This avoids transforming indices one-by-one later on. 
template - inline - void - make_block_starts (const BlockType &block_object, - GlobalRowsFromLocal &global_rows, - std::vector &block_starts) + inline void + make_block_starts(const BlockType & block_object, + GlobalRowsFromLocal & global_rows, + std::vector &block_starts) { - AssertDimension (block_starts.size(), block_object.n_block_rows()+1); + AssertDimension(block_starts.size(), block_object.n_block_rows() + 1); typedef std::vector::iterator row_iterator; row_iterator block_indices = global_rows.total_row_indices.begin(); - const size_type num_blocks = block_object.n_block_rows(); + const size_type num_blocks = block_object.n_block_rows(); const size_type n_active_rows = global_rows.size(); // find end of rows. block_starts[0] = 0; - for (size_type i=1; i instead of // GlobalRowsFromLocal. Used in functions for sparsity patterns. template - inline - void - make_block_starts (const BlockType &block_object, - std::vector &row_indices, - std::vector &block_starts) + inline void + make_block_starts(const BlockType & block_object, + std::vector &row_indices, + std::vector &block_starts) { - AssertDimension (block_starts.size(), block_object.n_block_rows()+1); + AssertDimension(block_starts.size(), block_object.n_block_rows() + 1); typedef std::vector::iterator row_iterator; - row_iterator col_indices = row_indices.begin(); + row_iterator col_indices = row_indices.begin(); const size_type num_blocks = block_object.n_block_rows(); // find end of rows. block_starts[0] = 0; - for (size_type i=1; i - static inline - LocalType resolve_matrix_entry (const GlobalRowsFromLocal &global_rows, - const GlobalRowsFromLocal &global_cols, - const size_type i, - const size_type j, - const size_type loc_row, - const FullMatrix &local_matrix) + static inline LocalType + resolve_matrix_entry(const GlobalRowsFromLocal & global_rows, + const GlobalRowsFromLocal & global_cols, + const size_type i, + const size_type j, + const size_type loc_row, + const FullMatrix &local_matrix) { const size_type loc_col = global_cols.local_row(j); - LocalType col_val; + LocalType col_val; // case 1: row has direct contribution in local matrix. decide whether col // has a direct contribution. if not, set the value to zero. if (loc_row != numbers::invalid_size_type) { col_val = ((loc_col != numbers::invalid_size_type) ? 
- local_matrix(loc_row, loc_col) : 0); + local_matrix(loc_row, loc_col) : + 0); // account for indirect contributions by constraints in column - for (size_type p=0; p - inline - void - resolve_matrix_row (const GlobalRowsFromLocal &global_rows, - const GlobalRowsFromLocal &global_cols, - const size_type i, - const size_type column_start, - const size_type column_end, - const FullMatrix &local_matrix, - size_type *&col_ptr, - number *&val_ptr) + inline void + resolve_matrix_row(const GlobalRowsFromLocal & global_rows, + const GlobalRowsFromLocal & global_cols, + const size_type i, + const size_type column_start, + const size_type column_end, + const FullMatrix &local_matrix, + size_type *& col_ptr, + number *& val_ptr) { if (column_end == column_start) return; - AssertIndexRange (column_end-1, global_cols.size()); + AssertIndexRange(column_end - 1, global_cols.size()); const size_type loc_row = global_rows.local_row(i); // fast function if there are no indirect references to any of the local @@ -1681,14 +1722,14 @@ namespace internals AssertIndexRange(loc_row, local_matrix.m()); const LocalType *matrix_ptr = &local_matrix(loc_row, 0); - for (size_type j=column_start; j (col_val); + *val_ptr++ = static_cast(col_val); *col_ptr++ = global_cols.global_row(j); } } @@ -1698,75 +1739,73 @@ namespace internals // to do some more checks. else { - for (size_type j=column_start; j (col_val); + *val_ptr++ = static_cast(col_val); *col_ptr++ = global_cols.global_row(j); } } } } - - // specialized function that can write into the row of a // SparseMatrix. namespace dealiiSparseMatrix { template - static inline - void add_value (const LocalType value, - const size_type row, - const size_type column, - SparseMatrixIterator &matrix_values) + static inline void + add_value(const LocalType value, + const size_type row, + const size_type column, + SparseMatrixIterator &matrix_values) { (void)row; - if (value != LocalType ()) + if (value != LocalType()) { while (matrix_values->column() < column) ++matrix_values; - Assert (matrix_values->column() == column, - typename SparseMatrix::ExcInvalidIndex(row, column)); + Assert( + matrix_values->column() == column, + typename SparseMatrix::ExcInvalidIndex(row, column)); matrix_values->value() += value; } } - } - + } // namespace dealiiSparseMatrix // similar as before, now with shortcut for deal.II sparse matrices. 
this // lets us avoid using extra arrays, and does all the operations just in // place, i.e., in the respective matrix row template - inline - void - resolve_matrix_row (const GlobalRowsFromLocal &global_rows, - const size_type i, - const size_type column_start, - const size_type column_end, - const FullMatrix &local_matrix, - SparseMatrix *sparse_matrix) + inline void + resolve_matrix_row(const GlobalRowsFromLocal & global_rows, + const size_type i, + const size_type column_start, + const size_type column_end, + const FullMatrix &local_matrix, + SparseMatrix * sparse_matrix) { if (column_end == column_start) return; - AssertIndexRange (column_end-1, global_rows.size()); + AssertIndexRange(column_end - 1, global_rows.size()); const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern(); if (sparsity.n_nonzero_elements() == 0) return; - const size_type row = global_rows.global_row(i); + const size_type row = global_rows.global_row(i); const size_type loc_row = global_rows.local_row(i); - typename SparseMatrix::iterator - matrix_values = sparse_matrix->begin(row); + typename SparseMatrix::iterator matrix_values = + sparse_matrix->begin(row); const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols(); // distinguish three cases about what can happen for checking whether the @@ -1777,76 +1816,69 @@ namespace internals { if (global_rows.have_indirect_rows() == false) { - AssertIndexRange (loc_row, local_matrix.m()); + AssertIndexRange(loc_row, local_matrix.m()); const LocalType *matrix_ptr = &local_matrix(loc_row, 0); - for (size_type j=column_start; j=column_start && i= column_start && i < column_end) // case 2: can split loop { ++matrix_values; // jump over diagonal element if (global_rows.have_indirect_rows() == false) { - AssertIndexRange (loc_row, local_matrix.m()); + AssertIndexRange(loc_row, local_matrix.m()); const LocalType *matrix_ptr = &local_matrix(loc_row, 0); sparse_matrix->begin(row)->value() += matrix_ptr[loc_row]; - for (size_type j=column_start; jbegin(row)->value() += - resolve_matrix_entry (global_rows, global_rows, i, i, - loc_row, local_matrix); - for (size_type j=column_start; jbegin(row)->value() += resolve_matrix_entry( + global_rows, global_rows, i, i, loc_row, local_matrix); + for (size_type j = column_start; j < i; ++j) { - LocalType col_val = resolve_matrix_entry (global_rows, global_rows, i, j, - loc_row, local_matrix); - dealiiSparseMatrix::add_value (col_val, row, - global_rows.global_row(j), - matrix_values); + LocalType col_val = resolve_matrix_entry( + global_rows, global_rows, i, j, loc_row, local_matrix); + dealiiSparseMatrix::add_value( + col_val, row, global_rows.global_row(j), matrix_values); } - for (size_type j=i+1; jbegin(row)->value() += col_val; else - dealiiSparseMatrix::add_value(col_val, row, - global_rows.global_row(j), - matrix_values); + dealiiSparseMatrix::add_value( + col_val, row, global_rows.global_row(j), matrix_values); } } else { ++matrix_values; // jump over diagonal element - for (size_type j=column_start; jbegin(row)->value() += col_val; else - dealiiSparseMatrix::add_value (col_val, row, - global_rows.global_row(j), - matrix_values); + dealiiSparseMatrix::add_value( + col_val, row, global_rows.global_row(j), matrix_values); } } } - - // Same function to resolve all entries that will be added to the given // global row global_rows[i] as before, now for sparsity pattern - inline - void - resolve_matrix_row (const GlobalRowsFromLocal &global_rows, - const size_type i, - const size_type column_start, - const 
size_type column_end, - const Table<2,bool> &dof_mask, - std::vector::iterator &col_ptr) + inline void + resolve_matrix_row(const GlobalRowsFromLocal & global_rows, + const size_type i, + const size_type column_start, + const size_type column_end, + const Table<2, bool> & dof_mask, + std::vector::iterator &col_ptr) { if (column_end == column_start) return; @@ -1908,15 +1935,14 @@ namespace internals // rows at all on this set of dofs if (global_rows.have_indirect_rows() == false) { - Assert(loc_row < dof_mask.n_rows(), - ExcInternalError()); + Assert(loc_row < dof_mask.n_rows(), ExcInternalError()); - for (size_type j=column_start; j inline void - set_matrix_diagonals (const internals::GlobalRowsFromLocal &global_rows, - const std::vector &local_dof_indices, - const FullMatrix &local_matrix, - const AffineConstraints &constraints, - MatrixType &global_matrix, - VectorType &global_vector, - bool use_inhomogeneities_for_rhs) + set_matrix_diagonals( + const internals::GlobalRowsFromLocal & global_rows, + const std::vector & local_dof_indices, + const FullMatrix & local_matrix, + const AffineConstraints &constraints, + MatrixType & global_matrix, + VectorType & global_vector, + bool use_inhomogeneities_for_rhs) { if (global_rows.n_constraints() > 0) { - typename MatrixType::value_type average_diagonal = typename MatrixType::value_type(); - for (size_type i=0; i(local_matrix.m()); - for (size_type i=0; i inline void - set_sparsity_diagonals (const internals::GlobalRowsFromLocal &global_rows, - const std::vector &local_dof_indices, - const Table<2,bool> &dof_mask, - const bool keep_constrained_entries, - SparsityPatternType &sparsity_pattern) + set_sparsity_diagonals(const internals::GlobalRowsFromLocal &global_rows, + const std::vector &local_dof_indices, + const Table<2, bool> & dof_mask, + const bool keep_constrained_entries, + SparsityPatternType & sparsity_pattern) { // if we got constraints, need to add the diagonal element and, if the // user requested so, also the rest of the entries in rows and columns // that have been left out above if (global_rows.n_constraints() > 0) { - for (size_type i=0; i void -AffineConstraints:: -make_sorted_row_list (const std::vector &local_dof_indices, - internals::GlobalRowsFromLocal &global_rows) const +AffineConstraints::make_sorted_row_list( + const std::vector & local_dof_indices, + internals::GlobalRowsFromLocal &global_rows) const { const size_type n_local_dofs = local_dof_indices.size(); - AssertDimension (n_local_dofs, global_rows.size()); + AssertDimension(n_local_dofs, global_rows.size()); // when distributing the local data to the global matrix, we can quite // cheaply sort the indices (obviously, this introduces the need for @@ -2101,7 +2124,7 @@ make_sorted_row_list (const std::vector &local_dof_indices, // first add the indices in an unsorted way and only keep track of the // constraints that appear. They are resolved in a second step. 
- for (size_type i = 0; i &local_dof_indices, } global_rows.sort(); - const size_type n_constrained_rows = n_local_dofs-added_rows; - for (size_type i=0; i &local_dof_indices, - std::vector &active_dofs) const +template +inline void +AffineConstraints::make_sorted_row_list( + const std::vector &local_dof_indices, + std::vector & active_dofs) const { const size_type n_local_dofs = local_dof_indices.size(); - size_type added_rows = 0; - for (size_type i = 0; i &local_dof_indices, continue; } - active_dofs[n_local_dofs-i+added_rows-1] = i; + active_dofs[n_local_dofs - i + added_rows - 1] = i; } - std::sort (active_dofs.begin(), active_dofs.begin()+added_rows); + std::sort(active_dofs.begin(), active_dofs.begin() + added_rows); - const size_type n_constrained_dofs = n_local_dofs-added_rows; - for (size_type i=n_constrained_dofs; i>0; --i) + const size_type n_constrained_dofs = n_local_dofs - added_rows; + for (size_type i = n_constrained_dofs; i > 0; --i) { const size_type local_row = active_dofs.back(); // remove constrained entry since we are going to resolve it in place active_dofs.pop_back(); - const size_type global_row = local_dof_indices[local_row]; + const size_type global_row = local_dof_indices[local_row]; const ConstraintLine &position = lines[lines_cache[calculate_line_index(global_row)]]; - for (size_type q=0; q::iterator it = - Utilities::lower_bound(active_dofs.begin(), - active_dofs.end()-i+1, - new_index); + std::vector::iterator it = Utilities::lower_bound( + active_dofs.begin(), active_dofs.end() - i + 1, new_index); if (*it != new_index) active_dofs.insert(it, new_index); } @@ -2187,91 +2205,91 @@ make_sorted_row_list (const std::vector &local_dof_indices, } } - - // Resolve the constraints from the vector and apply inhomogeneities. +template template -inline -typename ProductType::type -AffineConstraints:: -resolve_vector_entry (const size_type i, - const internals::GlobalRowsFromLocal &global_rows, - const Vector &local_vector, - const std::vector &local_dof_indices, - const FullMatrix &local_matrix) const +inline typename ProductType::type +AffineConstraints::resolve_vector_entry( + const size_type i, + const internals::GlobalRowsFromLocal &global_rows, + const Vector & local_vector, + const std::vector & local_dof_indices, + const FullMatrix & local_matrix) const { - const size_type loc_row = global_rows.local_row(i); + const size_type loc_row = global_rows.local_row(i); const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities(); - typename ProductType::type val = 0; + typename ProductType::type val = 0; // has a direct contribution from some local entry. If we have inhomogeneous // constraints, compute the contribution of the inhomogeneity in the current // row. 
if (loc_row != numbers::invalid_size_type) { val = local_vector(loc_row); - for (size_type i=0; i::type add_this = local_vector (loc_row_q); - for (size_type k=0; k::type add_this = + local_vector(loc_row_q); + for (size_type k = 0; k < n_inhomogeneous_rows; ++k) + add_this -= + (local_matrix(loc_row_q, global_rows.constraint_origin(k)) * + lines[lines_cache[calculate_line_index( + local_dof_indices[global_rows.constraint_origin(k)])]] + .inhomogeneity); + val += add_this * global_rows.constraint_value(i, q); } return val; } - // internal implementation for distribute_local_to_global for standard // (non-block) matrices +template template void -AffineConstraints::distribute_local_to_global ( +AffineConstraints::distribute_local_to_global( const FullMatrix &local_matrix, - const Vector &local_vector, - const std::vector &local_dof_indices, - MatrixType &global_matrix, - VectorType &global_vector, - bool use_inhomogeneities_for_rhs, + const Vector & local_vector, + const std::vector & local_dof_indices, + MatrixType & global_matrix, + VectorType & global_vector, + bool use_inhomogeneities_for_rhs, std::integral_constant) const { // check whether we work on real vectors or we just used a dummy when // calling the other function above. - const bool use_vectors = (local_vector.size() == 0 && - global_vector.size() == 0) ? false : true; + const bool use_vectors = + (local_vector.size() == 0 && global_vector.size() == 0) ? false : true; typedef typename MatrixType::value_type number; - const bool use_dealii_matrix = - std::is_same >::value; + const bool use_dealii_matrix = + std::is_same>::value; - AssertDimension (local_matrix.n(), local_dof_indices.size()); - AssertDimension (local_matrix.m(), local_dof_indices.size()); - Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); + AssertDimension(local_matrix.n(), local_dof_indices.size()); + AssertDimension(local_matrix.m(), local_dof_indices.size()); + Assert(global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); if (use_vectors == true) { - AssertDimension (local_matrix.m(), local_vector.size()); - AssertDimension (global_matrix.m(), global_vector.size()); + AssertDimension(local_matrix.m(), local_vector.size()); + AssertDimension(global_matrix.m(), global_vector.size()); } - Assert (lines.empty() || sorted == true, ExcMatrixNotClosed()); + Assert(lines.empty() || sorted == true, ExcMatrixNotClosed()); const size_type n_local_dofs = local_dof_indices.size(); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internals::AffineConstraintsData< + typename MatrixType::value_type, + typename VectorType::value_type>::ScratchDataAccessor scratch_data; internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); - make_sorted_row_list (local_dof_indices, global_rows); + make_sorted_row_list(local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); @@ -2281,27 +2299,28 @@ AffineConstraints::distribute_local_to_global ( // an array in any case since we cannot know about the actual data type in // the AffineConstraints class (unless we do cast). This involves a little // bit of logic to determine the type of the matrix value. 
- std::vector &cols = scratch_data->columns; - std::vector &vals = scratch_data->values; + std::vector &cols = scratch_data->columns; + std::vector & vals = scratch_data->values; // create arrays for writing into the vector as well std::vector &vector_indices = scratch_data->vector_indices; - std::vector &vector_values = scratch_data->vector_values; + std::vector &vector_values = + scratch_data->vector_values; vector_indices.resize(n_actual_dofs); vector_values.resize(n_actual_dofs); - SparseMatrix *sparse_matrix - = dynamic_cast *>(&global_matrix); + SparseMatrix *sparse_matrix = + dynamic_cast *>(&global_matrix); if (use_dealii_matrix == false) { - cols.resize (n_actual_dofs); - vals.resize (n_actual_dofs); + cols.resize(n_actual_dofs); + vals.resize(n_actual_dofs); } else - Assert (sparse_matrix != nullptr, ExcInternalError()); + Assert(sparse_matrix != nullptr, ExcInternalError()); // now do the actual job. go through all the global rows that we will touch // and call resolve_matrix_row for each of those. size_type local_row_n = 0; - for (size_type i=0; i 0) - global_matrix.add(row, n_values, &cols[0], &vals[0], false, - true); + global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); } else - internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs, - local_matrix, sparse_matrix); + internals::resolve_matrix_row( + global_rows, i, 0, n_actual_dofs, local_matrix, sparse_matrix); // now to the vectors. besides doing the same job as we did above (i.e., // distribute the content of the local vector into the global one), need @@ -2331,23 +2354,20 @@ AffineConstraints::distribute_local_to_global ( // hand side. if (use_vectors == true) { - const typename VectorType::value_type - val = resolve_vector_entry (i, global_rows, - local_vector, - local_dof_indices, - local_matrix); + const typename VectorType::value_type val = resolve_vector_entry( + i, global_rows, local_vector, local_dof_indices, local_matrix); AssertIsFinite(val); - if (val != typename VectorType::value_type ()) + if (val != typename VectorType::value_type()) { vector_indices[local_row_n] = row; - vector_values[local_row_n] = val; + vector_values[local_row_n] = val; ++local_row_n; } } } - // Drop the elements of vector_indices and vector_values that we do not use (we may - // always elide writing zero values to vectors) + // Drop the elements of vector_indices and vector_values that we do not use + // (we may always elide writing zero values to vectors) const size_type n_local_rows = local_row_n; vector_indices.resize(n_local_rows); vector_values.resize(n_local_rows); @@ -2359,187 +2379,199 @@ AffineConstraints::distribute_local_to_global ( // Vector, LinearAlgebra::distributed::vector, etc. if (std::is_same::value) { - global_vector.add(vector_indices, - *reinterpret_cast *>(&vector_values)); + global_vector.add( + vector_indices, + *reinterpret_cast *>(&vector_values)); } else { - for (size_type row_n=0; row_n(vector_values[row_n]); } } - internals::set_matrix_diagonals (global_rows, local_dof_indices, - local_matrix, *this, - global_matrix, global_vector, use_inhomogeneities_for_rhs); + internals::set_matrix_diagonals(global_rows, + local_dof_indices, + local_matrix, + *this, + global_matrix, + global_vector, + use_inhomogeneities_for_rhs); } - - // similar function as above, but now specialized for block matrices. See the // other function for additional comments. 
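On the caller side, both this scalar implementation and the block specialization that follows are reached through the same public entry point during assembly. A hypothetical copy-local-to-global helper (purely illustrative; every name below is a placeholder, not part of this patch):

  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/full_matrix.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>

  #include <vector>

  using namespace dealii;

  // Hypothetical helper of a generic assembly loop: hand one cell's local
  // contributions to AffineConstraints, which resolves constrained rows and
  // columns on the fly instead of requiring a condense() pass afterwards.
  void copy_local_to_global(
    const FullMatrix<double>                   &cell_matrix,
    const Vector<double>                       &cell_rhs,
    const std::vector<types::global_dof_index> &local_dof_indices,
    const AffineConstraints<double>            &constraints,
    SparseMatrix<double>                       &system_matrix,
    Vector<double>                             &system_rhs)
  {
    constraints.distribute_local_to_global(
      cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
  }
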
+template template void -AffineConstraints:: -distribute_local_to_global ( +AffineConstraints::distribute_local_to_global( const FullMatrix &local_matrix, - const Vector &local_vector, - const std::vector &local_dof_indices, - MatrixType &global_matrix, - VectorType &global_vector, - bool use_inhomogeneities_for_rhs, + const Vector & local_vector, + const std::vector & local_dof_indices, + MatrixType & global_matrix, + VectorType & global_vector, + bool use_inhomogeneities_for_rhs, std::integral_constant) const { - const bool use_vectors = (local_vector.size() == 0 && - global_vector.size() == 0) ? false : true; + const bool use_vectors = + (local_vector.size() == 0 && global_vector.size() == 0) ? false : true; typedef typename MatrixType::value_type number; - const bool use_dealii_matrix = - std::is_same >::value; - - AssertDimension (local_matrix.n(), local_dof_indices.size()); - AssertDimension (local_matrix.m(), local_dof_indices.size()); - Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); - Assert (global_matrix.n_block_rows() == global_matrix.n_block_cols(), - ExcNotQuadratic()); + const bool use_dealii_matrix = + std::is_same>::value; + + AssertDimension(local_matrix.n(), local_dof_indices.size()); + AssertDimension(local_matrix.m(), local_dof_indices.size()); + Assert(global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); + Assert(global_matrix.n_block_rows() == global_matrix.n_block_cols(), + ExcNotQuadratic()); if (use_vectors == true) { - AssertDimension (local_matrix.m(), local_vector.size()); - AssertDimension (global_matrix.m(), global_vector.size()); + AssertDimension(local_matrix.m(), local_vector.size()); + AssertDimension(global_matrix.m(), global_vector.size()); } - Assert (sorted == true, ExcMatrixNotClosed()); + Assert(sorted == true, ExcMatrixNotClosed()); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internals::AffineConstraintsData< + typename MatrixType::value_type, + typename VectorType::value_type>::ScratchDataAccessor scratch_data; - const size_type n_local_dofs = local_dof_indices.size(); - internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; + const size_type n_local_dofs = local_dof_indices.size(); + internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); - make_sorted_row_list (local_dof_indices, global_rows); + make_sorted_row_list(local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); std::vector &global_indices = scratch_data->vector_indices; if (use_vectors == true) { global_indices.resize(n_actual_dofs); - for (size_type i=0; i &block_starts = scratch_data->block_starts; - block_starts.resize(num_blocks+1); - internals::make_block_starts (global_matrix, global_rows, block_starts); + block_starts.resize(num_blocks + 1); + internals::make_block_starts(global_matrix, global_rows, block_starts); std::vector &cols = scratch_data->columns; - std::vector &vals = scratch_data->values; + std::vector & vals = scratch_data->values; if (use_dealii_matrix == false) { - cols.resize (n_actual_dofs); - vals.resize (n_actual_dofs); + cols.resize(n_actual_dofs); + vals.resize(n_actual_dofs); } // the basic difference to the non-block variant from now onwards is that we // go through the blocks of the matrix separately, which allows us to set // the block entries individually - for (size_type block=0; block 0) - global_matrix.block(block, block_col).add(row, n_values, - &cols[0], &vals[0], - false, true); 
+ global_matrix.block(block, block_col) + .add(row, n_values, &cols[0], &vals[0], false, true); } else { - SparseMatrix *sparse_matrix - = dynamic_cast *>(&global_matrix.block(block, - block_col)); - Assert (sparse_matrix != nullptr, ExcInternalError()); - internals::resolve_matrix_row (global_rows, i, start_block, - end_block, local_matrix, sparse_matrix); + SparseMatrix *sparse_matrix = + dynamic_cast *>( + &global_matrix.block(block, block_col)); + Assert(sparse_matrix != nullptr, ExcInternalError()); + internals::resolve_matrix_row(global_rows, + i, + start_block, + end_block, + local_matrix, + sparse_matrix); } } if (use_vectors == true) { - const number val = resolve_vector_entry (i, global_rows, - local_vector, - local_dof_indices, - local_matrix); + const number val = resolve_vector_entry( + i, global_rows, local_vector, local_dof_indices, local_matrix); - if (val != number ()) + if (val != number()) global_vector(global_indices[i]) += static_cast(val); } } } - internals::set_matrix_diagonals (global_rows, local_dof_indices, - local_matrix, *this, - global_matrix, global_vector, use_inhomogeneities_for_rhs); + internals::set_matrix_diagonals(global_rows, + local_dof_indices, + local_matrix, + *this, + global_matrix, + global_vector, + use_inhomogeneities_for_rhs); } - - +template template void -AffineConstraints::distribute_local_to_global ( +AffineConstraints::distribute_local_to_global( const FullMatrix &local_matrix, - const std::vector &row_indices, - const std::vector &col_indices, - MatrixType &global_matrix) const + const std::vector & row_indices, + const std::vector & col_indices, + MatrixType & global_matrix) const { - distribute_local_to_global(local_matrix, row_indices, *this, - col_indices, global_matrix); + distribute_local_to_global( + local_matrix, row_indices, *this, col_indices, global_matrix); } - - +template template void -AffineConstraints::distribute_local_to_global ( +AffineConstraints::distribute_local_to_global( const FullMatrix &local_matrix, - const std::vector &row_indices, - const AffineConstraints &col_constraint_matrix, - const std::vector &col_indices, - MatrixType &global_matrix) const + const std::vector & row_indices, + const AffineConstraints & col_constraint_matrix, + const std::vector & col_indices, + MatrixType & global_matrix) const { typedef typename MatrixType::value_type number; - AssertDimension (local_matrix.m(), row_indices.size()); - AssertDimension (local_matrix.n(), col_indices.size()); + AssertDimension(local_matrix.m(), row_indices.size()); + AssertDimension(local_matrix.n(), col_indices.size()); const size_type n_local_row_dofs = row_indices.size(); const size_type n_local_col_dofs = col_indices.size(); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internals::AffineConstraintsData< + typename MatrixType::value_type>::ScratchDataAccessor scratch_data; internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_row_dofs); internals::GlobalRowsFromLocal &global_cols = scratch_data->global_columns; global_cols.reinit(n_local_col_dofs); - make_sorted_row_list (row_indices, global_rows); - col_constraint_matrix.make_sorted_row_list (col_indices, global_cols); + make_sorted_row_list(row_indices, global_rows); + col_constraint_matrix.make_sorted_row_list(col_indices, global_cols); const size_type n_actual_row_dofs = global_rows.size(); const size_type n_actual_col_dofs = global_cols.size(); @@ -2547,46 +2579,51 @@ 
AffineConstraints::distribute_local_to_global ( // create arrays for the column data (indices and values) that will then be // written into the matrix. Shortcut for deal.II sparse matrix std::vector &cols = scratch_data->columns; - std::vector &vals = scratch_data->values; + std::vector & vals = scratch_data->values; cols.resize(n_actual_col_dofs); vals.resize(n_actual_col_dofs); // now do the actual job. - for (size_type i=0; i 0) global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); } } - - +template template void -AffineConstraints:: -add_entries_local_to_global (const std::vector &local_dof_indices, - SparsityPatternType &sparsity_pattern, - const bool keep_constrained_entries, - const Table<2,bool> &dof_mask, - std::integral_constant ) const +AffineConstraints::add_entries_local_to_global( + const std::vector &local_dof_indices, + SparsityPatternType & sparsity_pattern, + const bool keep_constrained_entries, + const Table<2, bool> & dof_mask, + std::integral_constant) const { - Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic()); + Assert(sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), + ExcNotQuadratic()); - const size_type n_local_dofs = local_dof_indices.size(); - bool dof_mask_is_active = false; + const size_type n_local_dofs = local_dof_indices.size(); + bool dof_mask_is_active = false; if (dof_mask.n_rows() == n_local_dofs) { dof_mask_is_active = true; - AssertDimension (dof_mask.n_cols(), n_local_dofs); + AssertDimension(dof_mask.n_cols(), n_local_dofs); } internals::AffineConstraintsData::ScratchDataAccessor scratch_data; @@ -2599,12 +2636,12 @@ add_entries_local_to_global (const std::vector &local_dof_indices, { std::vector &actual_dof_indices = scratch_data->columns; actual_dof_indices.resize(n_local_dofs); - make_sorted_row_list (local_dof_indices, actual_dof_indices); + make_sorted_row_list(local_dof_indices, actual_dof_indices); const size_type n_actual_dofs = actual_dof_indices.size(); // now add the indices we collected above to the sparsity pattern. Very // easy here - just add the same array to all the rows... - for (size_type i=0; i &local_dof_indices, // need to add the whole row and column structure in case we keep // constrained entries. Unfortunately, we can't use the nice matrix // structure we use elsewhere, so manually add those indices one by one. 
- for (size_type i=0; iglobal_rows; global_rows.reinit(n_local_dofs); - make_sorted_row_list (local_dof_indices, global_rows); + make_sorted_row_list(local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); // create arrays for the column indices that will then be written into the @@ -2643,39 +2681,38 @@ add_entries_local_to_global (const std::vector &local_dof_indices, std::vector &cols = scratch_data->columns; cols.resize(n_actual_dofs); - for (size_type i=0; i::iterator col_ptr = cols.begin(); - const size_type row = global_rows.global_row(i); - internals::resolve_matrix_row (global_rows, i, 0, n_actual_dofs, - dof_mask, col_ptr); + const size_type row = global_rows.global_row(i); + internals::resolve_matrix_row( + global_rows, i, 0, n_actual_dofs, dof_mask, col_ptr); // finally, write all the information that accumulated under the given // process into the global matrix row and into the vector if (col_ptr != cols.begin()) - sparsity_pattern.add_entries(row, cols.begin(), col_ptr, - true); + sparsity_pattern.add_entries(row, cols.begin(), col_ptr, true); } - internals::set_sparsity_diagonals (global_rows, local_dof_indices, - dof_mask, keep_constrained_entries, - sparsity_pattern); + internals::set_sparsity_diagonals(global_rows, + local_dof_indices, + dof_mask, + keep_constrained_entries, + sparsity_pattern); } - - - +template template void -AffineConstraints:: -add_entries_local_to_global (const std::vector &row_indices, - const std::vector &col_indices, - SparsityPatternType &sparsity_pattern, - const bool keep_constrained_entries, - const Table<2,bool> &dof_mask) const +AffineConstraints::add_entries_local_to_global( + const std::vector &row_indices, + const std::vector &col_indices, + SparsityPatternType & sparsity_pattern, + const bool keep_constrained_entries, + const Table<2, bool> & dof_mask) const { - const size_type n_local_rows = row_indices.size(); - const size_type n_local_cols = col_indices.size(); - bool dof_mask_is_active = false; + const size_type n_local_rows = row_indices.size(); + const size_type n_local_cols = col_indices.size(); + bool dof_mask_is_active = false; if (dof_mask.n_rows() == n_local_rows && dof_mask.n_cols() == n_local_cols) dof_mask_is_active = true; @@ -2683,14 +2720,14 @@ add_entries_local_to_global (const std::vector &row_indices, // those to the sparsity pattern if (keep_constrained_entries == true) { - for (size_type i=0; i &row_indices, // plus some indices that come from constraints. if (dof_mask_is_active == false) { - std::vector actual_row_indices (n_local_rows); - std::vector actual_col_indices (n_local_cols); - make_sorted_row_list (row_indices, actual_row_indices); - make_sorted_row_list (col_indices, actual_col_indices); + std::vector actual_row_indices(n_local_rows); + std::vector actual_col_indices(n_local_cols); + make_sorted_row_list(row_indices, actual_row_indices); + make_sorted_row_list(col_indices, actual_col_indices); const size_type n_actual_rows = actual_row_indices.size(); // now add the indices we collected above to the sparsity pattern. Very // easy here - just add the same array to all the rows... 
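For reference, the single-index-vector variant of add_entries_local_to_global above is the form that sparsity-pattern assembly loops typically call. A hypothetical helper (names and the choice of DynamicSparsityPattern are placeholders for illustration, not part of this patch):

  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>

  #include <vector>

  using namespace dealii;

  // Hypothetical helper: add one cell's couplings to a dynamic sparsity
  // pattern. With keep_constrained_entries == false, rows and columns of
  // constrained DoFs are left out except for the diagonal entry that
  // set_sparsity_diagonals() adds.
  void add_cell_couplings(
    const std::vector<types::global_dof_index> &local_dof_indices,
    const AffineConstraints<double>            &constraints,
    DynamicSparsityPattern                     &dsp)
  {
    constraints.add_entries_local_to_global(
      local_dof_indices, dsp, /*keep_constrained_entries=*/false);
  }
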
- for (size_type i=0; i &row_indices, return; } - // TODO: implement this - Assert (false, ExcNotImplemented()); + Assert(false, ExcNotImplemented()); } - - - +template template void -AffineConstraints:: -add_entries_local_to_global (const std::vector &local_dof_indices, - SparsityPatternType &sparsity_pattern, - const bool keep_constrained_entries, - const Table<2,bool> &dof_mask, - std::integral_constant ) const +AffineConstraints::add_entries_local_to_global( + const std::vector &local_dof_indices, + SparsityPatternType & sparsity_pattern, + const bool keep_constrained_entries, + const Table<2, bool> & dof_mask, + std::integral_constant) const { // just as the other add_entries_local_to_global function, but now // specialized for block matrices. - Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic()); - Assert (sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(), - ExcNotQuadratic()); + Assert(sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), + ExcNotQuadratic()); + Assert(sparsity_pattern.n_block_rows() == sparsity_pattern.n_block_cols(), + ExcNotQuadratic()); const size_type n_local_dofs = local_dof_indices.size(); - const size_type num_blocks = sparsity_pattern.n_block_rows(); + const size_type num_blocks = sparsity_pattern.n_block_rows(); internals::AffineConstraintsData::ScratchDataAccessor scratch_data; @@ -2747,57 +2782,60 @@ add_entries_local_to_global (const std::vector &local_dof_indices, if (dof_mask.n_rows() == n_local_dofs) { dof_mask_is_active = true; - AssertDimension (dof_mask.n_cols(), n_local_dofs); + AssertDimension(dof_mask.n_cols(), n_local_dofs); } if (dof_mask_is_active == false) { std::vector &actual_dof_indices = scratch_data->columns; actual_dof_indices.resize(n_local_dofs); - make_sorted_row_list (local_dof_indices, actual_dof_indices); + make_sorted_row_list(local_dof_indices, actual_dof_indices); const size_type n_actual_dofs = actual_dof_indices.size(); (void)n_actual_dofs; // additional construct that also takes care of block indices. std::vector &block_starts = scratch_data->block_starts; - block_starts.resize(num_blocks+1); - internals::make_block_starts (sparsity_pattern, actual_dof_indices, - block_starts); + block_starts.resize(num_blocks + 1); + internals::make_block_starts( + sparsity_pattern, actual_dof_indices, block_starts); - for (size_type block=0; block::iterator index_it = actual_dof_indices.begin(); - for (size_type block_col = 0; block_col::iterator index_it = + actual_dof_indices.begin(); + for (size_type block_col = 0; block_col < num_blocks; ++block_col) { - const size_type next_block_col = block_starts[block_col+1]; - sparsity_pattern.block(block,block_col). - add_entries(row, - index_it, - actual_dof_indices.begin() + next_block_col, - true); + const size_type next_block_col = block_starts[block_col + 1]; + sparsity_pattern.block(block, block_col) + .add_entries(row, + index_it, + actual_dof_indices.begin() + next_block_col, + true); index_it = actual_dof_indices.begin() + next_block_col; } } } - for (size_type i=0; i &local_dof_indices, // function for block matrices internals::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); - make_sorted_row_list (local_dof_indices, global_rows); + make_sorted_row_list(local_dof_indices, global_rows); const size_type n_actual_dofs = global_rows.size(); // additional construct that also takes care of block indices. 
std::vector &block_starts = scratch_data->block_starts; - block_starts.resize(num_blocks+1); + block_starts.resize(num_blocks + 1); internals::make_block_starts(sparsity_pattern, global_rows, block_starts); std::vector &cols = scratch_data->columns; @@ -2820,34 +2858,33 @@ add_entries_local_to_global (const std::vector &local_dof_indices, // the basic difference to the non-block variant from now onwards is that we // go through the blocks of the matrix separately. - for (size_type block=0; block::iterator col_ptr = cols.begin(); - internals::resolve_matrix_row (global_rows, i, begin_block, - end_block, dof_mask, col_ptr); + internals::resolve_matrix_row( + global_rows, i, begin_block, end_block, dof_mask, col_ptr); - sparsity_pattern.block(block, block_col).add_entries(row, - cols.begin(), - col_ptr, - true); + sparsity_pattern.block(block, block_col) + .add_entries(row, cols.begin(), col_ptr, true); } } } - internals::set_sparsity_diagonals (global_rows, local_dof_indices, - dof_mask, keep_constrained_entries, - sparsity_pattern); + internals::set_sparsity_diagonals(global_rows, + local_dof_indices, + dof_mask, + keep_constrained_entries, + sparsity_pattern); } - DEAL_II_NAMESPACE_CLOSE #endif
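The practical payoff of threading the number template parameter through this file is that the scalar type of the stored constraint coefficients and inhomogeneities can match the scalar type of the linear algebra objects they are applied to. A small sketch of the post-solve step that complements the assembly routines above (purely illustrative; it assumes a float instantiation of the templated class is, or will be, provided by this series):

  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/lac/vector.h>

  using namespace dealii;

  int main()
  {
    // A single Dirichlet-style constraint x_3 = 1.5, stored in float.
    AffineConstraints<float> constraints;
    constraints.add_line(3);
    constraints.set_inhomogeneity(3, 1.5f);
    constraints.close();

    Vector<float> solution(8);
    // ... solve for the unconstrained DoFs ...
    constraints.distribute(solution); // overwrites solution(3) with 1.5
  }
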