From 1eadf59740f08bc07772aa3b795d7d73a47aabc0 Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Thu, 14 Jun 2018 17:01:45 +0200 Subject: [PATCH] Avoid unnamed namespaces in *.h --- include/deal.II/base/symmetric_tensor.h | 332 ++++--- include/deal.II/fe/fe_series.h | 163 ++-- include/deal.II/fe/fe_system.h | 90 +- include/deal.II/fe/fe_values.h | 6 +- include/deal.II/lac/block_linear_operator.h | 396 +++++---- include/deal.II/lac/linear_operator.h | 295 ++++--- include/deal.II/lac/matrix_out.h | 121 ++- include/deal.II/lac/packaged_operation.h | 57 +- include/deal.II/lac/trilinos_sparse_matrix.h | 111 ++- include/deal.II/lac/trilinos_vector.h | 31 +- .../deal.II/matrix_free/evaluation_selector.h | 834 +++++++++--------- include/deal.II/matrix_free/operators.h | 102 ++- .../deal.II/multigrid/mg_transfer_internal.h | 6 +- include/deal.II/physics/notation.h | 21 +- include/deal.II/physics/transformations.h | 207 +++-- source/fe/fe_system.cc | 27 +- 16 files changed, 1398 insertions(+), 1401 deletions(-) diff --git a/include/deal.II/base/symmetric_tensor.h b/include/deal.II/base/symmetric_tensor.h index af3a480184..acf4714ed7 100644 --- a/include/deal.II/base/symmetric_tensor.h +++ b/include/deal.II/base/symmetric_tensor.h @@ -2338,87 +2338,84 @@ namespace internal { namespace SymmetricTensorImplementation { - namespace + // a function to do the unrolling from a set of indices to a + // scalar index into the array in which we store the elements of + // a symmetric tensor + // + // this function is for rank-2 tensors + template + inline unsigned int + component_to_unrolled_index(const TableIndices<2> &indices) { - // a function to do the unrolling from a set of indices to a - // scalar index into the array in which we store the elements of - // a symmetric tensor - // - // this function is for rank-2 tensors - template - inline unsigned int - component_to_unrolled_index(const TableIndices<2> &indices) - { - Assert(indices[0] < dim, ExcIndexRange(indices[0], 0, dim)); - Assert(indices[1] < dim, ExcIndexRange(indices[1], 0, dim)); - - switch (dim) - { - case 1: - { - return 0; - } + Assert(indices[0] < dim, ExcIndexRange(indices[0], 0, dim)); + Assert(indices[1] < dim, ExcIndexRange(indices[1], 0, dim)); - case 2: - { - static const unsigned int table[2][2] = {{0, 2}, {2, 1}}; - return table[indices[0]][indices[1]]; - } - - case 3: - { - static const unsigned int table[3][3] = {{0, 3, 4}, - {3, 1, 5}, - {4, 5, 2}}; - return table[indices[0]][indices[1]]; - } - - case 4: - { - static const unsigned int table[4][4] = {{0, 4, 5, 6}, - {4, 1, 7, 8}, - {5, 7, 2, 9}, - {6, 8, 9, 3}}; - return table[indices[0]][indices[1]]; - } + switch (dim) + { + case 1: + { + return 0; + } - default: - // for the remainder, manually figure out the numbering - { - if (indices[0] == indices[1]) - return indices[0]; + case 2: + { + static const unsigned int table[2][2] = {{0, 2}, {2, 1}}; + return table[indices[0]][indices[1]]; + } - TableIndices<2> sorted_indices(indices); - sorted_indices.sort(); + case 3: + { + static const unsigned int table[3][3] = {{0, 3, 4}, + {3, 1, 5}, + {4, 5, 2}}; + return table[indices[0]][indices[1]]; + } + + case 4: + { + static const unsigned int table[4][4] = {{0, 4, 5, 6}, + {4, 1, 7, 8}, + {5, 7, 2, 9}, + {6, 8, 9, 3}}; + return table[indices[0]][indices[1]]; + } + + default: + // for the remainder, manually figure out the numbering + { + if (indices[0] == indices[1]) + return indices[0]; + + TableIndices<2> sorted_indices(indices); + sorted_indices.sort(); - for (unsigned int d = 0, c 
= 0; d < dim; ++d) - for (unsigned int e = d + 1; e < dim; ++e, ++c) - if ((sorted_indices[0] == d) && (sorted_indices[1] == e)) - return dim + c; + for (unsigned int d = 0, c = 0; d < dim; ++d) + for (unsigned int e = d + 1; e < dim; ++e, ++c) + if ((sorted_indices[0] == d) && (sorted_indices[1] == e)) + return dim + c; - // should never get here: - Assert(false, ExcInternalError()); - return 0; - } - } - } + // should never get here: + Assert(false, ExcInternalError()); + return 0; + } + } + } - // a function to do the unrolling from a set of indices to a - // scalar index into the array in which we store the elements of - // a symmetric tensor - // - // this function is for tensors of ranks not already handled - // above - template - inline unsigned int - component_to_unrolled_index(const TableIndices &indices) - { - (void)indices; - Assert(false, ExcNotImplemented()); - return numbers::invalid_unsigned_int; - } - } // namespace - } // namespace SymmetricTensorImplementation + // a function to do the unrolling from a set of indices to a + // scalar index into the array in which we store the elements of + // a symmetric tensor + // + // this function is for tensors of ranks not already handled + // above + template + inline unsigned int + component_to_unrolled_index(const TableIndices &indices) + { + (void)indices; + Assert(false, ExcNotImplemented()); + return numbers::invalid_unsigned_int; + } + } // namespace SymmetricTensorImplementation } // namespace internal @@ -2437,94 +2434,91 @@ namespace internal { namespace SymmetricTensorImplementation { - namespace + // a function to do the inverse of the unrolling from a set of + // indices to a scalar index into the array in which we store + // the elements of a symmetric tensor. in other words, it goes + // from the scalar index into the array to a set of indices of + // the tensor + // + // this function is for rank-2 tensors + template + inline TableIndices<2> + unrolled_to_component_indices(const unsigned int i, + const std::integral_constant &) { - // a function to do the inverse of the unrolling from a set of - // indices to a scalar index into the array in which we store - // the elements of a symmetric tensor. 
in other words, it goes - // from the scalar index into the array to a set of indices of - // the tensor - // - // this function is for rank-2 tensors - template - inline TableIndices<2> - unrolled_to_component_indices(const unsigned int i, - const std::integral_constant &) - { - Assert( - (i < - dealii::SymmetricTensor<2, dim, double>::n_independent_components), - ExcIndexRange( - i, - 0, - dealii::SymmetricTensor<2, dim, double>::n_independent_components)); - switch (dim) - { - case 1: - { - return TableIndices<2>(0, 0); - } - - case 2: - { - const TableIndices<2> table[3] = {TableIndices<2>(0, 0), - TableIndices<2>(1, 1), - TableIndices<2>(0, 1)}; - return table[i]; - } + Assert( + (i < dealii::SymmetricTensor<2, dim, double>::n_independent_components), + ExcIndexRange( + i, + 0, + dealii::SymmetricTensor<2, dim, double>::n_independent_components)); + switch (dim) + { + case 1: + { + return TableIndices<2>(0, 0); + } - case 3: - { - const TableIndices<2> table[6] = {TableIndices<2>(0, 0), - TableIndices<2>(1, 1), - TableIndices<2>(2, 2), - TableIndices<2>(0, 1), - TableIndices<2>(0, 2), - TableIndices<2>(1, 2)}; - return table[i]; - } + case 2: + { + const TableIndices<2> table[3] = {TableIndices<2>(0, 0), + TableIndices<2>(1, 1), + TableIndices<2>(0, 1)}; + return table[i]; + } - default: - if (i < dim) - return TableIndices<2>(i, i); + case 3: + { + const TableIndices<2> table[6] = {TableIndices<2>(0, 0), + TableIndices<2>(1, 1), + TableIndices<2>(2, 2), + TableIndices<2>(0, 1), + TableIndices<2>(0, 2), + TableIndices<2>(1, 2)}; + return table[i]; + } + + default: + if (i < dim) + return TableIndices<2>(i, i); - for (unsigned int d = 0, c = 0; d < dim; ++d) - for (unsigned int e = d + 1; e < dim; ++e, ++c) - if (c == i) - return TableIndices<2>(d, e); + for (unsigned int d = 0, c = 0; d < dim; ++d) + for (unsigned int e = d + 1; e < dim; ++e, ++c) + if (c == i) + return TableIndices<2>(d, e); - // should never get here: - Assert(false, ExcInternalError()); - return TableIndices<2>(0, 0); - } - } + // should never get here: + Assert(false, ExcInternalError()); + return TableIndices<2>(0, 0); + } + } - // a function to do the inverse of the unrolling from a set of - // indices to a scalar index into the array in which we store - // the elements of a symmetric tensor. in other words, it goes - // from the scalar index into the array to a set of indices of - // the tensor - // - // this function is for tensors of a rank not already handled - // above - template - inline TableIndices - unrolled_to_component_indices(const unsigned int i, - const std::integral_constant &) - { - (void)i; - Assert((i < dealii::SymmetricTensor:: - n_independent_components), - ExcIndexRange(i, - 0, - dealii::SymmetricTensor:: - n_independent_components)); - Assert(false, ExcNotImplemented()); - return TableIndices(); - } + // a function to do the inverse of the unrolling from a set of + // indices to a scalar index into the array in which we store + // the elements of a symmetric tensor. 
in other words, it goes + // from the scalar index into the array to a set of indices of + // the tensor + // + // this function is for tensors of a rank not already handled + // above + template + inline TableIndices + unrolled_to_component_indices(const unsigned int i, + const std::integral_constant &) + { + (void)i; + Assert( + (i < + dealii::SymmetricTensor::n_independent_components), + ExcIndexRange(i, + 0, + dealii::SymmetricTensor:: + n_independent_components)); + Assert(false, ExcNotImplemented()); + return TableIndices(); + } - } // namespace - } // namespace SymmetricTensorImplementation + } // namespace SymmetricTensorImplementation } // namespace internal template @@ -3090,24 +3084,20 @@ namespace internal std::array>, 3> hybrid(const dealii::SymmetricTensor<2, 3, Number> &A); - namespace + /** + * A struct that is used to sort arrays of pairs of eign=envalues and + * eigenvectors. Sorting is performed in descending order of eigenvalue. + */ + template + struct SortEigenValuesVectors { - /** - * A struct that is used to sort arrays of pairs of eign=envalues and - * eigenvectors. Sorting is performed in descending order of eigenvalue. - */ - template - struct SortEigenValuesVectors + typedef std::pair> EigValsVecs; + bool + operator()(const EigValsVecs &lhs, const EigValsVecs &rhs) { - typedef std::pair> EigValsVecs; - bool - operator()(const EigValsVecs &lhs, const EigValsVecs &rhs) - { - return lhs.first > rhs.first; - } - }; - - } // namespace + return lhs.first > rhs.first; + } + }; } // namespace SymmetricTensorImplementation diff --git a/include/deal.II/fe/fe_series.h b/include/deal.II/fe/fe_series.h index 104b201cf5..40e95844f0 100644 --- a/include/deal.II/fe/fe_series.h +++ b/include/deal.II/fe/fe_series.h @@ -288,96 +288,95 @@ namespace FESeries // ------------------- inline and template functions ---------------- -namespace +namespace internal { - template - void - fill_map_index(const Table & coefficients, - const TableIndices & ind, - const std::function( - const TableIndices &)> & predicate, - std::map> &pred_to_values) - { - const std::pair pred_pair = predicate(ind); - // don't add a value if predicate is false - if (pred_pair.first == false) - return; - - const unsigned int &pred_value = pred_pair.second; - const T & coeff_value = coefficients(ind); - // If pred_value is not in the pred_to_values map, the element will be - // created. Otherwise a reference to the existing element is returned. 
- pred_to_values[pred_value].push_back(coeff_value); - } - - template - void - fill_map( - const Table<1, T> &coefficients, - const std::function(const TableIndices<1> &)> - & predicate, - std::map> &pred_to_values) + namespace FESeriesImplementation { - for (unsigned int i = 0; i < coefficients.size(0); i++) - { - const TableIndices<1> ind(i); - fill_map_index(coefficients, ind, predicate, pred_to_values); - } - } - - template - void - fill_map( - const Table<2, T> &coefficients, - const std::function(const TableIndices<2> &)> - & predicate, - std::map> &pred_to_values) - { - for (unsigned int i = 0; i < coefficients.size(0); i++) - for (unsigned int j = 0; j < coefficients.size(1); j++) + template + void + fill_map_index(const Table & coefficients, + const TableIndices & ind, + const std::function( + const TableIndices &)> & predicate, + std::map> &pred_to_values) + { + const std::pair pred_pair = predicate(ind); + // don't add a value if predicate is false + if (pred_pair.first == false) + return; + + const unsigned int &pred_value = pred_pair.second; + const T & coeff_value = coefficients(ind); + // If pred_value is not in the pred_to_values map, the element will be + // created. Otherwise a reference to the existing element is returned. + pred_to_values[pred_value].push_back(coeff_value); + } + + template + void + fill_map(const Table<1, T> & coefficients, + const std::function( + const TableIndices<1> &)> & predicate, + std::map> &pred_to_values) + { + for (unsigned int i = 0; i < coefficients.size(0); i++) { - const TableIndices<2> ind(i, j); + const TableIndices<1> ind(i); fill_map_index(coefficients, ind, predicate, pred_to_values); } - } - - template - void - fill_map( - const Table<3, T> &coefficients, - const std::function(const TableIndices<3> &)> - & predicate, - std::map> &pred_to_values) - { - for (unsigned int i = 0; i < coefficients.size(0); i++) - for (unsigned int j = 0; j < coefficients.size(1); j++) - for (unsigned int k = 0; k < coefficients.size(2); k++) + } + + template + void + fill_map(const Table<2, T> & coefficients, + const std::function( + const TableIndices<2> &)> & predicate, + std::map> &pred_to_values) + { + for (unsigned int i = 0; i < coefficients.size(0); i++) + for (unsigned int j = 0; j < coefficients.size(1); j++) { - const TableIndices<3> ind(i, j, k); + const TableIndices<2> ind(i, j); fill_map_index(coefficients, ind, predicate, pred_to_values); } - } + } + template + void + fill_map(const Table<3, T> & coefficients, + const std::function( + const TableIndices<3> &)> & predicate, + std::map> &pred_to_values) + { + for (unsigned int i = 0; i < coefficients.size(0); i++) + for (unsigned int j = 0; j < coefficients.size(1); j++) + for (unsigned int k = 0; k < coefficients.size(2); k++) + { + const TableIndices<3> ind(i, j, k); + fill_map_index(coefficients, ind, predicate, pred_to_values); + } + } - template - double - complex_mean_value(const T &value) - { - return value; - } - template - double - complex_mean_value(const std::complex &value) - { - AssertThrow(false, - ExcMessage( - "FESeries::process_coefficients() can not be used with" - "complex-valued coefficients and VectorTools::mean norm.")); - return std::abs(value); - } + template + double + complex_mean_value(const T &value) + { + return value; + } -} // namespace + template + double + complex_mean_value(const std::complex &value) + { + AssertThrow(false, + ExcMessage( + "FESeries::process_coefficients() can not be used with" + "complex-valued coefficients and VectorTools::mean norm.")); 
+ return std::abs(value); + } + } // namespace FESeriesImplementation +} // namespace internal template @@ -395,7 +394,9 @@ FESeries::process_coefficients( // coefficients. We could have stored (predicate values ->TableIndicies) map, // but its processing would have been much harder later on. std::map> pred_to_values; - fill_map(coefficients, predicate, pred_to_values); + internal::FESeriesImplementation::fill_map(coefficients, + predicate, + pred_to_values); // now go through the map and populate the @p norm_values based on @p norm: for (typename std::map>::const_iterator it = @@ -425,7 +426,9 @@ FESeries::process_coefficients( } case VectorTools::mean: { - norm_values.push_back(complex_mean_value(values.mean_value())); + norm_values.push_back( + internal::FESeriesImplementation::complex_mean_value( + values.mean_value())); break; } default: diff --git a/include/deal.II/fe/fe_system.h b/include/deal.II/fe/fe_system.h index 1eac35bea9..e6e3fca1a1 100644 --- a/include/deal.II/fe/fe_system.h +++ b/include/deal.II/fe/fe_system.h @@ -1223,49 +1223,52 @@ private: //------------------------variadic template constructor------------------------ # ifndef DOXYGEN -namespace +namespace internal { - template - unsigned int - count_nonzeros( - const std::initializer_list< - std::pair>, unsigned int>> - &fe_systems) - { - return std::count_if( - fe_systems.begin(), - fe_systems.end(), - [](const std::pair>, - unsigned int> &fe_system) { - return fe_system.second > 0; - }); - } - - - - template - std::pair>, unsigned int> - promote_to_fe_pair(const FiniteElement &fe) - { - return std::make_pair>, - unsigned int>(std::move(fe.clone()), 1u); - } - - - - template - auto - promote_to_fe_pair( - std::pair>, unsigned int> &&p) - -> decltype( - std::forward>, - unsigned int>>(p)) + namespace FESystemImplementation { - return std::forward< - std::pair>, unsigned int>>( - p); - } -} // namespace + template + unsigned int + count_nonzeros( + const std::initializer_list< + std::pair>, unsigned int>> + &fe_systems) + { + return std::count_if( + fe_systems.begin(), + fe_systems.end(), + [](const std::pair>, + unsigned int> &fe_system) { + return fe_system.second > 0; + }); + } + + + + template + std::pair>, unsigned int> + promote_to_fe_pair(const FiniteElement &fe) + { + return std::make_pair>, + unsigned int>(std::move(fe.clone()), 1u); + } + + + + template + auto + promote_to_fe_pair(std::pair>, + unsigned int> &&p) + -> decltype( + std::forward>, + unsigned int>>(p)) + { + return std::forward< + std::pair>, unsigned int>>( + p); + } + } // namespace FESystemImplementation +} // namespace internal @@ -1277,7 +1280,8 @@ template template FESystem::FESystem(FEPairs &&... 
fe_pairs) : FESystem( - {promote_to_fe_pair(std::forward(fe_pairs))...}) + {internal::FESystemImplementation::promote_to_fe_pair( + std::forward(fe_pairs))...}) {} @@ -1294,7 +1298,7 @@ FESystem::FESystem( fe_systems), FETools::Compositing::compute_nonzero_components( fe_systems)) - , base_elements(count_nonzeros(fe_systems)) + , base_elements(internal::FESystemImplementation::count_nonzeros(fe_systems)) { std::vector *> fes; std::vector multiplicities; diff --git a/include/deal.II/fe/fe_values.h b/include/deal.II/fe/fe_values.h index 6ed5c4a936..5437f9eb9f 100644 --- a/include/deal.II/fe/fe_values.h +++ b/include/deal.II/fe/fe_values.h @@ -4363,7 +4363,7 @@ namespace FEValuesViews - namespace + namespace internal { /** * Return the symmetrized version of a tensor whose n'th row equals the @@ -4433,7 +4433,7 @@ namespace FEValuesViews } } } - } // namespace + } // namespace internal @@ -4454,7 +4454,7 @@ namespace FEValuesViews if (snc == -2) return symmetric_gradient_type(); else if (snc != -1) - return symmetrize_single_row( + return internal::symmetrize_single_row( shape_function_data[shape_function].single_nonzero_component_index, fe_values->finite_element_output.shape_gradients[snc][q_point]); else diff --git a/include/deal.II/lac/block_linear_operator.h b/include/deal.II/lac/block_linear_operator.h index 36e5b5633a..2fe0b3b098 100644 --- a/include/deal.II/lac/block_linear_operator.h +++ b/include/deal.II/lac/block_linear_operator.h @@ -346,219 +346,217 @@ public: }; - -namespace +namespace internal { - // A helper function to apply a given vmult, or Tvmult to a vector with - // intermediate storage, similar to the corresponding helper - // function for LinearOperator. Here, two operators are used. - // The first one takes care of the first "column" and typically doesn't add. - // On the other hand, the second operator is normally an adding one. - template - void - apply_with_intermediate_storage(const Function1 &first_op, - const Function2 &loop_op, - Range & v, - const Domain & u, - bool add) + namespace BlockLinearOperatorImplementation { - GrowingVectorMemory vector_memory; - - typename VectorMemory::Pointer tmp(vector_memory); - tmp->reinit(v, /*bool omit_zeroing_entries =*/true); - - const unsigned int n = u.n_blocks(); - const unsigned int m = v.n_blocks(); - - for (unsigned int i = 0; i < m; ++i) - { - first_op(*tmp, u, i, 0); - for (unsigned int j = 1; j < n; ++j) - loop_op(*tmp, u, i, j); - } + // A helper function to apply a given vmult, or Tvmult to a vector with + // intermediate storage, similar to the corresponding helper + // function for LinearOperator. Here, two operators are used. + // The first one takes care of the first "column" and typically doesn't add. + // On the other hand, the second operator is normally an adding one. 
+ template + void + apply_with_intermediate_storage(const Function1 &first_op, + const Function2 &loop_op, + Range & v, + const Domain & u, + bool add) + { + GrowingVectorMemory vector_memory; - if (add) - v += *tmp; - else - v = *tmp; - } + typename VectorMemory::Pointer tmp(vector_memory); + tmp->reinit(v, /*bool omit_zeroing_entries =*/true); - // Populate the LinearOperator interfaces with the help of the - // BlockLinearOperator functions - template - inline void - populate_linear_operator_functions( - dealii::BlockLinearOperator &op) - { - op.reinit_range_vector = [=](Range &v, bool omit_zeroing_entries) { - const unsigned int m = op.n_block_rows(); + const unsigned int n = u.n_blocks(); + const unsigned int m = v.n_blocks(); - // Reinitialize the block vector to m blocks: - v.reinit(m); - - // And reinitialize every individual block with reinit_range_vectors: for (unsigned int i = 0; i < m; ++i) - op.block(i, 0).reinit_range_vector(v.block(i), omit_zeroing_entries); - - v.collect_sizes(); - }; - - op.reinit_domain_vector = [=](Domain &v, bool omit_zeroing_entries) { - const unsigned int n = op.n_block_cols(); - - // Reinitialize the block vector to n blocks: - v.reinit(n); - - // And reinitialize every individual block with reinit_domain_vectors: - for (unsigned int i = 0; i < n; ++i) - op.block(0, i).reinit_domain_vector(v.block(i), omit_zeroing_entries); - - v.collect_sizes(); - }; - - op.vmult = [&op](Range &v, const Domain &u) { - const unsigned int m = op.n_block_rows(); - const unsigned int n = op.n_block_cols(); - Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m)); - Assert(u.n_blocks() == n, ExcDimensionMismatch(u.n_blocks(), n)); - - if (PointerComparison::equal(&v, &u)) { - const auto first_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(i, j).vmult(v.block(i), u.block(j)); - }; - - const auto loop_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(i, j).vmult_add(v.block(i), u.block(j)); - }; - - apply_with_intermediate_storage(first_op, loop_op, v, u, false); + first_op(*tmp, u, i, 0); + for (unsigned int j = 1; j < n; ++j) + loop_op(*tmp, u, i, j); } - else - { - for (unsigned int i = 0; i < m; ++i) - { - op.block(i, 0).vmult(v.block(i), u.block(0)); - for (unsigned int j = 1; j < n; ++j) - op.block(i, j).vmult_add(v.block(i), u.block(j)); - } - } - }; - op.vmult_add = [&op](Range &v, const Domain &u) { - const unsigned int m = op.n_block_rows(); - const unsigned int n = op.n_block_cols(); - Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m)); - Assert(u.n_blocks() == n, ExcDimensionMismatch(u.n_blocks(), n)); - - if (PointerComparison::equal(&v, &u)) - { - const auto first_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(i, j).vmult(v.block(i), u.block(j)); - }; - - const auto loop_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(i, j).vmult_add(v.block(i), u.block(j)); - }; - - apply_with_intermediate_storage(first_op, loop_op, v, u, true); - } + if (add) + v += *tmp; else - { - for (unsigned int i = 0; i < m; ++i) - for (unsigned int j = 0; j < n; ++j) + v = *tmp; + } + + // Populate the LinearOperator interfaces with the help of the + // BlockLinearOperator functions + template + inline void + populate_linear_operator_functions( + dealii::BlockLinearOperator &op) + { + op.reinit_range_vector = [=](Range &v, bool 
omit_zeroing_entries) { + const unsigned int m = op.n_block_rows(); + + // Reinitialize the block vector to m blocks: + v.reinit(m); + + // And reinitialize every individual block with reinit_range_vectors: + for (unsigned int i = 0; i < m; ++i) + op.block(i, 0).reinit_range_vector(v.block(i), omit_zeroing_entries); + + v.collect_sizes(); + }; + + op.reinit_domain_vector = [=](Domain &v, bool omit_zeroing_entries) { + const unsigned int n = op.n_block_cols(); + + // Reinitialize the block vector to n blocks: + v.reinit(n); + + // And reinitialize every individual block with reinit_domain_vectors: + for (unsigned int i = 0; i < n; ++i) + op.block(0, i).reinit_domain_vector(v.block(i), omit_zeroing_entries); + + v.collect_sizes(); + }; + + op.vmult = [&op](Range &v, const Domain &u) { + const unsigned int m = op.n_block_rows(); + const unsigned int n = op.n_block_cols(); + Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m)); + Assert(u.n_blocks() == n, ExcDimensionMismatch(u.n_blocks(), n)); + + if (PointerComparison::equal(&v, &u)) + { + const auto first_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(i, j).vmult(v.block(i), u.block(j)); + }; + + const auto loop_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { op.block(i, j).vmult_add(v.block(i), u.block(j)); - } - }; - - op.Tvmult = [&op](Domain &v, const Range &u) { - const unsigned int n = op.n_block_cols(); - const unsigned int m = op.n_block_rows(); - Assert(v.n_blocks() == n, ExcDimensionMismatch(v.n_blocks(), n)); - Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m)); - - if (PointerComparison::equal(&v, &u)) - { - const auto first_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(j, i).Tvmult(v.block(i), u.block(j)); - }; - - const auto loop_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(j, i).Tvmult_add(v.block(i), u.block(j)); - }; - - apply_with_intermediate_storage(first_op, loop_op, v, u, false); - } - else - { - for (unsigned int i = 0; i < n; ++i) - { - op.block(0, i).Tvmult(v.block(i), u.block(0)); - for (unsigned int j = 1; j < m; ++j) + }; + + apply_with_intermediate_storage(first_op, loop_op, v, u, false); + } + else + { + for (unsigned int i = 0; i < m; ++i) + { + op.block(i, 0).vmult(v.block(i), u.block(0)); + for (unsigned int j = 1; j < n; ++j) + op.block(i, j).vmult_add(v.block(i), u.block(j)); + } + } + }; + + op.vmult_add = [&op](Range &v, const Domain &u) { + const unsigned int m = op.n_block_rows(); + const unsigned int n = op.n_block_cols(); + Assert(v.n_blocks() == m, ExcDimensionMismatch(v.n_blocks(), m)); + Assert(u.n_blocks() == n, ExcDimensionMismatch(u.n_blocks(), n)); + + if (PointerComparison::equal(&v, &u)) + { + const auto first_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(i, j).vmult(v.block(i), u.block(j)); + }; + + const auto loop_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(i, j).vmult_add(v.block(i), u.block(j)); + }; + + apply_with_intermediate_storage(first_op, loop_op, v, u, true); + } + else + { + for (unsigned int i = 0; i < m; ++i) + for (unsigned int j = 0; j < n; ++j) + op.block(i, j).vmult_add(v.block(i), u.block(j)); + } + }; + + op.Tvmult = [&op](Domain &v, const Range &u) { + const unsigned int n = op.n_block_cols(); + const unsigned int m = 
op.n_block_rows(); + Assert(v.n_blocks() == n, ExcDimensionMismatch(v.n_blocks(), n)); + Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m)); + + if (PointerComparison::equal(&v, &u)) + { + const auto first_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(j, i).Tvmult(v.block(i), u.block(j)); + }; + + const auto loop_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(j, i).Tvmult_add(v.block(i), u.block(j)); + }; + + apply_with_intermediate_storage(first_op, loop_op, v, u, false); + } + else + { + for (unsigned int i = 0; i < n; ++i) + { + op.block(0, i).Tvmult(v.block(i), u.block(0)); + for (unsigned int j = 1; j < m; ++j) + op.block(j, i).Tvmult_add(v.block(i), u.block(j)); + } + } + }; + + op.Tvmult_add = [&op](Domain &v, const Range &u) { + const unsigned int n = op.n_block_cols(); + const unsigned int m = op.n_block_rows(); + Assert(v.n_blocks() == n, ExcDimensionMismatch(v.n_blocks(), n)); + Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m)); + + if (PointerComparison::equal(&v, &u)) + { + const auto first_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(j, i).Tvmult(v.block(i), u.block(j)); + }; + + const auto loop_op = [&op](Range & v, + const Domain & u, + const unsigned int i, + const unsigned int j) { + op.block(j, i).Tvmult_add(v.block(i), u.block(j)); + }; + + apply_with_intermediate_storage(first_op, loop_op, v, u, true); + } + else + { + for (unsigned int i = 0; i < n; ++i) + for (unsigned int j = 0; j < m; ++j) op.block(j, i).Tvmult_add(v.block(i), u.block(j)); - } - } - }; + } + }; + } - op.Tvmult_add = [&op](Domain &v, const Range &u) { - const unsigned int n = op.n_block_cols(); - const unsigned int m = op.n_block_rows(); - Assert(v.n_blocks() == n, ExcDimensionMismatch(v.n_blocks(), n)); - Assert(u.n_blocks() == m, ExcDimensionMismatch(u.n_blocks(), m)); - if (PointerComparison::equal(&v, &u)) - { - const auto first_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(j, i).Tvmult(v.block(i), u.block(j)); - }; - - const auto loop_op = [&op](Range & v, - const Domain & u, - const unsigned int i, - const unsigned int j) { - op.block(j, i).Tvmult_add(v.block(i), u.block(j)); - }; - - apply_with_intermediate_storage(first_op, loop_op, v, u, true); - } - else - { - for (unsigned int i = 0; i < n; ++i) - for (unsigned int j = 0; j < m; ++j) - op.block(j, i).Tvmult_add(v.block(i), u.block(j)); - } - }; - } -} // namespace -namespace internal -{ - namespace BlockLinearOperatorImplementation - { /** * A dummy class for BlockLinearOperators that do not require any * extensions to facilitate the operations of the block matrix or its @@ -595,7 +593,7 @@ namespace internal }; } // namespace BlockLinearOperatorImplementation -} /*namespace internal*/ +} // namespace internal diff --git a/include/deal.II/lac/linear_operator.h b/include/deal.II/lac/linear_operator.h index 2415f2e8c9..cdd0d34764 100644 --- a/include/deal.II/lac/linear_operator.h +++ b/include/deal.II/lac/linear_operator.h @@ -1034,170 +1034,171 @@ namespace internal return EmptyPayload(); } - } // namespace LinearOperatorImplementation -} /* namespace internal */ - - -namespace -{ - // A trait class that determines whether type T provides public - // (templated or non-templated) vmult_add member functions - template - class has_vmult_add_and_Tvmult_add - { - template - static 
std::false_type - test(...); - template - static auto - test(Range *r, Domain *d) -> decltype(std::declval().vmult_add(*r, *d), - std::declval().Tvmult_add(*d, *r), - std::true_type()); - public: - // type is std::true_type if Matrix provides vmult_add and Tvmult_add, - // otherwise it is std::false_type - - typedef decltype(test(nullptr, nullptr)) type; - }; - - - // A helper function to apply a given vmult, or Tvmult to a vector with - // intermediate storage - template - void - apply_with_intermediate_storage(Function function, - Range & v, - const Domain &u, - bool add) - { - GrowingVectorMemory vector_memory; + // A trait class that determines whether type T provides public + // (templated or non-templated) vmult_add member functions + template + class has_vmult_add_and_Tvmult_add + { + template + static std::false_type + test(...); - typename VectorMemory::Pointer i(vector_memory); - i->reinit(v, /*bool omit_zeroing_entries =*/true); + template + static auto + test(Range *r, Domain *d) + -> decltype(std::declval().vmult_add(*r, *d), + std::declval().Tvmult_add(*d, *r), + std::true_type()); - function(*i, u); + public: + // type is std::true_type if Matrix provides vmult_add and Tvmult_add, + // otherwise it is std::false_type - if (add) - v += *i; - else - v = *i; - } + typedef decltype(test(nullptr, nullptr)) type; + }; - // A helper class to add a reduced matrix interface to a LinearOperator - // (typically provided by Preconditioner classes) - template - class MatrixInterfaceWithoutVmultAdd - { - public: - template + // A helper function to apply a given vmult, or Tvmult to a vector with + // intermediate storage + template void - operator()(LinearOperator &op, const Matrix &matrix) + apply_with_intermediate_storage(Function function, + Range & v, + const Domain &u, + bool add) { - op.vmult = [&matrix](Range &v, const Domain &u) { - if (PointerComparison::equal(&v, &u)) - { - // If v and u are the same memory location use intermediate storage - apply_with_intermediate_storage( - [&matrix](Range &b, const Domain &a) { matrix.vmult(b, a); }, - v, - u, - /*bool add =*/false); - } - else - { - matrix.vmult(v, u); - } - }; + GrowingVectorMemory vector_memory; - op.vmult_add = [&matrix](Range &v, const Domain &u) { - // use intermediate storage to implement vmult_add with vmult - apply_with_intermediate_storage( - [&matrix](Range &b, const Domain &a) { matrix.vmult(b, a); }, - v, - u, - /*bool add =*/true); - }; + typename VectorMemory::Pointer i(vector_memory); + i->reinit(v, /*bool omit_zeroing_entries =*/true); - op.Tvmult = [&matrix](Domain &v, const Range &u) { - if (PointerComparison::equal(&v, &u)) - { - // If v and u are the same memory location use intermediate storage - apply_with_intermediate_storage( - [&matrix](Domain &b, const Range &a) { matrix.Tvmult(b, a); }, - v, - u, - /*bool add =*/false); - } - else - { - matrix.Tvmult(v, u); - } - }; + function(*i, u); - op.Tvmult_add = [&matrix](Domain &v, const Range &u) { - // use intermediate storage to implement Tvmult_add with Tvmult - apply_with_intermediate_storage( - [&matrix](Domain &b, const Range &a) { matrix.Tvmult(b, a); }, - v, - u, - /*bool add =*/true); - }; + if (add) + v += *i; + else + v = *i; } - }; - // A helper class to add the full matrix interface to a LinearOperator - template - class MatrixInterfaceWithVmultAdd - { - public: - template - void - operator()(LinearOperator &op, const Matrix &matrix) + // A helper class to add a reduced matrix interface to a LinearOperator + // (typically provided by 
Preconditioner classes) + template + class MatrixInterfaceWithoutVmultAdd { - // As above ... - - MatrixInterfaceWithoutVmultAdd().operator()( - op, matrix); - - // ... but add native vmult_add and Tvmult_add variants: - - op.vmult_add = [&matrix](Range &v, const Domain &u) { - if (PointerComparison::equal(&v, &u)) - { - apply_with_intermediate_storage( - [&matrix](Range &b, const Domain &a) { matrix.vmult(b, a); }, - v, - u, - /*bool add =*/true); - } - else - { - matrix.vmult_add(v, u); - } - }; + public: + template + void + operator()(LinearOperator &op, + const Matrix & matrix) + { + op.vmult = [&matrix](Range &v, const Domain &u) { + if (PointerComparison::equal(&v, &u)) + { + // If v and u are the same memory location use intermediate + // storage + apply_with_intermediate_storage( + [&matrix](Range &b, const Domain &a) { matrix.vmult(b, a); }, + v, + u, + /*bool add =*/false); + } + else + { + matrix.vmult(v, u); + } + }; + + op.vmult_add = [&matrix](Range &v, const Domain &u) { + // use intermediate storage to implement vmult_add with vmult + apply_with_intermediate_storage( + [&matrix](Range &b, const Domain &a) { matrix.vmult(b, a); }, + v, + u, + /*bool add =*/true); + }; + + op.Tvmult = [&matrix](Domain &v, const Range &u) { + if (PointerComparison::equal(&v, &u)) + { + // If v and u are the same memory location use intermediate + // storage + apply_with_intermediate_storage( + [&matrix](Domain &b, const Range &a) { matrix.Tvmult(b, a); }, + v, + u, + /*bool add =*/false); + } + else + { + matrix.Tvmult(v, u); + } + }; + + op.Tvmult_add = [&matrix](Domain &v, const Range &u) { + // use intermediate storage to implement Tvmult_add with Tvmult + apply_with_intermediate_storage( + [&matrix](Domain &b, const Range &a) { matrix.Tvmult(b, a); }, + v, + u, + /*bool add =*/true); + }; + } + }; - op.Tvmult_add = [&matrix](Domain &v, const Range &u) { - if (PointerComparison::equal(&v, &u)) - { - apply_with_intermediate_storage( - [&matrix](Domain &b, const Range &a) { matrix.Tvmult(b, a); }, - v, - u, - /*bool add =*/true); - } - else - { - matrix.Tvmult_add(v, u); - } - }; - } - }; -} /* namespace */ + // A helper class to add the full matrix interface to a LinearOperator + template + class MatrixInterfaceWithVmultAdd + { + public: + template + void + operator()(LinearOperator &op, + const Matrix & matrix) + { + // As above ... + + MatrixInterfaceWithoutVmultAdd().operator()( + op, matrix); + + // ... 
but add native vmult_add and Tvmult_add variants: + + op.vmult_add = [&matrix](Range &v, const Domain &u) { + if (PointerComparison::equal(&v, &u)) + { + apply_with_intermediate_storage( + [&matrix](Range &b, const Domain &a) { matrix.vmult(b, a); }, + v, + u, + /*bool add =*/true); + } + else + { + matrix.vmult_add(v, u); + } + }; + + op.Tvmult_add = [&matrix](Domain &v, const Range &u) { + if (PointerComparison::equal(&v, &u)) + { + apply_with_intermediate_storage( + [&matrix](Domain &b, const Range &a) { matrix.Tvmult(b, a); }, + v, + u, + /*bool add =*/true); + } + else + { + matrix.Tvmult_add(v, u); + } + }; + } + }; + } // namespace LinearOperatorImplementation +} // namespace internal /** @@ -1290,6 +1291,7 @@ template linear_operator(const OperatorExemplar &operator_exemplar, const Matrix &matrix) { + using namespace internal::LinearOperatorImplementation; // Initialize the payload based on the input exemplar matrix LinearOperator return_op( Payload(operator_exemplar, matrix)); @@ -1345,6 +1347,7 @@ LinearOperator linear_operator(const LinearOperator &operator_exemplar, const Matrix & matrix) { + using namespace internal::LinearOperatorImplementation; // Initialize the payload based on the LinearOperator exemplar auto return_op = operator_exemplar; diff --git a/include/deal.II/lac/matrix_out.h b/include/deal.II/lac/matrix_out.h index 5c7abe00dd..d100a3bdd7 100644 --- a/include/deal.II/lac/matrix_out.h +++ b/include/deal.II/lac/matrix_out.h @@ -197,85 +197,82 @@ namespace internal { namespace MatrixOutImplementation { - namespace + /** + * Return the element with given indices of a sparse matrix. + */ + template + double + get_element(const dealii::SparseMatrix &matrix, + const types::global_dof_index i, + const types::global_dof_index j) { - /** - * Return the element with given indices of a sparse matrix. - */ - template - double - get_element(const dealii::SparseMatrix &matrix, - const types::global_dof_index i, - const types::global_dof_index j) - { - return matrix.el(i, j); - } + return matrix.el(i, j); + } - /** - * Return the element with given indices of a block sparse matrix. - */ - template - double - get_element(const dealii::BlockSparseMatrix &matrix, - const types::global_dof_index i, - const types::global_dof_index j) - { - return matrix.el(i, j); - } + /** + * Return the element with given indices of a block sparse matrix. + */ + template + double + get_element(const dealii::BlockSparseMatrix &matrix, + const types::global_dof_index i, + const types::global_dof_index j) + { + return matrix.el(i, j); + } # ifdef DEAL_II_WITH_TRILINOS - /** - * Return the element with given indices of a Trilinos sparse matrix. - */ - inline double - get_element(const TrilinosWrappers::SparseMatrix &matrix, - const types::global_dof_index i, - const types::global_dof_index j) - { - return matrix.el(i, j); - } + /** + * Return the element with given indices of a Trilinos sparse matrix. + */ + inline double + get_element(const TrilinosWrappers::SparseMatrix &matrix, + const types::global_dof_index i, + const types::global_dof_index j) + { + return matrix.el(i, j); + } - /** - * Return the element with given indices of a Trilinos block sparse - * matrix. - */ - inline double - get_element(const TrilinosWrappers::BlockSparseMatrix &matrix, - const types::global_dof_index i, - const types::global_dof_index j) - { - return matrix.el(i, j); - } + /** + * Return the element with given indices of a Trilinos block sparse + * matrix. 
+ */ + inline double + get_element(const TrilinosWrappers::BlockSparseMatrix &matrix, + const types::global_dof_index i, + const types::global_dof_index j) + { + return matrix.el(i, j); + } # endif # ifdef DEAL_II_WITH_PETSC - // no need to do anything: PETSc matrix objects do not distinguish - // between operator() and el(i,j), so we can safely access elements - // through the generic function below + // no need to do anything: PETSc matrix objects do not distinguish + // between operator() and el(i,j), so we can safely access elements + // through the generic function below # endif - /** - * Return the element with given indices from any matrix type for which - * no specialization of this function was declared above. This will call - * operator() on the matrix. - */ - template - double - get_element(const Matrix & matrix, - const types::global_dof_index i, - const types::global_dof_index j) - { - return matrix(i, j); - } - } // namespace - } // namespace MatrixOutImplementation + /** + * Return the element with given indices from any matrix type for which + * no specialization of this function was declared above. This will call + * operator() on the matrix. + */ + template + double + get_element(const Matrix & matrix, + const types::global_dof_index i, + const types::global_dof_index j) + { + return matrix(i, j); + } + } // namespace MatrixOutImplementation } // namespace internal diff --git a/include/deal.II/lac/packaged_operation.h b/include/deal.II/lac/packaged_operation.h index 676986660c..dd0f847919 100644 --- a/include/deal.II/lac/packaged_operation.h +++ b/include/deal.II/lac/packaged_operation.h @@ -472,32 +472,35 @@ operator-(const Range &offset, const PackagedOperation &comp) */ //@{ -namespace +namespace internal { - // Poor man's trait class that determines whether type T is a vector: - // FIXME: Implement this as a proper type trait - similar to - // isBlockVector - - template - class has_vector_interface + namespace PackagedOperationImplementation { - template - static std::false_type - test(...); + // Poor man's trait class that determines whether type T is a vector: + // FIXME: Implement this as a proper type trait - similar to + // isBlockVector + + template + class has_vector_interface + { + template + static std::false_type + test(...); - template - static std::true_type - test(decltype(&C::operator+=), - decltype(&C::operator-=), - decltype(&C::l2_norm)); + template + static std::true_type + test(decltype(&C::operator+=), + decltype(&C::operator-=), + decltype(&C::l2_norm)); - public: - // type is std::true_type if Matrix provides vmult_add and Tvmult_add, - // otherwise it is std::false_type + public: + // type is std::true_type if Matrix provides vmult_add and Tvmult_add, + // otherwise it is std::false_type - typedef decltype(test(nullptr, nullptr, nullptr)) type; - }; -} // namespace + typedef decltype(test(nullptr, nullptr, nullptr)) type; + }; // namespace + } // namespace PackagedOperationImplementation +} // namespace internal /** @@ -516,7 +519,8 @@ namespace template ::type::value>::type> + internal::PackagedOperationImplementation::has_vector_interface< + Range>::type::value>::type> PackagedOperation operator+(const Range &u, const Range &v) { @@ -560,7 +564,8 @@ operator+(const Range &u, const Range &v) template ::type::value>::type> + internal::PackagedOperationImplementation::has_vector_interface< + Range>::type::value>::type> PackagedOperation operator-(const Range &u, const Range &v) { @@ -603,7 +608,8 @@ operator-(const Range &u, const Range &v) */ 
template ::type::value>::type> + internal::PackagedOperationImplementation::has_vector_interface< + Range>::type::value>::type> PackagedOperation operator*(const Range & u, typename Range::value_type number) { @@ -627,7 +633,8 @@ PackagedOperation operator*(const Range & u, */ template ::type::value>::type> + internal::PackagedOperationImplementation::has_vector_interface< + Range>::type::value>::type> PackagedOperation operator*(typename Range::value_type number, const Range & u) { diff --git a/include/deal.II/lac/trilinos_sparse_matrix.h b/include/deal.II/lac/trilinos_sparse_matrix.h index ef7cff6901..55b3e90e77 100644 --- a/include/deal.II/lac/trilinos_sparse_matrix.h +++ b/include/deal.II/lac/trilinos_sparse_matrix.h @@ -2150,66 +2150,61 @@ namespace TrilinosWrappers namespace internal { - namespace + inline void + check_vector_map_equality(const Epetra_CrsMatrix & mtrx, + const Epetra_MultiVector &src, + const Epetra_MultiVector &dst, + const bool transpose) { - inline void - check_vector_map_equality(const Epetra_CrsMatrix & mtrx, - const Epetra_MultiVector &src, - const Epetra_MultiVector &dst, - const bool transpose) - { - if (transpose == false) - { - Assert(src.Map().SameAs(mtrx.DomainMap()) == true, - ExcMessage( - "Column map of matrix does not fit with vector map!")); - Assert(dst.Map().SameAs(mtrx.RangeMap()) == true, - ExcMessage( - "Row map of matrix does not fit with vector map!")); - } - else - { - Assert(src.Map().SameAs(mtrx.RangeMap()) == true, - ExcMessage( - "Column map of matrix does not fit with vector map!")); - Assert(dst.Map().SameAs(mtrx.DomainMap()) == true, - ExcMessage( - "Row map of matrix does not fit with vector map!")); - } - (void)mtrx; // removes -Wunused-variable in optimized mode - (void)src; - (void)dst; - } + if (transpose == false) + { + Assert(src.Map().SameAs(mtrx.DomainMap()) == true, + ExcMessage( + "Column map of matrix does not fit with vector map!")); + Assert(dst.Map().SameAs(mtrx.RangeMap()) == true, + ExcMessage("Row map of matrix does not fit with vector map!")); + } + else + { + Assert(src.Map().SameAs(mtrx.RangeMap()) == true, + ExcMessage( + "Column map of matrix does not fit with vector map!")); + Assert(dst.Map().SameAs(mtrx.DomainMap()) == true, + ExcMessage("Row map of matrix does not fit with vector map!")); + } + (void)mtrx; // removes -Wunused-variable in optimized mode + (void)src; + (void)dst; + } - inline void - check_vector_map_equality(const Epetra_Operator & op, - const Epetra_MultiVector &src, - const Epetra_MultiVector &dst, - const bool transpose) - { - if (transpose == false) - { - Assert(src.Map().SameAs(op.OperatorDomainMap()) == true, - ExcMessage( - "Column map of operator does not fit with vector map!")); - Assert(dst.Map().SameAs(op.OperatorRangeMap()) == true, - ExcMessage( - "Row map of operator does not fit with vector map!")); - } - else - { - Assert(src.Map().SameAs(op.OperatorRangeMap()) == true, - ExcMessage( - "Column map of operator does not fit with vector map!")); - Assert(dst.Map().SameAs(op.OperatorDomainMap()) == true, - ExcMessage( - "Row map of operator does not fit with vector map!")); - } - (void)op; // removes -Wunused-variable in optimized mode - (void)src; - (void)dst; - } - } // namespace + inline void + check_vector_map_equality(const Epetra_Operator & op, + const Epetra_MultiVector &src, + const Epetra_MultiVector &dst, + const bool transpose) + { + if (transpose == false) + { + Assert(src.Map().SameAs(op.OperatorDomainMap()) == true, + ExcMessage( + "Column map of operator does not fit 
with vector map!")); + Assert(dst.Map().SameAs(op.OperatorRangeMap()) == true, + ExcMessage( + "Row map of operator does not fit with vector map!")); + } + else + { + Assert(src.Map().SameAs(op.OperatorRangeMap()) == true, + ExcMessage( + "Column map of operator does not fit with vector map!")); + Assert(dst.Map().SameAs(op.OperatorDomainMap()) == true, + ExcMessage( + "Row map of operator does not fit with vector map!")); + } + (void)op; // removes -Wunused-variable in optimized mode + (void)src; + (void)dst; + } namespace LinearOperatorImplementation { diff --git a/include/deal.II/lac/trilinos_vector.h b/include/deal.II/lac/trilinos_vector.h index f2b6525a7b..481a3f49a4 100644 --- a/include/deal.II/lac/trilinos_vector.h +++ b/include/deal.II/lac/trilinos_vector.h @@ -184,28 +184,25 @@ namespace TrilinosWrappers * @endcond */ - namespace - { # ifndef DEAL_II_WITH_64BIT_INDICES // define a helper function that queries the global ID of local ID of - // an Epetra_BlockMap object by calling either the 32- or 64-bit - // function necessary. - inline int - gid(const Epetra_BlockMap &map, int i) - { - return map.GID(i); - } + // an Epetra_BlockMap object by calling either the 32- or 64-bit + // function necessary. + inline int + gid(const Epetra_BlockMap &map, int i) + { + return map.GID(i); + } # else // define a helper function that queries the global ID of local ID of - // an Epetra_BlockMap object by calling either the 32- or 64-bit - // function necessary. - inline long long int - gid(const Epetra_BlockMap &map, int i) - { - return map.GID64(i); - } + // an Epetra_BlockMap object by calling either the 32- or 64-bit + // function necessary. + inline long long int + gid(const Epetra_BlockMap &map, int i) + { + return map.GID64(i); + } # endif - } // namespace /** * Namespace for Trilinos vector classes that work in parallel over MPI. diff --git a/include/deal.II/matrix_free/evaluation_selector.h b/include/deal.II/matrix_free/evaluation_selector.h index d82b75e3ab..b8cd0248cc 100644 --- a/include/deal.II/matrix_free/evaluation_selector.h +++ b/include/deal.II/matrix_free/evaluation_selector.h @@ -22,48 +22,396 @@ DEAL_II_NAMESPACE_OPEN #ifndef DOXYGEN -namespace +namespace internal { - // The following classes serve the purpose of choosing the correct template - // specialization of the FEEvaluationImpl* classes in case fe_degree - // and n_q_points_1d are only given as runtime parameters. - // The logic is the following: - // 1. Start with fe_degree=0, n_q_points_1d=0 and DEPTH=0. - // 2. If the current assumption on fe_degree doesn't match the runtime - // parameter, increase fe_degree by one and try again. - // If fe_degree==10 use the class Default which serves as a fallback. - // 3. After fixing the fe_degree, DEPTH is increased (DEPTH=1) and we start - // with - // n_q_points=fe_degree+1. - // 4. If the current assumption on n_q_points_1d doesn't match the runtime - // parameter, increase n_q_points_1d by one and try again. - // If n_q_points_1d==degree+3 use the class Default which serves as a - // fallback. - - /** - * This class serves as a fallback in case we don't have the appropriate - * template specialization for the run time and template parameters given. 
- */ - template - struct Default + namespace EvaluationSelectorImplementation { - static inline void - evaluate(const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * values_quad, - Number * gradients_quad, - Number * hessians_quad, - Number * scratch_data, - const bool evaluate_values, - const bool evaluate_gradients, - const bool evaluate_hessians) + // The following classes serve the purpose of choosing the correct template + // specialization of the FEEvaluationImpl* classes in case fe_degree + // and n_q_points_1d are only given as runtime parameters. + // The logic is the following: + // 1. Start with fe_degree=0, n_q_points_1d=0 and DEPTH=0. + // 2. If the current assumption on fe_degree doesn't match the runtime + // parameter, increase fe_degree by one and try again. + // If fe_degree==10 use the class Default which serves as a fallback. + // 3. After fixing the fe_degree, DEPTH is increased (DEPTH=1) and we start + // with + // n_q_points=fe_degree+1. + // 4. If the current assumption on n_q_points_1d doesn't match the runtime + // parameter, increase n_q_points_1d by one and try again. + // If n_q_points_1d==degree+3 use the class Default which serves as a + // fallback. + + /** + * This class serves as a fallback in case we don't have the appropriate + * template specialization for the run time and template parameters given. + */ + template + struct Default { - internal::FEEvaluationImpl::evaluate(shape_info, + static inline void + evaluate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + Number * gradients_quad, + Number * hessians_quad, + Number * scratch_data, + const bool evaluate_values, + const bool evaluate_gradients, + const bool evaluate_hessians) + { + internal::FEEvaluationImpl< + internal::MatrixFreeFunctions::tensor_general, + dim, + -1, + 0, + n_components, + Number>::evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + } + + static inline void + integrate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + Number * gradients_quad, + Number * scratch_data, + const bool integrate_values, + const bool integrate_gradients) + { + internal::FEEvaluationImpl< + internal::MatrixFreeFunctions::tensor_general, + dim, + -1, + 0, + n_components, + Number>::integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients, + false); + } + }; + + + /** + * This class implements the actual choice of the template specialization. + */ + template + struct Factory : Default + {}; + + /** + * This specialization sets the maximal fe_degree for + * which we want to determine the correct template parameters based at + * runtime. + */ + template + struct Factory + : Default + {}; + + /** + * This specialization sets the maximal number of n_q_points_1d for + * which we want to determine the correct template parameters based at + * runtime. + */ + template + struct Factory::type> + : Default + {}; + + /** + * This class chooses the correct template degree. 
+ */ + template + struct Factory + { + static inline void + evaluate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + Number * gradients_quad, + Number * hessians_quad, + Number * scratch_data, + const bool evaluate_values, + const bool evaluate_gradients, + const bool evaluate_hessians) + { + const unsigned int runtime_degree = shape_info.fe_degree; + constexpr unsigned int start_n_q_points = degree + 1; + if (runtime_degree == degree) + Factory:: + evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + else + Factory:: + evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + } + + static inline void + integrate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + Number * gradients_quad, + Number * scratch_data, + const bool integrate_values, + const bool integrate_gradients) + { + const int runtime_degree = shape_info.fe_degree; + constexpr unsigned int start_n_q_points = degree + 1; + if (runtime_degree == degree) + Factory:: + integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients); + else + Factory:: + integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients); + } + }; + + /** + * This class chooses the correct template n_q_points_1d after degree was + * chosen. + */ + template + struct Factory::type> + { + static inline void + evaluate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + Number * gradients_quad, + Number * hessians_quad, + Number * scratch_data, + const bool evaluate_values, + const bool evaluate_gradients, + const bool evaluate_hessians) + { + const int runtime_n_q_points_1d = shape_info.n_q_points_1d; + if (runtime_n_q_points_1d == n_q_points_1d) + { + if (n_q_points_1d == degree + 1 && + shape_info.element_type == + internal::MatrixFreeFunctions::tensor_symmetric_collocation) + internal:: + FEEvaluationImplCollocation:: + evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + else if (degree < n_q_points_1d) + internal::FEEvaluationImplTransformToCollocation< + dim, + degree, + n_q_points_1d, + n_components, + Number>::evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + else + internal::FEEvaluationImpl< + internal::MatrixFreeFunctions::tensor_symmetric, + dim, + degree, + n_q_points_1d, + n_components, + Number>::evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + } + else + Factory:: + evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); + } + + static inline void + integrate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + 
Number * gradients_quad, + Number * scratch_data, + const bool integrate_values, + const bool integrate_gradients) + { + const int runtime_n_q_points_1d = shape_info.n_q_points_1d; + if (runtime_n_q_points_1d == n_q_points_1d) + { + if (n_q_points_1d == degree + 1 && + shape_info.element_type == + internal::MatrixFreeFunctions::tensor_symmetric_collocation) + internal:: + FEEvaluationImplCollocation:: + integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients, + false); + else if (degree < n_q_points_1d) + internal::FEEvaluationImplTransformToCollocation< + dim, + degree, + n_q_points_1d, + n_components, + Number>::integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients, + false); + else + internal::FEEvaluationImpl< + internal::MatrixFreeFunctions::tensor_symmetric, + dim, + degree, + n_q_points_1d, + n_components, + Number>::integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients, + false); + } + else + Factory:: + integrate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients); + } + }; + + + + /** + * This is the entry point for choosing the correct runtime parameters + * for the 'evaluate' function. + */ + template + void + symmetric_selector_evaluate( + const internal::MatrixFreeFunctions::ShapeInfo &shape_info, + Number * values_dofs_actual, + Number * values_quad, + Number * gradients_quad, + Number * hessians_quad, + Number * scratch_data, + const bool evaluate_values, + const bool evaluate_gradients, + const bool evaluate_hessians) + { + Assert(shape_info.element_type <= + internal::MatrixFreeFunctions::tensor_symmetric, + ExcInternalError()); + Factory::evaluate(shape_info, values_dofs_actual, values_quad, gradients_quad, @@ -74,8 +422,15 @@ namespace evaluate_hessians); } - static inline void - integrate( + + + /** + * This is the entry point for choosing the correct runtime parameters + * for the 'integrate' function. + */ + template + void + symmetric_selector_integrate( const internal::MatrixFreeFunctions::ShapeInfo &shape_info, Number * values_dofs_actual, Number * values_quad, @@ -84,366 +439,19 @@ namespace const bool integrate_values, const bool integrate_gradients) { - internal::FEEvaluationImpl::integrate(shape_info, + Assert(shape_info.element_type <= + internal::MatrixFreeFunctions::tensor_symmetric, + ExcInternalError()); + Factory::integrate(shape_info, values_dofs_actual, values_quad, gradients_quad, scratch_data, integrate_values, - integrate_gradients, - false); - } - }; - - - /** - * This class implements the actual choice of the template specialization. - */ - template - struct Factory : Default - {}; - - /** - * This specialization sets the maximal fe_degree for - * which we want to determine the correct template parameters based at - * runtime. - */ - template - struct Factory - : Default - {}; - - /** - * This specialization sets the maximal number of n_q_points_1d for - * which we want to determine the correct template parameters based at - * runtime. - */ - template - struct Factory::type> - : Default - {}; - - /** - * This class chooses the correct template degree. 
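Editor's note (not part of the patch): the nested branching in the evaluate()/integrate() bodies of the n_q_points_1d factory above always reduces to the same three-way choice. Restated as a small free function, with an enum and parameter names invented purely for illustration (the real code dispatches to FEEvaluationImplCollocation, FEEvaluationImplTransformToCollocation and FEEvaluationImpl instead of returning a value):

enum class Kernel
{
  collocation,              // collocation element and n_q_points_1d == degree + 1
  transform_to_collocation, // degree < n_q_points_1d
  general                   // everything else: generic tensor-symmetric path
};

inline Kernel
choose_kernel(const bool         is_collocation_element,
              const unsigned int degree,
              const unsigned int n_q_points_1d)
{
  if (is_collocation_element && n_q_points_1d == degree + 1)
    return Kernel::collocation;
  else if (degree < n_q_points_1d)
    return Kernel::transform_to_collocation;
  else
    return Kernel::general;
}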
- */ - template - struct Factory - { - static inline void - evaluate(const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * values_quad, - Number * gradients_quad, - Number * hessians_quad, - Number * scratch_data, - const bool evaluate_values, - const bool evaluate_gradients, - const bool evaluate_hessians) - { - const unsigned int runtime_degree = shape_info.fe_degree; - constexpr unsigned int start_n_q_points = degree + 1; - if (runtime_degree == degree) - Factory:: - evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); - else - Factory:: - evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); + integrate_gradients); } - - static inline void - integrate( - const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * values_quad, - Number * gradients_quad, - Number * scratch_data, - const bool integrate_values, - const bool integrate_gradients) - { - const int runtime_degree = shape_info.fe_degree; - constexpr unsigned int start_n_q_points = degree + 1; - if (runtime_degree == degree) - Factory:: - integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients); - else - Factory:: - integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients); - } - }; - - /** - * This class chooses the correct template n_q_points_1d after degree was - * chosen. - */ - template - struct Factory::type> - { - static inline void - evaluate(const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * values_quad, - Number * gradients_quad, - Number * hessians_quad, - Number * scratch_data, - const bool evaluate_values, - const bool evaluate_gradients, - const bool evaluate_hessians) - { - const int runtime_n_q_points_1d = shape_info.n_q_points_1d; - if (runtime_n_q_points_1d == n_q_points_1d) - { - if (n_q_points_1d == degree + 1 && - shape_info.element_type == - internal::MatrixFreeFunctions::tensor_symmetric_collocation) - internal:: - FEEvaluationImplCollocation:: - evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); - else if (degree < n_q_points_1d) - internal::FEEvaluationImplTransformToCollocation< - dim, - degree, - n_q_points_1d, - n_components, - Number>::evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); - else - internal::FEEvaluationImpl< - internal::MatrixFreeFunctions::tensor_symmetric, - dim, - degree, - n_q_points_1d, - n_components, - Number>::evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); - } - else - Factory:: - evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); - } - - static inline void - integrate( - const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * 
values_quad, - Number * gradients_quad, - Number * scratch_data, - const bool integrate_values, - const bool integrate_gradients) - { - const int runtime_n_q_points_1d = shape_info.n_q_points_1d; - if (runtime_n_q_points_1d == n_q_points_1d) - { - if (n_q_points_1d == degree + 1 && - shape_info.element_type == - internal::MatrixFreeFunctions::tensor_symmetric_collocation) - internal:: - FEEvaluationImplCollocation:: - integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients, - false); - else if (degree < n_q_points_1d) - internal::FEEvaluationImplTransformToCollocation< - dim, - degree, - n_q_points_1d, - n_components, - Number>::integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients, - false); - else - internal::FEEvaluationImpl< - internal::MatrixFreeFunctions::tensor_symmetric, - dim, - degree, - n_q_points_1d, - n_components, - Number>::integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients, - false); - } - else - Factory:: - integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients); - } - }; - - - - /** - * This is the entry point for choosing the correct runtime parameters - * for the 'evaluate' function. - */ - template - void - symmetric_selector_evaluate( - const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * values_quad, - Number * gradients_quad, - Number * hessians_quad, - Number * scratch_data, - const bool evaluate_values, - const bool evaluate_gradients, - const bool evaluate_hessians) - { - Assert(shape_info.element_type <= - internal::MatrixFreeFunctions::tensor_symmetric, - ExcInternalError()); - Factory::evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); - } - - - - /** - * This is the entry point for choosing the correct runtime parameters - * for the 'integrate' function. 
- */ - template - void - symmetric_selector_integrate( - const internal::MatrixFreeFunctions::ShapeInfo &shape_info, - Number * values_dofs_actual, - Number * values_quad, - Number * gradients_quad, - Number * scratch_data, - const bool integrate_values, - const bool integrate_gradients) - { - Assert(shape_info.element_type <= - internal::MatrixFreeFunctions::tensor_symmetric, - ExcInternalError()); - Factory::integrate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients); - } -} // namespace + } // namespace EvaluationSelectorImplementation +} // namespace internal #endif @@ -883,15 +891,16 @@ SelectEvaluator::evaluate( evaluate_gradients, evaluate_hessians); else - symmetric_selector_evaluate(shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - hessians_quad, - scratch_data, - evaluate_values, - evaluate_gradients, - evaluate_hessians); + internal::EvaluationSelectorImplementation:: + symmetric_selector_evaluate(shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + hessians_quad, + scratch_data, + evaluate_values, + evaluate_gradients, + evaluate_hessians); } @@ -959,14 +968,15 @@ SelectEvaluator::integrate( integrate_gradients, false); else - symmetric_selector_integrate( - shape_info, - values_dofs_actual, - values_quad, - gradients_quad, - scratch_data, - integrate_values, - integrate_gradients); + internal::EvaluationSelectorImplementation:: + symmetric_selector_integrate( + shape_info, + values_dofs_actual, + values_quad, + gradients_quad, + scratch_data, + integrate_values, + integrate_gradients); } #endif // DOXYGEN diff --git a/include/deal.II/matrix_free/operators.h b/include/deal.II/matrix_free/operators.h index 59b8054383..a4eb280ca5 100644 --- a/include/deal.II/matrix_free/operators.h +++ b/include/deal.II/matrix_free/operators.h @@ -36,7 +36,7 @@ DEAL_II_NAMESPACE_OPEN namespace MatrixFreeOperators { - namespace + namespace BlockHelper { // workaroud for unifying non-block vector and block vector implementations // a non-block vector has one block and the only subblock is the vector @@ -101,7 +101,7 @@ namespace MatrixFreeOperators typename std::enable_if::value, void>::type collect_sizes(const VectorType &) {} - } // namespace + } // namespace BlockHelper /** * Abstract base class for matrix-free operators which can be used both at @@ -1052,21 +1052,21 @@ namespace MatrixFreeOperators Base::initialize_dof_vector(VectorType &vec) const { Assert(data.get() != nullptr, ExcNotInitialized()); - AssertDimension(n_blocks(vec), selected_rows.size()); - for (unsigned int i = 0; i < n_blocks(vec); ++i) + AssertDimension(BlockHelper::n_blocks(vec), selected_rows.size()); + for (unsigned int i = 0; i < BlockHelper::n_blocks(vec); ++i) { const unsigned int index = selected_rows[i]; - if (!subblock(vec, index) + if (!BlockHelper::subblock(vec, index) .partitioners_are_compatible( *data->get_dof_info(index).vector_partitioner)) - data->initialize_dof_vector(subblock(vec, index), index); + data->initialize_dof_vector(BlockHelper::subblock(vec, index), index); - Assert(subblock(vec, index) + Assert(BlockHelper::subblock(vec, index) .partitioners_are_globally_compatible( *data->get_dof_info(index).vector_partitioner), ExcInternalError()); } - collect_sizes(vec); + BlockHelper::collect_sizes(vec); } @@ -1214,14 +1214,15 @@ namespace MatrixFreeOperators void Base::set_constrained_entries_to_one(VectorType &dst) const { - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j 
= 0; j < BlockHelper::n_blocks(dst); ++j) { const std::vector &constrained_dofs = data->get_constrained_dofs(selected_rows[j]); for (unsigned int i = 0; i < constrained_dofs.size(); ++i) - subblock(dst, j).local_element(constrained_dofs[i]) = 1.; + BlockHelper::subblock(dst, j).local_element(constrained_dofs[i]) = 1.; for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i) - subblock(dst, j).local_element(edge_constrained_indices[j][i]) = 1.; + BlockHelper::subblock(dst, j).local_element( + edge_constrained_indices[j][i]) = 1.; } } @@ -1264,29 +1265,30 @@ namespace MatrixFreeOperators const bool is_row) const { typedef typename Base::value_type Number; - for (unsigned int i = 0; i < n_blocks(src); ++i) + for (unsigned int i = 0; i < BlockHelper::n_blocks(src); ++i) { const unsigned int mf_component = is_row ? selected_rows[i] : selected_columns[i]; // If both vectors use the same partitioner -> done - if (subblock(src, i).get_partitioner().get() == + if (BlockHelper::subblock(src, i).get_partitioner().get() == data->get_dof_info(mf_component).vector_partitioner.get()) continue; // If not, assert that the local ranges are the same and reset to the // current partitioner Assert( - subblock(src, i).get_partitioner()->local_size() == + BlockHelper::subblock(src, i).get_partitioner()->local_size() == data->get_dof_info(mf_component).vector_partitioner->local_size(), ExcMessage("The vector passed to the vmult() function does not have " "the correct size for compatibility with MatrixFree.")); // copy the vector content to a temporary vector so that it does not get // lost - LinearAlgebra::distributed::Vector copy_vec(subblock(src, i)); - subblock(const_cast(src), i) + LinearAlgebra::distributed::Vector copy_vec( + BlockHelper::subblock(src, i)); + BlockHelper::subblock(const_cast(src), i) .reinit(data->get_dof_info(mf_component).vector_partitioner); - subblock(const_cast(src), i) + BlockHelper::subblock(const_cast(src), i) .copy_locally_owned_data_from(copy_vec); } } @@ -1304,14 +1306,16 @@ namespace MatrixFreeOperators // set zero Dirichlet values on the input vector (and remember the src and // dst values because we need to reset them at the end) - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) { for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i) { edge_constrained_values[j][i] = std::pair( - subblock(src, j).local_element(edge_constrained_indices[j][i]), - subblock(dst, j).local_element(edge_constrained_indices[j][i])); - subblock(const_cast(src), j) + BlockHelper::subblock(src, j).local_element( + edge_constrained_indices[j][i]), + BlockHelper::subblock(dst, j).local_element( + edge_constrained_indices[j][i])); + BlockHelper::subblock(const_cast(src), j) .local_element(edge_constrained_indices[j][i]) = 0.; } } @@ -1326,8 +1330,8 @@ namespace MatrixFreeOperators const bool transpose) const { AssertDimension(dst.size(), src.size()); - AssertDimension(n_blocks(dst), n_blocks(src)); - AssertDimension(n_blocks(dst), selected_rows.size()); + AssertDimension(BlockHelper::n_blocks(dst), BlockHelper::n_blocks(src)); + AssertDimension(BlockHelper::n_blocks(dst), selected_rows.size()); preprocess_constraints(dst, src); if (transpose) Tapply_add(dst, src); @@ -1343,25 +1347,26 @@ namespace MatrixFreeOperators Base::postprocess_constraints(VectorType & dst, const VectorType &src) const { - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) { const 
std::vector &constrained_dofs = data->get_constrained_dofs(selected_rows[j]); for (unsigned int i = 0; i < constrained_dofs.size(); ++i) - subblock(dst, j).local_element(constrained_dofs[i]) += - subblock(src, j).local_element(constrained_dofs[i]); + BlockHelper::subblock(dst, j).local_element(constrained_dofs[i]) += + BlockHelper::subblock(src, j).local_element(constrained_dofs[i]); } // reset edge constrained values, multiply by unit matrix and add into // destination - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) { for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i) { - subblock(const_cast(src), j) + BlockHelper::subblock(const_cast(src), j) .local_element(edge_constrained_indices[j][i]) = edge_constrained_values[j][i].first; - subblock(dst, j).local_element(edge_constrained_indices[j][i]) = + BlockHelper::subblock(dst, j).local_element( + edge_constrained_indices[j][i]) = edge_constrained_values[j][i].second + edge_constrained_values[j][i].first; } @@ -1387,34 +1392,36 @@ namespace MatrixFreeOperators // set zero Dirichlet values on the input vector (and remember the src and // dst values because we need to reset them at the end) - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i) { edge_constrained_values[j][i] = std::pair( - subblock(src, j).local_element(edge_constrained_indices[j][i]), - subblock(dst, j).local_element(edge_constrained_indices[j][i])); - subblock(const_cast(src), j) + BlockHelper::subblock(src, j).local_element( + edge_constrained_indices[j][i]), + BlockHelper::subblock(dst, j).local_element( + edge_constrained_indices[j][i])); + BlockHelper::subblock(const_cast(src), j) .local_element(edge_constrained_indices[j][i]) = 0.; } apply_add(dst, src); - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) { unsigned int c = 0; for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i) { for (; c < edge_constrained_indices[j][i]; ++c) - subblock(dst, j).local_element(c) = 0.; + BlockHelper::subblock(dst, j).local_element(c) = 0.; ++c; // reset the src values - subblock(const_cast(src), j) + BlockHelper::subblock(const_cast(src), j) .local_element(edge_constrained_indices[j][i]) = edge_constrained_values[j][i].first; } - for (; c < subblock(dst, j).local_size(); ++c) - subblock(dst, j).local_element(c) = 0.; + for (; c < BlockHelper::subblock(dst, j).local_size(); ++c) + BlockHelper::subblock(dst, j).local_element(c) = 0.; } } @@ -1436,24 +1443,25 @@ namespace MatrixFreeOperators return; VectorType src_cpy(src); - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) { unsigned int c = 0; for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i) { for (; c < edge_constrained_indices[j][i]; ++c) - subblock(src_cpy, j).local_element(c) = 0.; + BlockHelper::subblock(src_cpy, j).local_element(c) = 0.; ++c; } - for (; c < subblock(src_cpy, j).local_size(); ++c) - subblock(src_cpy, j).local_element(c) = 0.; + for (; c < BlockHelper::subblock(src_cpy, j).local_size(); ++c) + BlockHelper::subblock(src_cpy, j).local_element(c) = 0.; } apply_add(dst, src_cpy); - for (unsigned int j = 0; j < n_blocks(dst); ++j) + for (unsigned int j = 0; j < BlockHelper::n_blocks(dst); ++j) for (unsigned int i = 0; i < 
edge_constrained_indices[j].size(); ++i) - subblock(dst, j).local_element(edge_constrained_indices[j][i]) = 0.; + BlockHelper::subblock(dst, j).local_element( + edge_constrained_indices[j][i]) = 0.; } @@ -1840,7 +1848,7 @@ namespace MatrixFreeOperators src); } - namespace + namespace Implementation { template bool @@ -1853,7 +1861,7 @@ namespace MatrixFreeOperators return true; } - } // namespace + } // namespace Implementation @@ -1877,7 +1885,7 @@ namespace MatrixFreeOperators { for (unsigned int q = 0; q < phi.n_q_points; ++q) { - Assert(non_negative((*scalar_coefficient)(cell, q)), + Assert(Implementation::non_negative((*scalar_coefficient)(cell, q)), ExcMessage("Coefficient must be non-negative")); phi.submit_gradient((*scalar_coefficient)(cell, q) * phi.get_gradient(q), diff --git a/include/deal.II/multigrid/mg_transfer_internal.h b/include/deal.II/multigrid/mg_transfer_internal.h index 8523513548..1cc1c60287 100644 --- a/include/deal.II/multigrid/mg_transfer_internal.h +++ b/include/deal.II/multigrid/mg_transfer_internal.h @@ -42,8 +42,8 @@ namespace internal template void fill_copy_indices( - const dealii::DoFHandler &mg_dof, - const MGConstrainedDoFs * mg_constrained_dofs, + const DoFHandler &mg_dof, + const MGConstrainedDoFs * mg_constrained_dofs, std::vector>> ©_indices, @@ -120,7 +120,7 @@ namespace internal template void setup_transfer( - const dealii::DoFHandler & mg_dof, + const DoFHandler & mg_dof, const MGConstrainedDoFs * mg_constrained_dofs, ElementInfo & elem_info, std::vector> &level_dof_indices, diff --git a/include/deal.II/physics/notation.h b/include/deal.II/physics/notation.h index 82c227bc98..9bc7371cf3 100644 --- a/include/deal.II/physics/notation.h +++ b/include/deal.II/physics/notation.h @@ -962,18 +962,15 @@ namespace Physics namespace internal { - namespace - { - template - struct is_rank_2_symmetric_tensor : std::false_type - {}; - - template - struct is_rank_2_symmetric_tensor> - : std::true_type - {}; - } // namespace - } // namespace internal + template + struct is_rank_2_symmetric_tensor : std::false_type + {}; + + template + struct is_rank_2_symmetric_tensor> + : std::true_type + {}; + } // namespace internal template + inline Tensor<1, dim, Number> + transformation_contraction(const Tensor<1, dim, Number> &V, + const Tensor<2, dim, Number> &F) { - template - inline Tensor<1, dim, Number> - transformation_contraction(const Tensor<1, dim, Number> &V, - const Tensor<2, dim, Number> &F) - { - return contract<1, 0>(F, V); - } + return contract<1, 0>(F, V); + } - template - inline Tensor<2, dim, Number> - transformation_contraction(const Tensor<2, dim, Number> &T, - const Tensor<2, dim, Number> &F) - { - return contract<1, 0>(F, contract<1, 1>(T, F)); - } + template + inline Tensor<2, dim, Number> + transformation_contraction(const Tensor<2, dim, Number> &T, + const Tensor<2, dim, Number> &F) + { + return contract<1, 0>(F, contract<1, 1>(T, F)); + } - template - inline dealii::SymmetricTensor<2, dim, Number> - transformation_contraction( - const dealii::SymmetricTensor<2, dim, Number> &T, - const Tensor<2, dim, Number> & F) - { - Tensor<2, dim, Number> tmp_1; - for (unsigned int i = 0; i < dim; ++i) + template + inline dealii::SymmetricTensor<2, dim, Number> + transformation_contraction(const dealii::SymmetricTensor<2, dim, Number> &T, + const Tensor<2, dim, Number> & F) + { + Tensor<2, dim, Number> tmp_1; + for (unsigned int i = 0; i < dim; ++i) + for (unsigned int J = 0; J < dim; ++J) + for (unsigned int I = 0; I < dim; ++I) + tmp_1[i][J] += F[i][I] * 
T[I][J]; + + dealii::SymmetricTensor<2, dim, Number> out; + for (unsigned int i = 0; i < dim; ++i) + for (unsigned int j = i; j < dim; ++j) for (unsigned int J = 0; J < dim; ++J) - for (unsigned int I = 0; I < dim; ++I) - tmp_1[i][J] += F[i][I] * T[I][J]; + out[i][j] += F[j][J] * tmp_1[i][J]; - dealii::SymmetricTensor<2, dim, Number> out; - for (unsigned int i = 0; i < dim; ++i) - for (unsigned int j = i; j < dim; ++j) - for (unsigned int J = 0; J < dim; ++J) - out[i][j] += F[j][J] * tmp_1[i][J]; + return out; + } - return out; - } - - template - inline Tensor<4, dim, Number> - transformation_contraction(const Tensor<4, dim, Number> &H, - const Tensor<2, dim, Number> &F) - { - // This contraction order and indexing might look a bit dubious, so a - // quick explanation as to what's going on is probably in order: - // - // When the contract() function operates on the inner indices, the - // result has the inner index and outer index transposed, i.e. - // contract<2,1>(H,F) implies - // T_{IJLk} = (H_{IJMN} F_{mM}) \delta_{mL} \delta_{Nk} - // rather than T_{IJkL} (the desired result). - // So, in effect, contraction of the 3rd (inner) index with F as the - // second argument results in its transposition with respect to its - // adjacent neighbor. This is due to the position of the argument F, - // leading to the free index being on the right hand side of the result. - // However, given that we can do two transformations from the LHS of H - // and two from the right we can undo the otherwise erroneous - // swapping of the outer indices upon application of the second - // sets of contractions. - // - // Note: Its significantly quicker (in 3d) to push forward - // each index individually - return contract<1, 1>( - F, contract<1, 1>(F, contract<2, 1>(contract<2, 1>(H, F), F))); - } + template + inline Tensor<4, dim, Number> + transformation_contraction(const Tensor<4, dim, Number> &H, + const Tensor<2, dim, Number> &F) + { + // This contraction order and indexing might look a bit dubious, so a + // quick explanation as to what's going on is probably in order: + // + // When the contract() function operates on the inner indices, the + // result has the inner index and outer index transposed, i.e. + // contract<2,1>(H,F) implies + // T_{IJLk} = (H_{IJMN} F_{mM}) \delta_{mL} \delta_{Nk} + // rather than T_{IJkL} (the desired result). + // So, in effect, contraction of the 3rd (inner) index with F as the + // second argument results in its transposition with respect to its + // adjacent neighbor. This is due to the position of the argument F, + // leading to the free index being on the right hand side of the result. + // However, given that we can do two transformations from the LHS of H + // and two from the right we can undo the otherwise erroneous + // swapping of the outer indices upon application of the second + // sets of contractions. + // + // Note: Its significantly quicker (in 3d) to push forward + // each index individually + return contract<1, 1>( + F, contract<1, 1>(F, contract<2, 1>(contract<2, 1>(H, F), F))); + } - template - inline dealii::SymmetricTensor<4, dim, Number> - transformation_contraction( - const dealii::SymmetricTensor<4, dim, Number> &H, - const Tensor<2, dim, Number> & F) - { - // The first and last transformation operations respectively - // break and recover the symmetry properties of the tensors. 
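Editor's note (reading aid, not part of the patch): all transformation_contraction() overloads above compute the same operation, the push-forward of every free index with F; only the implementation strategy differs by rank. With summation over repeated capital indices:

  rank 1:  out_i    = F_iI V_I
  rank 2:  out_ij   = F_iI F_jJ T_IJ              (i.e. F T F^T)
  rank 4:  out_ijkl = F_iI F_jJ F_kK F_lL H_IJKL

The SymmetricTensor variants fill only the entries with i <= j (and k <= l), as in the loops above, because the result is known to be symmetric and the remaining entries follow from that symmetry.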
- // We also want to perform a minimal number of operations here - // and avoid some complications related to the transposition of - // tensor indices when contracting inner indices using the contract() - // function. (For an explanation of the contraction operations, - // please see the note in the equivalent function for standard - // Tensors.) So what we'll do here is manually perform the first - // and last contractions that break/recover the tensor symmetries - // on the inner indices, and use the contract() function only on - // the outer indices. - // - // Note: Its significantly quicker (in 3d) to push forward - // each index individually - - // Push forward (inner) index 1 - Tensor<4, dim, Number> tmp; - for (unsigned int I = 0; I < dim; ++I) - for (unsigned int j = 0; j < dim; ++j) - for (unsigned int K = 0; K < dim; ++K) - for (unsigned int L = 0; L < dim; ++L) - for (unsigned int J = 0; J < dim; ++J) - tmp[I][j][K][L] += F[j][J] * H[I][J][K][L]; - - // Push forward (outer) indices 0 and 3 - tmp = contract<1, 0>(F, contract<3, 1>(tmp, F)); - - // Push forward (inner) index 2 - dealii::SymmetricTensor<4, dim, Number> out; - for (unsigned int i = 0; i < dim; ++i) - for (unsigned int j = i; j < dim; ++j) - for (unsigned int k = 0; k < dim; ++k) - for (unsigned int l = k; l < dim; ++l) - for (unsigned int K = 0; K < dim; ++K) - out[i][j][k][l] += F[k][K] * tmp[i][j][K][l]; - - return out; - } - } // namespace - } // namespace Physics + template + inline dealii::SymmetricTensor<4, dim, Number> + transformation_contraction(const dealii::SymmetricTensor<4, dim, Number> &H, + const Tensor<2, dim, Number> & F) + { + // The first and last transformation operations respectively + // break and recover the symmetry properties of the tensors. + // We also want to perform a minimal number of operations here + // and avoid some complications related to the transposition of + // tensor indices when contracting inner indices using the contract() + // function. (For an explanation of the contraction operations, + // please see the note in the equivalent function for standard + // Tensors.) So what we'll do here is manually perform the first + // and last contractions that break/recover the tensor symmetries + // on the inner indices, and use the contract() function only on + // the outer indices. 
+ // + // Note: Its significantly quicker (in 3d) to push forward + // each index individually + + // Push forward (inner) index 1 + Tensor<4, dim, Number> tmp; + for (unsigned int I = 0; I < dim; ++I) + for (unsigned int j = 0; j < dim; ++j) + for (unsigned int K = 0; K < dim; ++K) + for (unsigned int L = 0; L < dim; ++L) + for (unsigned int J = 0; J < dim; ++J) + tmp[I][j][K][L] += F[j][J] * H[I][J][K][L]; + + // Push forward (outer) indices 0 and 3 + tmp = contract<1, 0>(F, contract<3, 1>(tmp, F)); + + // Push forward (inner) index 2 + dealii::SymmetricTensor<4, dim, Number> out; + for (unsigned int i = 0; i < dim; ++i) + for (unsigned int j = i; j < dim; ++j) + for (unsigned int k = 0; k < dim; ++k) + for (unsigned int l = k; l < dim; ++l) + for (unsigned int K = 0; K < dim; ++K) + out[i][j][k][l] += F[k][K] * tmp[i][j][K][l]; + + return out; + } + } // namespace Physics } // namespace internal diff --git a/source/fe/fe_system.cc b/source/fe/fe_system.cc index ea5cae7ede..d01c87c34c 100644 --- a/source/fe/fe_system.cc +++ b/source/fe/fe_system.cc @@ -33,22 +33,16 @@ DEAL_II_NAMESPACE_OPEN -namespace internal +namespace { - namespace FESystemImplementation + unsigned int + count_nonzeros(const std::vector &vec) { - namespace - { - unsigned int - count_nonzeros(const std::vector &vec) - { - return std::count_if(vec.begin(), vec.end(), [](const unsigned int i) { - return i > 0; - }); - } - } // namespace - } // namespace FESystemImplementation -} // namespace internal + return std::count_if(vec.begin(), vec.end(), [](const unsigned int i) { + return i > 0; + }); + } +} // namespace /* ----------------------- FESystem::InternalData ------------------- */ @@ -308,8 +302,7 @@ FESystem::FESystem( fes, multiplicities), FETools::Compositing::compute_nonzero_components(fes, multiplicities)) - , base_elements( - internal::FESystemImplementation::count_nonzeros(multiplicities)) + , base_elements(count_nonzeros(multiplicities)) { initialize(fes, multiplicities); } @@ -1618,7 +1611,7 @@ FESystem::initialize( ExcDimensionMismatch(fes.size(), multiplicities.size())); Assert(fes.size() > 0, ExcMessage("Need to pass at least one finite element.")); - Assert(internal::FESystemImplementation::count_nonzeros(multiplicities) > 0, + Assert(count_nonzeros(multiplicities) > 0, ExcMessage("You only passed FiniteElements with multiplicity 0.")); // Note that we need to skip every fe with multiplicity 0 in the following -- 2.39.5
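Editor's note (not part of the patch): the last hunk moves count_nonzeros() into an unnamed namespace inside fe_system.cc, while the header changes above replace unnamed namespaces with named ones (EvaluationSelectorImplementation, BlockHelper, Implementation). The distinction matters because an unnamed namespace in a header gives every including translation unit its own copy of the helpers, whereas in a single .cc file it is the idiomatic way to keep a helper file-local. A minimal sketch of the two situations, with file and function names invented for illustration:

// some_header.h -- helpers needed by a header live in a named
// implementation-detail namespace so that all translation units share
// one entity; 'inline' keeps the definition ODR-safe across inclusions.
namespace internal
{
  namespace SomeImplementation
  {
    inline unsigned int
    helper(const unsigned int i)
    {
      return i + 1;
    }
  } // namespace SomeImplementation
} // namespace internal

// some_source.cc -- within a single translation unit an unnamed namespace
// confines the helper to this file.
namespace
{
  unsigned int
  local_helper(const unsigned int i)
  {
    return i + 1;
  }
} // namespace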