From e56bbd5653078faf626d114c8f4c924b80094489 Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Thu, 14 Jun 2018 17:01:07 +0200
Subject: [PATCH] Avoid unnamed namespaces in *.templates.h

---
 include/deal.II/fe/fe_tools.templates.h       | 1195 ++++++-----
 .../fe/fe_tools_extrapolate.templates.h       |  359 ++--
 .../fe/fe_tools_interpolate.templates.h       |  280 ++-
 .../lac/affine_constraints.templates.h        |  395 ++--
 include/deal.II/lac/full_matrix.templates.h   |   77 +-
 .../lac/la_parallel_block_vector.templates.h  |    6 +-
 .../lac/la_parallel_vector.templates.h        |  190 +-
 include/deal.II/lac/sparse_matrix.templates.h |   70 +-
 .../deal.II/matrix_free/dof_info.templates.h  |   13 +-
 .../matrix_free/mapping_info.templates.h      |  133 +-
 .../matrix_free/matrix_free.templates.h       |  185 +-
 .../matrix_free/shape_info.templates.h        |   25 +-
 .../deal.II/multigrid/mg_transfer.templates.h |  279 +--
 .../numerics/data_out_dof_data.templates.h    |   28 +-
 .../numerics/error_estimator.templates.h      | 1901 ++++++++---------
 .../numerics/matrix_creator.templates.h       |   38 +-
 .../deal.II/numerics/vector_tools.templates.h |  139 +-
 17 files changed, 2650 insertions(+), 2663 deletions(-)

diff --git a/include/deal.II/fe/fe_tools.templates.h b/include/deal.II/fe/fe_tools.templates.h
index 1283029750..fd3c7a0bc7 100644
--- a/include/deal.II/fe/fe_tools.templates.h
+++ b/include/deal.II/fe/fe_tools.templates.h
@@ -1138,247 +1138,252 @@ namespace FETools
   {
     return std_cxx14::make_unique>(quad);
   }
-} // namespace FETools
-
-namespace
-{
-  // The following three functions serve to fill the maps from element
-  // names to elements fe_name_map below. The first one exists because
-  // we have finite elements which are not implemented for nonzero
-  // codimension. These should be transferred to the second function
-  // eventually.
-  template <int dim>
-  void
-  fill_no_codim_fe_names(
-    std::map<std::string, std::unique_ptr<const Subscriptor>> &result)
-  {
-    result["FE_Q_Hierarchical"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_Hierarchical<dim>>>();
-    result["FE_ABF"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_ABF<dim>>>();
-    result["FE_Bernstein"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Bernstein<dim>>>();
-    result["FE_BDM"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_BDM<dim>>>();
-    result["FE_DGBDM"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGBDM<dim>>>();
-    result["FE_DGNedelec"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGNedelec<dim>>>();
-    result["FE_DGRaviartThomas"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGRaviartThomas<dim>>>();
-    result["FE_RaviartThomas"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_RaviartThomas<dim>>>();
-    result["FE_RaviartThomasNodal"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_RaviartThomasNodal<dim>>>();
-    result["FE_RT_Bubbles"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_RT_Bubbles<dim>>>();
-    result["FE_Nedelec"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Nedelec<dim>>>();
-    result["FE_DGPNonparametric"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGPNonparametric<dim>>>();
-    result["FE_DGP"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGP<dim>>>();
-    result["FE_DGPMonomial"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGPMonomial<dim>>>();
-    result["FE_DGQ"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGQ<dim>>>();
-    result["FE_DGQArbitraryNodes"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGQ<dim>>>();
-    result["FE_DGQLegendre"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGQLegendre<dim>>>();
-    result["FE_DGQHermite"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGQHermite<dim>>>();
-    result["FE_FaceQ"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_FaceQ<dim>>>();
-    result["FE_FaceP"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_FaceP<dim>>>();
-    result["FE_Q"] = std_cxx14::make_unique<FETools::FEFactory<FE_Q<dim>>>();
-    result["FE_Q_DG0"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_DG0<dim>>>();
-    result["FE_Q_Bubbles"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_Bubbles<dim>>>();
-    result["FE_Q_iso_Q1"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_iso_Q1<dim>>>();
-    result["FE_Nothing"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Nothing<dim>>>();
-    result["FE_RannacherTurek"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_RannacherTurek<dim>>>();
-  }
+
+
+  namespace internal
+  {
+    // The following three functions serve to fill the maps from element
+    // names to elements fe_name_map below. The first one exists because
+    // we have finite elements which are not implemented for nonzero
+    // codimension. These should be transferred to the second function
+    // eventually.
+    namespace FEToolsAddFENameHelper
+    {
+      template <int dim>
+      void
+      fill_no_codim_fe_names(
+        std::map<std::string, std::unique_ptr<const Subscriptor>> &result)
+      {
+        result["FE_Q_Hierarchical"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Q_Hierarchical<dim>>>();
+        result["FE_ABF"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_ABF<dim>>>();
+        result["FE_Bernstein"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Bernstein<dim>>>();
+        result["FE_BDM"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_BDM<dim>>>();
+        result["FE_DGBDM"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGBDM<dim>>>();
+        result["FE_DGNedelec"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGNedelec<dim>>>();
+        result["FE_DGRaviartThomas"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGRaviartThomas<dim>>>();
+        result["FE_RaviartThomas"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_RaviartThomas<dim>>>();
+        result["FE_RaviartThomasNodal"] = std_cxx14::make_unique<
+          FETools::FEFactory<FE_RaviartThomasNodal<dim>>>();
+        result["FE_RT_Bubbles"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_RT_Bubbles<dim>>>();
+        result["FE_Nedelec"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Nedelec<dim>>>();
+        result["FE_DGPNonparametric"] = std_cxx14::make_unique<
+          FETools::FEFactory<FE_DGPNonparametric<dim>>>();
+        result["FE_DGP"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGP<dim>>>();
+        result["FE_DGPMonomial"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGPMonomial<dim>>>();
+        result["FE_DGQ"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGQ<dim>>>();
+        result["FE_DGQArbitraryNodes"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGQ<dim>>>();
+        result["FE_DGQLegendre"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGQLegendre<dim>>>();
+        result["FE_DGQHermite"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_DGQHermite<dim>>>();
+        result["FE_FaceQ"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_FaceQ<dim>>>();
+        result["FE_FaceP"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_FaceP<dim>>>();
+        result["FE_Q"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Q<dim>>>();
+        result["FE_Q_DG0"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Q_DG0<dim>>>();
+        result["FE_Q_Bubbles"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Q_Bubbles<dim>>>();
+        result["FE_Q_iso_Q1"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Q_iso_Q1<dim>>>();
+        result["FE_Nothing"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_Nothing<dim>>>();
+        result["FE_RannacherTurek"] =
+          std_cxx14::make_unique<FETools::FEFactory<FE_RannacherTurek<dim>>>();
+      }
 
-  // This function fills a map from names to finite elements for any
-  // dimension and codimension for those elements which support
-  // nonzero codimension.
-  template <int dim, int spacedim>
-  void
-  fill_codim_fe_names(
-    std::map<std::string, std::unique_ptr<const Subscriptor>> &result)
-  {
-    result["FE_Bernstein"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Bernstein<dim, spacedim>>>();
-    result["FE_DGP"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGP<dim, spacedim>>>();
-    result["FE_DGQ"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGQ<dim, spacedim>>>();
-    result["FE_Nothing"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Nothing<dim, spacedim>>>();
-    result["FE_DGQArbitraryNodes"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_DGQ<dim, spacedim>>>();
-    result["FE_DGQLegendre"] = std_cxx14::make_unique<
-      FETools::FEFactory<FE_DGQLegendre<dim, spacedim>>>();
-    result["FE_DGQHermite"] = std_cxx14::make_unique<
-      FETools::FEFactory<FE_DGQHermite<dim, spacedim>>>();
-    result["FE_Q_Bubbles"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_Bubbles<dim, spacedim>>>();
-    result["FE_Q_DG0"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_DG0<dim, spacedim>>>();
-    result["FE_Q_iso_Q1"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q_iso_Q1<dim, spacedim>>>();
-    result["FE_Q"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Q<dim, spacedim>>>();
-    result["FE_Bernstein"] =
-      std_cxx14::make_unique<FETools::FEFactory<FE_Bernstein<dim, spacedim>>>();
-  }
+      // This function fills a map from names to finite elements for any
+      // dimension and codimension for those elements which support
+      // nonzero codimension.
+ template + void + fill_codim_fe_names( + std::map> &result) + { + result["FE_Bernstein"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + result["FE_DGP"] = + std_cxx14::make_unique>>(); + result["FE_DGQ"] = + std_cxx14::make_unique>>(); + result["FE_Nothing"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + result["FE_DGQArbitraryNodes"] = + std_cxx14::make_unique>>(); + result["FE_DGQLegendre"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + result["FE_DGQHermite"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + result["FE_Q_Bubbles"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + result["FE_Q_DG0"] = + std_cxx14::make_unique>>(); + result["FE_Q_iso_Q1"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + result["FE_Q"] = + std_cxx14::make_unique>>(); + result["FE_Bernstein"] = std_cxx14::make_unique< + FETools::FEFactory>>(); + } - // The function filling the vector fe_name_map below. It iterates - // through all legal dimension/spacedimension pairs and fills - // fe_name_map[dimension][spacedimension] with the maps generated - // by the functions above. - std::array< - std::array>, 4>, - 4> - fill_default_map() - { - std::array< - std::array>, 4>, - 4> - result; + // The function filling the vector fe_name_map below. It iterates + // through all legal dimension/spacedimension pairs and fills + // fe_name_map[dimension][spacedimension] with the maps generated + // by the functions above. + std::array< + std::array>, + 4>, + 4> + fill_default_map() + { + std::array< + std::array>, + 4>, + 4> + result; - fill_no_codim_fe_names<1>(result[1][1]); - fill_no_codim_fe_names<2>(result[2][2]); - fill_no_codim_fe_names<3>(result[3][3]); + fill_no_codim_fe_names<1>(result[1][1]); + fill_no_codim_fe_names<2>(result[2][2]); + fill_no_codim_fe_names<3>(result[3][3]); - fill_codim_fe_names<1, 2>(result[1][2]); - fill_codim_fe_names<1, 3>(result[1][3]); - fill_codim_fe_names<2, 3>(result[2][3]); + fill_codim_fe_names<1, 2>(result[1][2]); + fill_codim_fe_names<1, 3>(result[1][3]); + fill_codim_fe_names<2, 3>(result[2][3]); - return result; - } + return result; + } - // have a lock that guarantees that at most one thread is changing - // and accessing the fe_name_map variable. make this lock local to - // this file. - // - // this and the next variable are declared static (even though - // they're in an anonymous namespace) in order to make icc happy - // (which otherwise reports a multiply defined symbol when linking - // libraries for more than one space dimension together - static Threads::Mutex fe_name_map_lock; - - // This is the map used by FETools::get_fe_by_name and - // FETools::add_fe_name. It is only accessed by functions in this - // file, so it is safe to make it a static variable here. It must be - // static so that we can link several dimensions together. - - // The organization of this storage is such that - // fe_name_map[dim][spacedim][name] points to an - // FEFactoryBase with the name given. Since - // all entries of this vector are of different type, we store - // pointers to generic objects and cast them when needed. - - // We use a unique pointer to factory objects, to ensure that they - // get deleted at the end of the program run and don't end up as - // apparent memory leaks to programs like valgrind. - - // This vector is initialized at program start time using the - // function above. because at this time there are no threads - // running, there are no thread-safety issues here. 
since this is - // compiled for all dimensions at once, need to create objects for - // each dimension and then separate between them further down - static std::array< - std::array>, 4>, - 4> - fe_name_map = fill_default_map(); -} // namespace - - - -namespace -{ - // forwarder function for - // FE::get_interpolation_matrix. we - // will want to call that function - // for arbitrary FullMatrix - // types, but it only accepts - // double arguments. since it is a - // virtual function, this can also - // not be changed. so have a - // forwarder function that calls - // that function directly if - // T==double, and otherwise uses a - // temporary - template - inline void - gim_forwarder(const FiniteElement &fe1, - const FiniteElement &fe2, - FullMatrix & interpolation_matrix) - { - fe2.get_interpolation_matrix(fe1, interpolation_matrix); - } + // have a lock that guarantees that at most one thread is changing + // and accessing the fe_name_map variable. make this lock local to + // this file. + // + // this and the next variable are declared static (even though + // they're in an anonymous namespace) in order to make icc happy + // (which otherwise reports a multiply defined symbol when linking + // libraries for more than one space dimension together + static Threads::Mutex fe_name_map_lock; + + // This is the map used by FETools::get_fe_by_name and + // FETools::add_fe_name. It is only accessed by functions in this + // file, so it is safe to make it a static variable here. It must be + // static so that we can link several dimensions together. + + // The organization of this storage is such that + // fe_name_map[dim][spacedim][name] points to an + // FEFactoryBase with the name given. Since + // all entries of this vector are of different type, we store + // pointers to generic objects and cast them when needed. + + // We use a unique pointer to factory objects, to ensure that they + // get deleted at the end of the program run and don't end up as + // apparent memory leaks to programs like valgrind. + + // This vector is initialized at program start time using the + // function above. because at this time there are no threads + // running, there are no thread-safety issues here. since this is + // compiled for all dimensions at once, need to create objects for + // each dimension and then separate between them further down + static std::array< + std::array>, + 4>, + 4> + fe_name_map = fill_default_map(); + } // namespace FEToolsAddFENameHelper + + namespace FEToolsGetInterpolationMatrixHelper + { + // forwarder function for + // FE::get_interpolation_matrix. we + // will want to call that function + // for arbitrary FullMatrix + // types, but it only accepts + // double arguments. since it is a + // virtual function, this can also + // not be changed. 
so have a + // forwarder function that calls + // that function directly if + // T==double, and otherwise uses a + // temporary + template + inline void + gim_forwarder(const FiniteElement &fe1, + const FiniteElement &fe2, + FullMatrix & interpolation_matrix) + { + fe2.get_interpolation_matrix(fe1, interpolation_matrix); + } - template - inline void - gim_forwarder(const FiniteElement &fe1, - const FiniteElement &fe2, - FullMatrix & interpolation_matrix) - { - FullMatrix tmp(interpolation_matrix.m(), interpolation_matrix.n()); - fe2.get_interpolation_matrix(fe1, tmp); - interpolation_matrix = tmp; - } + template + inline void + gim_forwarder(const FiniteElement &fe1, + const FiniteElement &fe2, + FullMatrix & interpolation_matrix) + { + FullMatrix tmp(interpolation_matrix.m(), + interpolation_matrix.n()); + fe2.get_interpolation_matrix(fe1, tmp); + interpolation_matrix = tmp; + } - // return how many characters - // starting at the given position - // of the string match either the - // generic string "" or the - // specialized string with "dim" - // replaced with the numeric value - // of the template argument - template - inline unsigned int - match_dimension(const std::string &name, const unsigned int position) - { - if (position >= name.size()) - return 0; + // return how many characters + // starting at the given position + // of the string match either the + // generic string "" or the + // specialized string with "dim" + // replaced with the numeric value + // of the template argument + template + inline unsigned int + match_dimension(const std::string &name, const unsigned int position) + { + if (position >= name.size()) + return 0; - if ((position + 5 < name.size()) && (name[position] == '<') && - (name[position + 1] == 'd') && (name[position + 2] == 'i') && - (name[position + 3] == 'm') && (name[position + 4] == '>')) - return 5; + if ((position + 5 < name.size()) && (name[position] == '<') && + (name[position + 1] == 'd') && (name[position + 2] == 'i') && + (name[position + 3] == 'm') && (name[position + 4] == '>')) + return 5; - Assert(dim < 10, ExcNotImplemented()); - const char dim_char = '0' + dim; + Assert(dim < 10, ExcNotImplemented()); + const char dim_char = '0' + dim; - if ((position + 3 < name.size()) && (name[position] == '<') && - (name[position + 1] == dim_char) && (name[position + 2] == '>')) - return 3; + if ((position + 3 < name.size()) && (name[position] == '<') && + (name[position + 1] == dim_char) && (name[position + 2] == '>')) + return 3; + + // some other string that doesn't + // match + return 0; + } + } // namespace FEToolsGetInterpolationMatrixHelper + } // namespace internal - // some other string that doesn't - // match - return 0; - } -} // namespace -namespace FETools -{ template void compute_component_wise(const FiniteElement & element, @@ -1485,7 +1490,8 @@ namespace FETools bool fe_implements_interpolation = true; try { - gim_forwarder(fe1, fe2, interpolation_matrix); + internal::FEToolsGetInterpolationMatrixHelper::gim_forwarder( + fe1, fe2, interpolation_matrix); } catch ( typename FiniteElement::ExcInterpolationNotImplemented &) @@ -1631,8 +1637,8 @@ namespace FETools mass(i, j) += v * val2.shape_value(j, k) * dx; } } - // Invert the matrix. Gauss-Jordan should be sufficient since we expect the - // mass matrix to be well-conditioned + // Invert the matrix. 
Gauss-Jordan should be sufficient since we expect + // the mass matrix to be well-conditioned mass.gauss_jordan(); // Now, test every function of fe1 with test functions of fe2 and @@ -1674,9 +1680,9 @@ namespace FETools const std::vector> &points = fe.get_generalized_support_points(); - // We need the values of the polynomials in all generalized support points. - // This function specifically works for the case where shape functions - // have 'dim' vector components, so allocate that much space + // We need the values of the polynomials in all generalized support + // points. This function specifically works for the case where shape + // functions have 'dim' vector components, so allocate that much space std::vector> support_point_values(points.size(), Vector(dim)); @@ -1717,241 +1723,213 @@ namespace FETools - /* - template <> - void - compute_embedding_matrices(const FiniteElement<1,2> &, - std::vector > > &, - const bool) + namespace internal + { + namespace FEToolsComputeEmbeddingMatricesHelper { - Assert(false, ExcNotImplemented()); - } + template + void + compute_embedding_for_shape_function( + const unsigned int i, + const FiniteElement &fe, + const FEValues & coarse, + const Householder & H, + FullMatrix & this_matrix, + const double threshold) + { + const unsigned int n = fe.dofs_per_cell; + const unsigned int nd = fe.n_components(); + const unsigned int nq = coarse.n_quadrature_points; + Vector v_coarse(nq * nd); + Vector v_fine(n); - template <> - void - compute_embedding_matrices(const FiniteElement<1,3> &, - std::vector > > &, - const bool) - { - Assert(false, ExcNotImplemented()); - } + // The right hand side of + // the least squares + // problem consists of the + // function values of the + // coarse grid function in + // each quadrature point. + if (fe.is_primitive()) + { + const unsigned int d = fe.system_to_component_index(i).first; + const double * phi_i = &coarse.shape_value(i, 0); + for (unsigned int k = 0; k < nq; ++k) + v_coarse(k * nd + d) = phi_i[k]; + } + else + for (unsigned int d = 0; d < nd; ++d) + for (unsigned int k = 0; k < nq; ++k) + v_coarse(k * nd + d) = coarse.shape_value_component(i, k, d); + + // solve the least squares + // problem. + const double result = H.least_squares(v_fine, v_coarse); + Assert(result <= threshold, FETools::ExcLeastSquaresError(result)); + // Avoid warnings in release mode + (void)result; + (void)threshold; + + // Copy into the result + // matrix. Since the matrix + // maps a coarse grid + // function to a fine grid + // function, the columns + // are fine grid. + for (unsigned int j = 0; j < n; ++j) + this_matrix(j, i) = v_fine(j); + } - template <> - void - compute_embedding_matrices(const FiniteElement<2,3>&, - std::vector > >&, - const bool) - { - Assert(false, ExcNotImplemented()); - } - */ - namespace - { - template - void - compute_embedding_for_shape_function(const unsigned int i, - const FiniteElement &fe, - const FEValues &coarse, - const Householder & H, - FullMatrix &this_matrix, - const double threshold) - { - const unsigned int n = fe.dofs_per_cell; - const unsigned int nd = fe.n_components(); - const unsigned int nq = coarse.n_quadrature_points; - - Vector v_coarse(nq * nd); - Vector v_fine(n); - - // The right hand side of - // the least squares - // problem consists of the - // function values of the - // coarse grid function in - // each quadrature point. 
- if (fe.is_primitive()) - { - const unsigned int d = fe.system_to_component_index(i).first; - const double * phi_i = &coarse.shape_value(i, 0); + template + void + compute_embedding_matrices_for_refinement_case( + const FiniteElement &fe, + std::vector> & matrices, + const unsigned int ref_case, + const double threshold) + { + const unsigned int n = fe.dofs_per_cell; + const unsigned int nc = + GeometryInfo::n_children(RefinementCase(ref_case)); + for (unsigned int i = 0; i < nc; ++i) + { + Assert(matrices[i].n() == n, + ExcDimensionMismatch(matrices[i].n(), n)); + Assert(matrices[i].m() == n, + ExcDimensionMismatch(matrices[i].m(), n)); + } - for (unsigned int k = 0; k < nq; ++k) - v_coarse(k * nd + d) = phi_i[k]; - } + // Set up meshes, one with a single + // reference cell and refine it once + Triangulation tria; + GridGenerator::hyper_cube(tria, 0, 1); + tria.begin_active()->set_refine_flag(RefinementCase(ref_case)); + tria.execute_coarsening_and_refinement(); - else - for (unsigned int d = 0; d < nd; ++d) - for (unsigned int k = 0; k < nq; ++k) - v_coarse(k * nd + d) = coarse.shape_value_component(i, k, d); - - // solve the least squares - // problem. - const double result = H.least_squares(v_fine, v_coarse); - Assert(result <= threshold, ExcLeastSquaresError(result)); - // Avoid warnings in release mode - (void)result; - (void)threshold; - - // Copy into the result - // matrix. Since the matrix - // maps a coarse grid - // function to a fine grid - // function, the columns - // are fine grid. - for (unsigned int j = 0; j < n; ++j) - this_matrix(j, i) = v_fine(j); - } + const unsigned int degree = fe.degree; + QGauss q_fine(degree + 1); + const unsigned int nq = q_fine.size(); + FEValues fine(fe, + q_fine, + update_quadrature_points | + update_JxW_values | update_values); + // We search for the polynomial on + // the small cell, being equal to + // the coarse polynomial in all + // quadrature points. - template - void - compute_embedding_matrices_for_refinement_case( - const FiniteElement &fe, - std::vector> & matrices, - const unsigned int ref_case, - const double threshold) - { - const unsigned int n = fe.dofs_per_cell; - const unsigned int nc = - GeometryInfo::n_children(RefinementCase(ref_case)); - for (unsigned int i = 0; i < nc; ++i) - { - Assert(matrices[i].n() == n, - ExcDimensionMismatch(matrices[i].n(), n)); - Assert(matrices[i].m() == n, - ExcDimensionMismatch(matrices[i].m(), n)); - } + // First build the matrix for this + // least squares problem. This + // contains the values of the fine + // cell polynomials in the fine + // cell grid points. - // Set up meshes, one with a single - // reference cell and refine it once - Triangulation tria; - GridGenerator::hyper_cube(tria, 0, 1); - tria.begin_active()->set_refine_flag(RefinementCase(ref_case)); - tria.execute_coarsening_and_refinement(); + // This matrix is the same for all + // children. 
+ fine.reinit(tria.begin_active()); + const unsigned int nd = fe.n_components(); + FullMatrix A(nq * nd, n); - const unsigned int degree = fe.degree; - QGauss q_fine(degree + 1); - const unsigned int nq = q_fine.size(); + for (unsigned int j = 0; j < n; ++j) + for (unsigned int d = 0; d < nd; ++d) + for (unsigned int k = 0; k < nq; ++k) + A(k * nd + d, j) = fine.shape_value_component(j, k, d); - FEValues fine(fe, - q_fine, - update_quadrature_points | - update_JxW_values | update_values); + Householder H(A); + unsigned int cell_number = 0; - // We search for the polynomial on - // the small cell, being equal to - // the coarse polynomial in all - // quadrature points. - - // First build the matrix for this - // least squares problem. This - // contains the values of the fine - // cell polynomials in the fine - // cell grid points. - - // This matrix is the same for all - // children. - fine.reinit(tria.begin_active()); - const unsigned int nd = fe.n_components(); - FullMatrix A(nq * nd, n); - - for (unsigned int j = 0; j < n; ++j) - for (unsigned int d = 0; d < nd; ++d) - for (unsigned int k = 0; k < nq; ++k) - A(k * nd + d, j) = fine.shape_value_component(j, k, d); - - Householder H(A); - unsigned int cell_number = 0; - - Threads::TaskGroup task_group; - - for (typename Triangulation::active_cell_iterator - fine_cell = tria.begin_active(); - fine_cell != tria.end(); - ++fine_cell, ++cell_number) - { - fine.reinit(fine_cell); - - // evaluate on the coarse cell (which - // is the first -- inactive -- cell on - // the lowest level of the - // triangulation we have created) - const std::vector> &q_points_fine = - fine.get_quadrature_points(); - std::vector> q_points_coarse(q_points_fine.size()); - for (unsigned int i = 0; i < q_points_fine.size(); ++i) - for (unsigned int j = 0; j < dim; ++j) - q_points_coarse[i](j) = q_points_fine[i](j); - const Quadrature q_coarse(q_points_coarse, - fine.get_JxW_values()); - FEValues coarse(fe, q_coarse, update_values); - - coarse.reinit(tria.begin(0)); - - FullMatrix &this_matrix = matrices[cell_number]; - - // Compute this once for each - // coarse grid basis function. can - // spawn subtasks if n is - // sufficiently large so that there - // are more than about 5000 - // operations in the inner loop - // (which is basically const * n^2 - // operations). 
- if (n > 30) - { - for (unsigned int i = 0; i < n; ++i) - { - task_group += Threads::new_task( - &compute_embedding_for_shape_function, - i, - fe, - coarse, - H, - this_matrix, - threshold); - } - task_group.join_all(); - } - else - { - for (unsigned int i = 0; i < n; ++i) - { - compute_embedding_for_shape_function( - i, fe, coarse, H, this_matrix, threshold); - } - } + Threads::TaskGroup task_group; - // Remove small entries from - // the matrix - for (unsigned int i = 0; i < this_matrix.m(); ++i) - for (unsigned int j = 0; j < this_matrix.n(); ++j) - if (std::fabs(this_matrix(i, j)) < 1e-12) - this_matrix(i, j) = 0.; - } + for (typename Triangulation::active_cell_iterator + fine_cell = tria.begin_active(); + fine_cell != tria.end(); + ++fine_cell, ++cell_number) + { + fine.reinit(fine_cell); - Assert(cell_number == - GeometryInfo::n_children(RefinementCase(ref_case)), - ExcInternalError()); - } - } // namespace + // evaluate on the coarse cell (which + // is the first -- inactive -- cell on + // the lowest level of the + // triangulation we have created) + const std::vector> &q_points_fine = + fine.get_quadrature_points(); + std::vector> q_points_coarse(q_points_fine.size()); + for (unsigned int i = 0; i < q_points_fine.size(); ++i) + for (unsigned int j = 0; j < dim; ++j) + q_points_coarse[i](j) = q_points_fine[i](j); + const Quadrature q_coarse(q_points_coarse, + fine.get_JxW_values()); + FEValues coarse(fe, q_coarse, update_values); + + coarse.reinit(tria.begin(0)); + + FullMatrix &this_matrix = matrices[cell_number]; + + // Compute this once for each + // coarse grid basis function. can + // spawn subtasks if n is + // sufficiently large so that there + // are more than about 5000 + // operations in the inner loop + // (which is basically const * n^2 + // operations). 
+                  if (n > 30)
+                    {
+                      for (unsigned int i = 0; i < n; ++i)
+                        {
+                          task_group += Threads::new_task(
+                            &compute_embedding_for_shape_function<dim,
+                                                                  number,
+                                                                  spacedim>,
+                            i,
+                            fe,
+                            coarse,
+                            H,
+                            this_matrix,
+                            threshold);
+                        }
+                      task_group.join_all();
+                    }
+                  else
+                    {
+                      for (unsigned int i = 0; i < n; ++i)
+                        {
+                          compute_embedding_for_shape_function(
+                            i, fe, coarse, H, this_matrix, threshold);
+                        }
+                    }
+
+                  // Remove small entries from
+                  // the matrix
+                  for (unsigned int i = 0; i < this_matrix.m(); ++i)
+                    for (unsigned int j = 0; j < this_matrix.n(); ++j)
+                      if (std::fabs(this_matrix(i, j)) < 1e-12)
+                        this_matrix(i, j) = 0.;
+                }
+
+              Assert(cell_number ==
+                       GeometryInfo<dim>::n_children(
+                         RefinementCase<dim>(ref_case)),
+                     ExcInternalError());
+            }
+        } // namespace FEToolsComputeEmbeddingMatricesHelper
+      }   // namespace internal
 
 
 
   template <int dim, typename number, int spacedim>
   void
-  compute_embedding_matrices(
-    const FiniteElement<dim, spacedim> &          fe,
-    std::vector<std::vector<FullMatrix<number>>> &matrices,
-    const bool                                    isotropic_only,
-    const double                                  threshold)
+  compute_embedding_matrices(const FiniteElement<dim, spacedim> &fe,
+                             std::vector<std::vector<FullMatrix<number>>>
+                               &matrices,
+                             const bool   isotropic_only,
+                             const double threshold)
   {
     Threads::TaskGroup<void> task_group;
 
@@ -1962,7 +1940,8 @@ namespace FETools
     for (; ref_case <= RefinementCase<dim>::isotropic_refinement;
         ++ref_case)
       task_group += Threads::new_task(
-        &compute_embedding_matrices_for_refinement_case<dim, number, spacedim>,
+        &internal::FEToolsComputeEmbeddingMatricesHelper::
+          compute_embedding_matrices_for_refinement_case<dim, number, spacedim>,
        fe,
        matrices[ref_case - 1],
        ref_case,
@@ -1972,7 +1951,6 @@ namespace FETools
   }
 
 
-
   template <int dim, typename number, int spacedim>
   void
   compute_face_embedding_matrices(
@@ -2118,7 +2096,6 @@ namespace FETools
     Vector<number> v_fine(n);
 
 
-
     for (unsigned int cell_number = 0;
          cell_number < GeometryInfo<dim>::max_children_per_face;
         ++cell_number)
@@ -2165,7 +2142,7 @@ namespace FETools
          // solve the least squares
          // problem.
          const double result = H.least_squares(v_fine, v_coarse);
-          Assert(result <= threshold, ExcLeastSquaresError(result));
+          Assert(result <= threshold, FETools::ExcLeastSquaresError(result));
          // Avoid compiler warnings in Release mode
          (void)result;
          (void)threshold;
@@ -2189,13 +2166,13 @@ namespace FETools
   }
 
 
-
   template <int dim, typename number, int spacedim>
   void
-  compute_projection_matrices(
-    const FiniteElement<dim, spacedim> &          fe,
-    std::vector<std::vector<FullMatrix<number>>> &matrices,
-    const bool                                    isotropic_only)
+  compute_projection_matrices(const FiniteElement<dim, spacedim> &fe,
+                              std::vector<std::vector<FullMatrix<number>>>
+                                &matrices,
+                              const bool isotropic_only)
   {
     const unsigned int n  = fe.dofs_per_cell;
     const unsigned int nd = fe.n_components();
@@ -2224,7 +2201,11 @@ namespace FETools
      const std::vector<double> &JxW = coarse.get_JxW_values();
      for (unsigned int i = 0; i < n; ++i)
        for (unsigned int j = 0; j < n; ++j)
-          if (fe.is_primitive())
+          if (fe.is_primitive())
            {
              const double *coarse_i = &coarse.shape_value(i, 0);
              const double *coarse_j = &coarse.shape_value(j, 0);
@@ -2244,7 +2225,9 @@ namespace FETools
        }
 
      // invert mass matrix
-      mass.gauss_jordan();
+      mass.gauss_jordan();
    }
 
 
@@ -2257,17 +2240,42 @@ namespace FETools
 
        for (unsigned int i = 0; i < nc; ++i)
          {
-            Assert(matrices[i].n() == n,
-                   ExcDimensionMismatch(matrices[i].n(), n));
-            Assert(matrices[i].m() == n,
-                   ExcDimensionMismatch(matrices[i].m(), n));
+            Assert(matrices[i].n() == n,
+                   ExcDimensionMismatch(matrices[i].n(), n));
+            Assert(matrices[i].m() == n,
+                   ExcDimensionMismatch(matrices[i].m(), n));
          }
 
        // create a respective refinement on the triangulation
        Triangulation<dim, spacedim> tr;
        GridGenerator::hyper_cube(tr, 0, 1);
-        tr.begin_active()->set_refine_flag(RefinementCase<dim>(ref_case));
-        tr.execute_coarsening_and_refinement();
+        tr.begin_active()->set_refine_flag(RefinementCase<dim>(ref_case));
+        tr.execute_coarsening_and_refinement();
 
        FEValues<dim, spacedim> fine(StaticMappingQ1<dim, spacedim>::mapping,
                                     fe,
@@ -2290,8 +2298,16 @@ namespace FETools
            fine.reinit(coarse_cell->child(cell_number));
            const std::vector<Point<spacedim>> &q_points_fine =
              fine.get_quadrature_points();
-            std::vector<Point<dim>> q_points_coarse(q_points_fine.size());
-            for (unsigned int q = 0; q < q_points_fine.size(); ++q)
+            std::vector<Point<dim>> q_points_coarse(q_points_fine.size());
+            for (unsigned int q = 0; q < q_points_fine.size(); ++q)
              for (unsigned int j = 0; j < dim; ++j)
                q_points_coarse[q](j) = q_points_fine[q](j);
             Quadrature<dim> q_coarse(q_points_coarse, fine.get_JxW_values());
@@ -2311,7 +2327,11 @@ namespace FETools
            {
              for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
                {
-                  if (fe.is_primitive())
+                  if (fe.is_primitive())
                    {
                      const double *coarse_i = &coarse.shape_value(i, 0);
                      const double *fine_j   = &fine.shape_value(j, 0);
@@ -2340,8 +2360,16 @@ namespace FETools
        }
 
        // Remove small entries from the matrix
-        for (unsigned int i = 0; i < this_matrix.m(); ++i)
-          for (unsigned int j = 0; j < this_matrix.n(); ++j)
+        for (unsigned int i = 0; i < this_matrix.m(); ++i)
+          for (unsigned int j = 0; j < this_matrix.n(); ++j)
           if (std::fabs(this_matrix(i, j)) < 1e-12)
             this_matrix(i, j) = 0.;
        }
 
          compute_one_case(ref_case, mass, matrices[ref_case - 1]);
        });
 
-    tasks.join_all();
-  }
+    tasks.join_all();
+  }
 
   template <int dim, int spacedim>
   void
@@ -2380,22 +2409,25 @@ namespace FETools
    // operation of this function;
    // for this, acquire the lock
    // until we quit this function
-    Threads::Mutex::ScopedLock lock(fe_name_map_lock);
+    Threads::Mutex::ScopedLock lock(
+      internal::FEToolsAddFENameHelper::fe_name_map_lock);
 
    Assert(
-      fe_name_map[dim][spacedim].find(name) == fe_name_map[dim][spacedim].end(),
+      internal::FEToolsAddFENameHelper::fe_name_map[dim][spacedim].find(name) ==
+        internal::FEToolsAddFENameHelper::fe_name_map[dim][spacedim].end(),
      ExcMessage("Cannot change existing element in finite element name list"));
 
    // Insert the normalized name into
    // the map
-    fe_name_map[dim][spacedim][name] =
+    internal::FEToolsAddFENameHelper::fe_name_map[dim][spacedim][name] =
      std::unique_ptr<const Subscriptor>(factory);
  }
 
 
+
  namespace internal
  {
-    namespace
+    namespace FEToolsGetFEHelper
    {
      // TODO: this encapsulates the call to the
      // dimension-dependent fe_name_map so that we
@@ -2526,17 +2558,18 @@ namespace FETools
          // so this properly returns
          // FE_Nothing<dim>()
          const Subscriptor *ptr = fe_name_map.find(name_part)->second.get();
-          const FEFactoryBase<dim, spacedim> *fef =
-            dynamic_cast<const FEFactoryBase<dim, spacedim> *>(ptr);
+          const FETools::FEFactoryBase<dim, spacedim> *fef =
+            dynamic_cast<const FETools::FEFactoryBase<dim, spacedim> *>(ptr);
          return fef->get(1);
        }
      else
        {
          // Make sure no other thread
          // is just adding an element
-          Threads::Mutex::ScopedLock lock(fe_name_map_lock);
+          Threads::Mutex::ScopedLock lock(
+            internal::FEToolsAddFENameHelper::fe_name_map_lock);
          AssertThrow(fe_name_map.find(name_part) != fe_name_map.end(),
-                      ExcInvalidFEName(name));
+                      FETools::ExcInvalidFEName(name));
 
          // Now, just the (degree)
          // or (Quadrature<1>(degree+1))
@@ -2551,8 +2584,9 @@ namespace FETools
              name.erase(0, tmp.second + 1);
 
              const Subscriptor *ptr = fe_name_map.find(name_part)->second.get();
-              const FEFactoryBase<dim, spacedim> *fef =
-                dynamic_cast<const FEFactoryBase<dim, spacedim> *>(ptr);
+              const FETools::FEFactoryBase<dim, spacedim> *fef =
+                dynamic_cast<const FETools::FEFactoryBase<dim, spacedim> *>(
+                  ptr);
              return fef->get(tmp.first);
            }
          else
@@ -2568,8 +2602,9 @@ namespace FETools
                  name.erase(0, tmp.second + 2);
 
                  const Subscriptor *ptr =
                    fe_name_map.find(name_part)->second.get();
-                  const FEFactoryBase<dim, spacedim> *fef =
-                    dynamic_cast<const FEFactoryBase<dim, spacedim> *>(ptr);
+                  const FETools::FEFactoryBase<dim, spacedim> *fef =
+                    dynamic_cast<
+                      const FETools::FEFactoryBase<dim, spacedim> *>(ptr);
                  return fef->get(QGaussLobatto<1>(tmp.first));
                }
              else if (quadrature_name.compare("QGauss") == 0)
                {
@@ -2580,8 +2615,9 @@ namespace FETools
                  name.erase(0, tmp.second + 2);
 
                  const Subscriptor *ptr =
                    fe_name_map.find(name_part)->second.get();
-                  const FEFactoryBase<dim, spacedim> *fef =
-                    dynamic_cast<const FEFactoryBase<dim, spacedim> *>(ptr);
+                  const FETools::FEFactoryBase<dim, spacedim> *fef =
+                    dynamic_cast<
+                      const FETools::FEFactoryBase<dim, spacedim> *>(ptr);
                  return fef->get(QGauss<1>(tmp.first));
                }
              else if (quadrature_name.compare("QIterated") == 0)
                {
@@ -2601,8 +2637,9 @@ namespace FETools
                  name.erase(0, tmp.second + 2);
 
                  const Subscriptor *ptr =
                    fe_name_map.find(name_part)->second.get();
-                  const FEFactoryBase<dim, spacedim> *fef =
-                    dynamic_cast<const FEFactoryBase<dim, spacedim> *>(ptr);
+                  const FETools::FEFactoryBase<dim, spacedim> *fef =
+                    dynamic_cast<
+                      const FETools::FEFactoryBase<dim, spacedim> *>(ptr);
                  return fef->get(QIterated<1>(QTrapez<1>(), tmp.first));
                }
              else
@@ -2617,7 +2654,7 @@ namespace FETools
      // didn't know what to do with the
      // string we got. so do as the docs
      // say: raise an exception
-      AssertThrow(false, ExcInvalidFEName(name));
+      AssertThrow(false, FETools::ExcInvalidFEName(name));
 
      // make some compilers happy that
      // do not realize that we can't get
@@ -2631,10 +2668,10 @@ namespace FETools
      std::unique_ptr<FiniteElement<dim, spacedim>>
      get_fe_by_name(std::string &name)
      {
-        return get_fe_by_name_ext<dim, spacedim>(name,
-                                                 fe_name_map[dim][spacedim]);
+        return get_fe_by_name_ext<dim, spacedim>(
+          name, FEToolsAddFENameHelper::fe_name_map[dim][spacedim]);
      }
-    } // namespace
+    } // namespace FEToolsGetFEHelper
  } // namespace internal
 
 
@@ -2713,7 +2750,8 @@ namespace FETools
 
    try
      {
-        auto fe = internal::get_fe_by_name<dim, spacedim>(name);
+        auto fe =
+          internal::FEToolsGetFEHelper::get_fe_by_name<dim, spacedim>(name);
 
        // Make sure the auxiliary function
        // ate up all characters of the name.
@@ -2985,113 +3023,115 @@ namespace FETools - namespace + namespace internal { - // Helper functions for - // FETools::convert_generalized_support_point_values_to_dof_values - - template - static void - convert_helper(const FiniteElement &finite_element, - const std::vector> & support_point_values, - std::vector & dof_values) + namespace FEToolsConvertHelper { - static Threads::ThreadLocalStorage>> - double_support_point_values; - static Threads::ThreadLocalStorage> double_dof_values; + // Helper functions for + // FETools::convert_generalized_support_point_values_to_dof_values + + template + static void + convert_helper(const FiniteElement &finite_element, + const std::vector> & support_point_values, + std::vector & dof_values) + { + static Threads::ThreadLocalStorage>> + double_support_point_values; + static Threads::ThreadLocalStorage> + double_dof_values; - double_support_point_values.get().resize(support_point_values.size()); - double_dof_values.get().resize(dof_values.size()); + double_support_point_values.get().resize(support_point_values.size()); + double_dof_values.get().resize(dof_values.size()); - for (unsigned int i = 0; i < support_point_values.size(); ++i) - { - double_support_point_values.get()[i].reinit( - finite_element.n_components(), false); - std::copy(std::begin(support_point_values[i]), - std::end(support_point_values[i]), - std::begin(double_support_point_values.get()[i])); - } + for (unsigned int i = 0; i < support_point_values.size(); ++i) + { + double_support_point_values.get()[i].reinit( + finite_element.n_components(), false); + std::copy(std::begin(support_point_values[i]), + std::end(support_point_values[i]), + std::begin(double_support_point_values.get()[i])); + } - finite_element.convert_generalized_support_point_values_to_dof_values( - double_support_point_values.get(), double_dof_values.get()); + finite_element.convert_generalized_support_point_values_to_dof_values( + double_support_point_values.get(), double_dof_values.get()); - std::copy(std::begin(double_dof_values.get()), - std::end(double_dof_values.get()), - std::begin(dof_values)); - } + std::copy(std::begin(double_dof_values.get()), + std::end(double_dof_values.get()), + std::begin(dof_values)); + } - template - static void - convert_helper( - const FiniteElement & finite_element, - const std::vector>> &support_point_values, - std::vector> & dof_values) - { - static Threads::ThreadLocalStorage>> - double_support_point_values_real; - static Threads::ThreadLocalStorage> - double_dof_values_real; - static Threads::ThreadLocalStorage>> - double_support_point_values_imag; - static Threads::ThreadLocalStorage> - double_dof_values_imag; - - double_support_point_values_real.get().resize( - support_point_values.size()); - double_dof_values_real.get().resize(dof_values.size()); - double_support_point_values_imag.get().resize( - support_point_values.size()); - double_dof_values_imag.get().resize(dof_values.size()); - - for (unsigned int i = 0; i < support_point_values.size(); ++i) - { - double_support_point_values_real.get()[i].reinit( - finite_element.n_components(), false); - double_support_point_values_imag.get()[i].reinit( - finite_element.n_components(), false); - - std::transform(std::begin(support_point_values[i]), - std::end(support_point_values[i]), - std::begin(double_support_point_values_real.get()[i]), - [](std::complex c) -> double { - return c.real(); - }); - - std::transform(std::begin(support_point_values[i]), - std::end(support_point_values[i]), - 
std::begin(double_support_point_values_imag.get()[i]), - [](std::complex c) -> double { - return c.imag(); - }); - } + template + static void + convert_helper( + const FiniteElement & finite_element, + const std::vector>> &support_point_values, + std::vector> & dof_values) + { + static Threads::ThreadLocalStorage>> + double_support_point_values_real; + static Threads::ThreadLocalStorage> + double_dof_values_real; + static Threads::ThreadLocalStorage>> + double_support_point_values_imag; + static Threads::ThreadLocalStorage> + double_dof_values_imag; + + double_support_point_values_real.get().resize( + support_point_values.size()); + double_dof_values_real.get().resize(dof_values.size()); + double_support_point_values_imag.get().resize( + support_point_values.size()); + double_dof_values_imag.get().resize(dof_values.size()); + + for (unsigned int i = 0; i < support_point_values.size(); ++i) + { + double_support_point_values_real.get()[i].reinit( + finite_element.n_components(), false); + double_support_point_values_imag.get()[i].reinit( + finite_element.n_components(), false); + + std::transform( + std::begin(support_point_values[i]), + std::end(support_point_values[i]), + std::begin(double_support_point_values_real.get()[i]), + [](std::complex c) -> double { return c.real(); }); + + std::transform( + std::begin(support_point_values[i]), + std::end(support_point_values[i]), + std::begin(double_support_point_values_imag.get()[i]), + [](std::complex c) -> double { return c.imag(); }); + } - finite_element.convert_generalized_support_point_values_to_dof_values( - double_support_point_values_real.get(), double_dof_values_real.get()); - finite_element.convert_generalized_support_point_values_to_dof_values( - double_support_point_values_imag.get(), double_dof_values_imag.get()); - - std::transform(std::begin(double_dof_values_real.get()), - std::end(double_dof_values_real.get()), - std::begin(double_dof_values_imag.get()), - std::begin(dof_values), - [](number real, number imag) -> std::complex { - return {real, imag}; - }); - } + finite_element.convert_generalized_support_point_values_to_dof_values( + double_support_point_values_real.get(), double_dof_values_real.get()); + finite_element.convert_generalized_support_point_values_to_dof_values( + double_support_point_values_imag.get(), double_dof_values_imag.get()); + + std::transform(std::begin(double_dof_values_real.get()), + std::end(double_dof_values_real.get()), + std::begin(double_dof_values_imag.get()), + std::begin(dof_values), + [](number real, number imag) -> std::complex { + return {real, imag}; + }); + } - template - static void - convert_helper(const FiniteElement &finite_element, - const std::vector> & support_point_values, - std::vector & dof_values) - { - finite_element.convert_generalized_support_point_values_to_dof_values( - support_point_values, dof_values); - } + template + static void + convert_helper(const FiniteElement &finite_element, + const std::vector> & support_point_values, + std::vector & dof_values) + { + finite_element.convert_generalized_support_point_values_to_dof_values( + support_point_values, dof_values); + } - } /* anonymous namespace */ + } // namespace FEToolsConvertHelper + } // namespace internal @@ -3106,9 +3146,8 @@ namespace FETools finite_element.get_generalized_support_points().size()); AssertDimension(dof_values.size(), finite_element.dofs_per_cell); - convert_helper(finite_element, - support_point_values, - dof_values); + internal::FEToolsConvertHelper::convert_helper( + finite_element, 
support_point_values, dof_values); } @@ -3321,7 +3360,7 @@ namespace FETools hierarchic_to_lexicographic_numbering(fe)); } -} // end of namespace FETools +} // namespace FETools DEAL_II_NAMESPACE_CLOSE diff --git a/include/deal.II/fe/fe_tools_extrapolate.templates.h b/include/deal.II/fe/fe_tools_extrapolate.templates.h index 0d8029407c..aaa5297c54 100644 --- a/include/deal.II/fe/fe_tools_extrapolate.templates.h +++ b/include/deal.II/fe/fe_tools_extrapolate.templates.h @@ -1521,220 +1521,217 @@ namespace FETools } #endif // DEAL_II_WITH_P4EST - namespace + template + struct BlockTypeHelper { - template - struct BlockTypeHelper - { - using type = VectorType; - }; + using type = VectorType; + }; - template - struct BlockTypeHelper< - VectorType, - typename std::enable_if::value>::type> - { - using type = typename VectorType::BlockType; - }; + template + struct BlockTypeHelper< + VectorType, + typename std::enable_if::value>::type> + { + using type = typename VectorType::BlockType; + }; - template - using BlockType = typename BlockTypeHelper::type; + template + using BlockType = typename BlockTypeHelper::type; - template - void - reinit_distributed(const DH &dh, VectorType &vector) - { - vector.reinit(dh.n_dofs()); - } + template + void + reinit_distributed(const DH &dh, VectorType &vector) + { + vector.reinit(dh.n_dofs()); + } #ifdef DEAL_II_WITH_PETSC - template - void - reinit_distributed(const DoFHandler &dh, - PETScWrappers::MPI::Vector & vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - &dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); - } + template + void + reinit_distributed(const DoFHandler &dh, + PETScWrappers::MPI::Vector & vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); + } #endif // DEAL_II_WITH_PETSC #ifdef DEAL_II_WITH_TRILINOS - template - void - reinit_distributed(const DoFHandler &dh, - TrilinosWrappers::MPI::Vector & vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - &dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); - } + template + void + reinit_distributed(const DoFHandler &dh, + TrilinosWrappers::MPI::Vector & vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); + } # ifdef DEAL_II_WITH_MPI - template - void - reinit_distributed(const DoFHandler & dh, - LinearAlgebra::EpetraWrappers::Vector &vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - 
&dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); - } + template + void + reinit_distributed(const DoFHandler & dh, + LinearAlgebra::EpetraWrappers::Vector &vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); + } # endif #endif // DEAL_II_WITH_TRILINOS - template - void - reinit_distributed(const DoFHandler & dh, - LinearAlgebra::distributed::Vector &vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - &dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); - } + template + void + reinit_distributed(const DoFHandler & dh, + LinearAlgebra::distributed::Vector &vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + vector.reinit(locally_owned_dofs, parallel_tria->get_communicator()); + } - template - void - reinit_ghosted(const DH & /*dh*/, VectorType & /*vector*/) - { - Assert(false, ExcNotImplemented()); - } + template + void + reinit_ghosted(const DH & /*dh*/, VectorType & /*vector*/) + { + Assert(false, ExcNotImplemented()); + } #ifdef DEAL_II_WITH_PETSC - template - void - reinit_ghosted(const DoFHandler &dh, - PETScWrappers::MPI::Vector & vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - &dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - IndexSet locally_relevant_dofs; - DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); - vector.reinit(locally_owned_dofs, - locally_relevant_dofs, - parallel_tria->get_communicator()); - } + template + void + reinit_ghosted(const DoFHandler &dh, + PETScWrappers::MPI::Vector & vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + vector.reinit(locally_owned_dofs, + locally_relevant_dofs, + parallel_tria->get_communicator()); + } #endif // DEAL_II_WITH_PETSC #ifdef DEAL_II_WITH_TRILINOS - template - void - reinit_ghosted(const DoFHandler &dh, - TrilinosWrappers::MPI::Vector & vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - &dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - 
IndexSet locally_relevant_dofs; - DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); - vector.reinit(locally_owned_dofs, - locally_relevant_dofs, - parallel_tria->get_communicator()); - } + template + void + reinit_ghosted(const DoFHandler &dh, + TrilinosWrappers::MPI::Vector & vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + vector.reinit(locally_owned_dofs, + locally_relevant_dofs, + parallel_tria->get_communicator()); + } #endif // DEAL_II_WITH_TRILINOS - template - void - reinit_ghosted(const DoFHandler & dh, - LinearAlgebra::distributed::Vector &vector) - { - const parallel::distributed::Triangulation - *parallel_tria = dynamic_cast< - const parallel::distributed::Triangulation *>( - &dh.get_triangulation()); - Assert(parallel_tria != nullptr, ExcNotImplemented()); - const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); - IndexSet locally_relevant_dofs; - DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); - vector.reinit(locally_owned_dofs, - locally_relevant_dofs, - parallel_tria->get_communicator()); - } - + template + void + reinit_ghosted(const DoFHandler & dh, + LinearAlgebra::distributed::Vector &vector) + { + const parallel::distributed::Triangulation *parallel_tria = + dynamic_cast< + const parallel::distributed::Triangulation *>( + &dh.get_triangulation()); + Assert(parallel_tria != nullptr, ExcNotImplemented()); + const IndexSet &locally_owned_dofs = dh.locally_owned_dofs(); + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + vector.reinit(locally_owned_dofs, + locally_relevant_dofs, + parallel_tria->get_communicator()); + } - template - void - extrapolate_serial(const InVector & u3, - const DoFHandler &dof2, - OutVector & u2) - { - const unsigned int dofs_per_cell = dof2.get_fe().dofs_per_cell; - Vector dof_values(dofs_per_cell); - // then traverse grid bottom up - for (unsigned int level = 0; - level < dof2.get_triangulation().n_levels() - 1; - ++level) - { - typename DoFHandler::cell_iterator cell = dof2.begin( - level), - endc = - dof2.end(level); + template + void + extrapolate_serial(const InVector & u3, + const DoFHandler &dof2, + OutVector & u2) + { + const unsigned int dofs_per_cell = dof2.get_fe().dofs_per_cell; + Vector dof_values(dofs_per_cell); - for (; cell != endc; ++cell) - if (!cell->active()) - { - // check whether this - // cell has active - // children - bool active_children = false; - for (unsigned int child_n = 0; child_n < cell->n_children(); - ++child_n) - if (cell->child(child_n)->active()) - { - active_children = true; - break; - } + // then traverse grid bottom up + for (unsigned int level = 0; + level < dof2.get_triangulation().n_levels() - 1; + ++level) + { + typename DoFHandler::cell_iterator cell = + dof2.begin(level), + endc = + dof2.end(level); - // if there are active - // children, this process - // has to work on this - // cell. 
get the data - // from the one vector - // and set it on the - // other - if (active_children) + for (; cell != endc; ++cell) + if (!cell->active()) + { + // check whether this + // cell has active + // children + bool active_children = false; + for (unsigned int child_n = 0; child_n < cell->n_children(); + ++child_n) + if (cell->child(child_n)->active()) { - cell->get_interpolated_dof_values(u3, dof_values); - cell->set_dof_values_by_interpolation(dof_values, u2); + active_children = true; + break; } - } - } - } - } // namespace - } // namespace internal + + // if there are active + // children, this process + // has to work on this + // cell. get the data + // from the one vector + // and set it on the + // other + if (active_children) + { + cell->get_interpolated_dof_values(u3, dof_values); + cell->set_dof_values_by_interpolation(dof_values, u2); + } + } + } + } + } // namespace internal template void diff --git a/include/deal.II/fe/fe_tools_interpolate.templates.h b/include/deal.II/fe/fe_tools_interpolate.templates.h index c59a2d371a..27ed8f0329 100644 --- a/include/deal.II/fe/fe_tools_interpolate.templates.h +++ b/include/deal.II/fe/fe_tools_interpolate.templates.h @@ -367,116 +367,109 @@ namespace FETools namespace internal { - namespace + template + void + back_interpolate( + const DoFHandler & dof1, + const AffineConstraints &constraints1, + const InVector & u1, + const DoFHandler & dof2, + const AffineConstraints &constraints2, + InVector & u1_interpolated) { - template - void - back_interpolate( - const DoFHandler & dof1, - const AffineConstraints &constraints1, - const InVector & u1, - const DoFHandler & dof2, - const AffineConstraints &constraints2, - InVector & u1_interpolated) - { - Vector u2(dof2.n_dofs()); - interpolate(dof1, u1, dof2, constraints2, u2); - interpolate(dof2, u2, dof1, constraints1, u1_interpolated); - } + Vector u2(dof2.n_dofs()); + interpolate(dof1, u1, dof2, constraints2, u2); + interpolate(dof2, u2, dof1, constraints1, u1_interpolated); + } - // special version for PETSc + // special version for PETSc #ifdef DEAL_II_WITH_PETSC - template - void - back_interpolate( - const DoFHandler &dof1, - const AffineConstraints - & constraints1, - const PETScWrappers::MPI::Vector &u1, - const DoFHandler & dof2, - const AffineConstraints - & constraints2, - PETScWrappers::MPI::Vector &u1_interpolated) - { - // if u1 is a parallel distributed PETSc vector, we create a - // vector u2 with based on the sets of locally owned and relevant - // dofs of dof2 - const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); - IndexSet dof2_locally_relevant_dofs; - DoFTools::extract_locally_relevant_dofs(dof2, - dof2_locally_relevant_dofs); - - PETScWrappers::MPI::Vector u2_out(dof2_locally_owned_dofs, - u1.get_mpi_communicator()); - interpolate(dof1, u1, dof2, constraints2, u2_out); - PETScWrappers::MPI::Vector u2(dof2_locally_owned_dofs, - dof2_locally_relevant_dofs, - u1.get_mpi_communicator()); - u2 = u2_out; - interpolate(dof2, u2, dof1, constraints1, u1_interpolated); - } + template + void + back_interpolate( + const DoFHandler &dof1, + const AffineConstraints + & constraints1, + const PETScWrappers::MPI::Vector &u1, + const DoFHandler & dof2, + const AffineConstraints + & constraints2, + PETScWrappers::MPI::Vector &u1_interpolated) + { + // if u1 is a parallel distributed PETSc vector, we create a + // vector u2 with based on the sets of locally owned and relevant + // dofs of dof2 + const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); + IndexSet 
dof2_locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof2, dof2_locally_relevant_dofs); + + PETScWrappers::MPI::Vector u2_out(dof2_locally_owned_dofs, + u1.get_mpi_communicator()); + interpolate(dof1, u1, dof2, constraints2, u2_out); + PETScWrappers::MPI::Vector u2(dof2_locally_owned_dofs, + dof2_locally_relevant_dofs, + u1.get_mpi_communicator()); + u2 = u2_out; + interpolate(dof2, u2, dof1, constraints1, u1_interpolated); + } #endif - // special version for Trilinos + // special version for Trilinos #ifdef DEAL_II_WITH_TRILINOS - template - void - back_interpolate( - const DoFHandler &dof1, - const AffineConstraints< - typename TrilinosWrappers::MPI::Vector::value_type> &constraints1, - const TrilinosWrappers::MPI::Vector & u1, - const DoFHandler & dof2, - const AffineConstraints< - typename TrilinosWrappers::MPI::Vector::value_type> &constraints2, - TrilinosWrappers::MPI::Vector & u1_interpolated) - { - // if u1 is a parallel distributed Trilinos vector, we create a - // vector u2 with based on the sets of locally owned and relevant - // dofs of dof2 - const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); - IndexSet dof2_locally_relevant_dofs; - DoFTools::extract_locally_relevant_dofs(dof2, - dof2_locally_relevant_dofs); - - TrilinosWrappers::MPI::Vector u2_out(dof2_locally_owned_dofs, - u1.get_mpi_communicator()); - interpolate(dof1, u1, dof2, constraints2, u2_out); - TrilinosWrappers::MPI::Vector u2(dof2_locally_owned_dofs, - dof2_locally_relevant_dofs, - u1.get_mpi_communicator()); - u2 = u2_out; - interpolate(dof2, u2, dof1, constraints1, u1_interpolated); - } + template + void + back_interpolate( + const DoFHandler &dof1, + const AffineConstraints< + typename TrilinosWrappers::MPI::Vector::value_type> &constraints1, + const TrilinosWrappers::MPI::Vector & u1, + const DoFHandler & dof2, + const AffineConstraints< + typename TrilinosWrappers::MPI::Vector::value_type> &constraints2, + TrilinosWrappers::MPI::Vector & u1_interpolated) + { + // if u1 is a parallel distributed Trilinos vector, we create a + // vector u2 with based on the sets of locally owned and relevant + // dofs of dof2 + const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); + IndexSet dof2_locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof2, dof2_locally_relevant_dofs); + + TrilinosWrappers::MPI::Vector u2_out(dof2_locally_owned_dofs, + u1.get_mpi_communicator()); + interpolate(dof1, u1, dof2, constraints2, u2_out); + TrilinosWrappers::MPI::Vector u2(dof2_locally_owned_dofs, + dof2_locally_relevant_dofs, + u1.get_mpi_communicator()); + u2 = u2_out; + interpolate(dof2, u2, dof1, constraints1, u1_interpolated); + } #endif - // special version for LinearAlgebra::distributed::Vector - template - void - back_interpolate( - const DoFHandler & dof1, - const AffineConstraints & constraints1, - const LinearAlgebra::distributed::Vector &u1, - const DoFHandler & dof2, - const AffineConstraints & constraints2, - LinearAlgebra::distributed::Vector & u1_interpolated) - { - const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); - IndexSet dof2_locally_relevant_dofs; - DoFTools::extract_locally_relevant_dofs(dof2, - dof2_locally_relevant_dofs); - - LinearAlgebra::distributed::Vector u2( - dof2_locally_owned_dofs, - dof2_locally_relevant_dofs, - u1.get_mpi_communicator()); - - interpolate(dof1, u1, dof2, constraints2, u2); - u2.update_ghost_values(); - interpolate(dof2, u2, dof1, constraints1, u1_interpolated); - } - } // namespace - } // namespace internal 
+ // special version for LinearAlgebra::distributed::Vector + template + void + back_interpolate( + const DoFHandler & dof1, + const AffineConstraints & constraints1, + const LinearAlgebra::distributed::Vector &u1, + const DoFHandler & dof2, + const AffineConstraints & constraints2, + LinearAlgebra::distributed::Vector & u1_interpolated) + { + const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); + IndexSet dof2_locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof2, dof2_locally_relevant_dofs); + + LinearAlgebra::distributed::Vector u2(dof2_locally_owned_dofs, + dof2_locally_relevant_dofs, + u1.get_mpi_communicator()); + + interpolate(dof1, u1, dof2, constraints2, u2); + u2.update_ghost_values(); + interpolate(dof2, u2, dof1, constraints1, u1_interpolated); + } + } // namespace internal @@ -596,53 +589,50 @@ namespace FETools namespace internal { - namespace + template + void + interpolation_difference( + const DoFHandler & dof1, + const AffineConstraints &constraints1, + const InVector & u1, + const DoFHandler & dof2, + const AffineConstraints &constraints2, + OutVector & u1_difference) { - template - void - interpolation_difference( - const DoFHandler & dof1, - const AffineConstraints &constraints1, - const InVector & u1, - const DoFHandler & dof2, - const AffineConstraints &constraints2, - OutVector & u1_difference) - { - back_interpolate( - dof1, constraints1, u1, dof2, constraints2, u1_difference); - u1_difference.sadd(-1., 1., u1); - } + back_interpolate( + dof1, constraints1, u1, dof2, constraints2, u1_difference); + u1_difference.sadd(-1., 1., u1); + } - // special version for Trilinos + // special version for Trilinos #ifdef DEAL_II_WITH_TRILINOS - template - void - interpolation_difference( - const DoFHandler &dof1, - const AffineConstraints - & constraints1, - const TrilinosWrappers::MPI::Vector &u1, - const DoFHandler & dof2, - const AffineConstraints - & constraints2, - TrilinosWrappers::MPI::Vector &u1_difference) - { - back_interpolate( - dof1, constraints1, u1, dof2, constraints2, u1_difference); - - // Trilinos vectors with and without ghost entries are very different - // and we cannot use the sadd function directly, so we have to create - // a completely distributed vector first and copy the local entries - // from the vector with ghost entries - TrilinosWrappers::MPI::Vector u1_completely_distributed; - u1_completely_distributed.reinit(u1_difference, true); - u1_completely_distributed = u1; - - u1_difference.sadd(-1, u1_completely_distributed); - } + template + void + interpolation_difference( + const DoFHandler &dof1, + const AffineConstraints + & constraints1, + const TrilinosWrappers::MPI::Vector &u1, + const DoFHandler & dof2, + const AffineConstraints + & constraints2, + TrilinosWrappers::MPI::Vector &u1_difference) + { + back_interpolate( + dof1, constraints1, u1, dof2, constraints2, u1_difference); + + // Trilinos vectors with and without ghost entries are very different + // and we cannot use the sadd function directly, so we have to create + // a completely distributed vector first and copy the local entries + // from the vector with ghost entries + TrilinosWrappers::MPI::Vector u1_completely_distributed; + u1_completely_distributed.reinit(u1_difference, true); + u1_completely_distributed = u1; + + u1_difference.sadd(-1, u1_completely_distributed); + } #endif - } // namespace - } // namespace internal + } // namespace internal diff --git a/include/deal.II/lac/affine_constraints.templates.h 
b/include/deal.II/lac/affine_constraints.templates.h
index d38c490589..7cd691b39b 100644
--- a/include/deal.II/lac/affine_constraints.templates.h
+++ b/include/deal.II/lac/affine_constraints.templates.h
@@ -1905,120 +1905,116 @@ namespace internal
 {
   namespace AffineConstraintsImplementation
   {
-    namespace
-    {
-      typedef types::global_dof_index size_type;
+    typedef types::global_dof_index size_type;
 
-      template <typename VectorType>
-      void
-      set_zero_parallel(const std::vector<size_type> &cm,
-                        VectorType &                  vec,
-                        size_type                     shift = 0)
-      {
-        Assert(!vec.has_ghost_elements(), ExcInternalError());
-        IndexSet locally_owned = vec.locally_owned_elements();
-        for (typename std::vector<size_type>::const_iterator it = cm.begin();
-             it != cm.end();
-             ++it)
-          {
-            // If shift>0 then we are working on a part of a BlockVector
-            // so vec(i) is actually the global entry i+shift.
-            // We first make sure the line falls into the range of vec,
-            // then check if is part of the local part of the vector, before
-            // finally setting it to 0.
-            if ((*it) < shift)
-              continue;
-            size_type idx = *it - shift;
-            if (idx < vec.size() && locally_owned.is_element(idx))
-              internal::ElementAccess<VectorType>::set(0., idx, vec);
-          }
-      }
+    template <typename VectorType>
+    void
+    set_zero_parallel(const std::vector<size_type> &cm,
+                      VectorType &                  vec,
+                      size_type                     shift = 0)
+    {
+      Assert(!vec.has_ghost_elements(), ExcInternalError());
+      IndexSet locally_owned = vec.locally_owned_elements();
+      for (typename std::vector<size_type>::const_iterator it = cm.begin();
+           it != cm.end();
+           ++it)
+        {
+          // If shift>0 then we are working on a part of a BlockVector
+          // so vec(i) is actually the global entry i+shift.
+          // We first make sure the line falls into the range of vec,
+          // then check if it is part of the local part of the vector, before
+          // finally setting it to 0.
+          if ((*it) < shift)
+            continue;
+          size_type idx = *it - shift;
+          if (idx < vec.size() && locally_owned.is_element(idx))
+            internal::ElementAccess<VectorType>::set(0., idx, vec);
+        }
+    }
 
-      template <typename number>
-      void
-      set_zero_parallel(const std::vector<size_type> &              cm,
-                        LinearAlgebra::distributed::Vector<number> &vec,
-                        size_type                                   shift = 0)
-      {
-        for (typename std::vector<size_type>::const_iterator it = cm.begin();
-             it != cm.end();
-             ++it)
-          {
-            // If shift>0 then we are working on a part of a BlockVector
-            // so vec(i) is actually the global entry i+shift.
-            // We first make sure the line falls into the range of vec,
-            // then check if is part of the local part of the vector, before
-            // finally setting it to 0.
-            if ((*it) < shift)
-              continue;
-            size_type idx = *it - shift;
-            if (vec.in_local_range(idx))
-              vec(idx) = 0.;
-          }
-        vec.zero_out_ghosts();
-      }
+    template <typename number>
+    void
+    set_zero_parallel(const std::vector<size_type> &              cm,
+                      LinearAlgebra::distributed::Vector<number> &vec,
+                      size_type                                   shift = 0)
+    {
+      for (typename std::vector<size_type>::const_iterator it = cm.begin();
+           it != cm.end();
+           ++it)
+        {
+          // If shift>0 then we are working on a part of a BlockVector
+          // so vec(i) is actually the global entry i+shift.
+          // We first make sure the line falls into the range of vec,
+          // then check if it is part of the local part of the vector, before
+          // finally setting it to 0.
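// set_zero_parallel above takes a shift because block vectors are handled
// by calling it once per block; which path runs is decided in
// set_zero_in_parallel (next hunk) by a std::integral_constant tag. A
// self-contained sketch of that dispatch, with hypothetical
// PlainVector/BlockVector types in place of the deal.II classes:
#include <iostream>
#include <type_traits>

struct PlainVector {};
struct BlockVector {};

// hypothetical trait mirroring dealii::IsBlockVector
template <typename T> struct IsBlockVector : std::false_type {};
template <> struct IsBlockVector<BlockVector> : std::true_type {};

template <typename V>
void set_zero_in_parallel(V &, std::integral_constant<bool, false>)
{
  std::cout << "plain path: zero the local entries directly\n";
}

template <typename V>
void set_zero_in_parallel(V &, std::integral_constant<bool, true>)
{
  std::cout << "block path: recurse into the blocks with a shift\n";
}

template <typename V>
void set_zero_all(V &v)
{
  // the tag is a compile-time constant, so the other overload is
  // simply never called for a given vector type
  set_zero_in_parallel(
    v, std::integral_constant<bool, IsBlockVector<V>::value>());
}

int main()
{
  PlainVector p;
  BlockVector b;
  set_zero_all(p);
  set_zero_all(b);
}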
+ if ((*it) < shift) + continue; + size_type idx = *it - shift; + if (vec.in_local_range(idx)) + vec(idx) = 0.; + } + vec.zero_out_ghosts(); + } - template - void - set_zero_in_parallel(const std::vector &cm, - VectorType & vec, - std::integral_constant) - { - set_zero_parallel(cm, vec, 0); - } + template + void + set_zero_in_parallel(const std::vector &cm, + VectorType & vec, + std::integral_constant) + { + set_zero_parallel(cm, vec, 0); + } - // in parallel for BlockVectors - template - void - set_zero_in_parallel(const std::vector &cm, - VectorType & vec, - std::integral_constant) - { - size_type start_shift = 0; - for (size_type j = 0; j < vec.n_blocks(); ++j) - { - set_zero_parallel(cm, vec.block(j), start_shift); - start_shift += vec.block(j).size(); - } - } + // in parallel for BlockVectors + template + void + set_zero_in_parallel(const std::vector &cm, + VectorType & vec, + std::integral_constant) + { + size_type start_shift = 0; + for (size_type j = 0; j < vec.n_blocks(); ++j) + { + set_zero_parallel(cm, vec.block(j), start_shift); + start_shift += vec.block(j).size(); + } + } - template - void - set_zero_serial(const std::vector &cm, VectorType &vec) - { - for (typename std::vector::const_iterator it = cm.begin(); - it != cm.end(); - ++it) - vec(*it) = 0.; - } + template + void + set_zero_serial(const std::vector &cm, VectorType &vec) + { + for (typename std::vector::const_iterator it = cm.begin(); + it != cm.end(); + ++it) + vec(*it) = 0.; + } - template - void - set_zero_all(const std::vector &cm, VectorType &vec) - { - set_zero_in_parallel( - cm, - vec, - std::integral_constant::value>()); - vec.compress(VectorOperation::insert); - } + template + void + set_zero_all(const std::vector &cm, VectorType &vec) + { + set_zero_in_parallel( + cm, + vec, + std::integral_constant::value>()); + vec.compress(VectorOperation::insert); + } - template - void - set_zero_all(const std::vector &cm, dealii::Vector &vec) - { - set_zero_serial(cm, vec); - } + template + void + set_zero_all(const std::vector &cm, dealii::Vector &vec) + { + set_zero_serial(cm, vec); + } - template - void - set_zero_all(const std::vector &cm, - dealii::BlockVector & vec) - { - set_zero_serial(cm, vec); - } - } // namespace - } // namespace AffineConstraintsImplementation + template + void + set_zero_all(const std::vector &cm, dealii::BlockVector &vec) + { + set_zero_serial(cm, vec); + } + } // namespace AffineConstraintsImplementation } // namespace internal template @@ -2160,115 +2156,112 @@ AffineConstraints::distribute_local_to_global( namespace internal { - namespace - { - // create an output vector that consists of the input vector's locally owned - // elements plus some ghost elements that need to be imported from elsewhere - // - // this is an operation that is different for all vector types and so we - // need a few overloads + // create an output vector that consists of the input vector's locally owned + // elements plus some ghost elements that need to be imported from elsewhere + // + // this is an operation that is different for all vector types and so we + // need a few overloads #ifdef DEAL_II_WITH_TRILINOS - void - import_vector_with_ghost_elements( - const TrilinosWrappers::MPI::Vector &vec, - const IndexSet & /*locally_owned_elements*/, - const IndexSet & needed_elements, - TrilinosWrappers::MPI::Vector &output, - const std::integral_constant /*is_block_vector*/) - { - Assert(!vec.has_ghost_elements(), ExcGhostsPresent()); + void + import_vector_with_ghost_elements( + const 
TrilinosWrappers::MPI::Vector &vec,
+    const IndexSet & /*locally_owned_elements*/,
+    const IndexSet &               needed_elements,
+    TrilinosWrappers::MPI::Vector &output,
+    const std::integral_constant<bool, false> /*is_block_vector*/)
+  {
+    Assert(!vec.has_ghost_elements(), ExcGhostsPresent());
 # ifdef DEAL_II_WITH_MPI
-      const Epetra_MpiComm *mpi_comm =
-        dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
+    const Epetra_MpiComm *mpi_comm =
+      dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
 
-      Assert(mpi_comm != nullptr, ExcInternalError());
-      output.reinit(needed_elements, mpi_comm->GetMpiComm());
+    Assert(mpi_comm != nullptr, ExcInternalError());
+    output.reinit(needed_elements, mpi_comm->GetMpiComm());
 # else
-      output.reinit(needed_elements, MPI_COMM_SELF);
+    output.reinit(needed_elements, MPI_COMM_SELF);
 # endif
-      output = vec;
-    }
+    output = vec;
+  }
 #endif
 
 #ifdef DEAL_II_WITH_PETSC
-    void
-    import_vector_with_ghost_elements(
-      const PETScWrappers::MPI::Vector &vec,
-      const IndexSet &                  locally_owned_elements,
-      const IndexSet &                  needed_elements,
-      PETScWrappers::MPI::Vector &      output,
-      const std::integral_constant<bool, false> /*is_block_vector*/)
-    {
-      output.reinit(locally_owned_elements,
-                    needed_elements,
-                    vec.get_mpi_communicator());
-      output = vec;
-    }
+  void
+  import_vector_with_ghost_elements(
+    const PETScWrappers::MPI::Vector &vec,
+    const IndexSet &                  locally_owned_elements,
+    const IndexSet &                  needed_elements,
+    PETScWrappers::MPI::Vector &      output,
+    const std::integral_constant<bool, false> /*is_block_vector*/)
+  {
+    output.reinit(locally_owned_elements,
+                  needed_elements,
+                  vec.get_mpi_communicator());
+    output = vec;
+  }
 #endif
 
-    template <typename number>
-    void
-    import_vector_with_ghost_elements(
-      const LinearAlgebra::distributed::Vector<number> &vec,
-      const IndexSet &                             locally_owned_elements,
-      const IndexSet &                             needed_elements,
-      LinearAlgebra::distributed::Vector<number> &output,
-      const std::integral_constant<bool, false> /*is_block_vector*/)
-    {
-      // TODO: the in vector might already have all elements. need to find a
-      // way to efficiently avoid the copy then
-      const_cast<LinearAlgebra::distributed::Vector<number> &>(vec)
-        .zero_out_ghosts();
-      output.reinit(locally_owned_elements,
-                    needed_elements,
-                    vec.get_mpi_communicator());
-      output = vec;
-      output.update_ghost_values();
-    }
+  template <typename number>
+  void
+  import_vector_with_ghost_elements(
+    const LinearAlgebra::distributed::Vector<number> &vec,
+    const IndexSet &                             locally_owned_elements,
+    const IndexSet &                             needed_elements,
+    LinearAlgebra::distributed::Vector<number> &output,
+    const std::integral_constant<bool, false> /*is_block_vector*/)
+  {
+    // TODO: the input vector might already have all elements. need to find a
+    // way to efficiently avoid the copy then
+    const_cast<LinearAlgebra::distributed::Vector<number> &>(vec)
+      .zero_out_ghosts();
+    output.reinit(locally_owned_elements,
+                  needed_elements,
+                  vec.get_mpi_communicator());
+    output = vec;
+    output.update_ghost_values();
+  }
 
-    // all other vector non-block vector types are sequential and we should
-    // not have this function called at all -- so throw an exception
-    template <typename T>
-    void
-    import_vector_with_ghost_elements(
-      const Vector<T> & /*vec*/,
-      const IndexSet & /*locally_owned_elements*/,
-      const IndexSet & /*needed_elements*/,
-      Vector<T> & /*output*/,
-      const std::integral_constant<bool, false> /*is_block_vector*/)
-    {
-      Assert(false, ExcMessage("We shouldn't even get here!"));
-    }
+  // all other non-block vector types are sequential and we should
+  // not have this function called at all -- so throw an exception
+  template <typename T>
+  void
+  import_vector_with_ghost_elements(
+    const Vector<T> & /*vec*/,
+    const IndexSet & /*locally_owned_elements*/,
+    const IndexSet & /*needed_elements*/,
+    Vector<T> & /*output*/,
+    const std::integral_constant<bool, false> /*is_block_vector*/)
+  {
+    Assert(false, ExcMessage("We shouldn't even get here!"));
+  }
 
-    // for block vectors, simply dispatch to the individual blocks
-    template <typename VectorType>
-    void
-    import_vector_with_ghost_elements(
-      const VectorType &vec,
-      const IndexSet &  locally_owned_elements,
-      const IndexSet &  needed_elements,
-      VectorType &      output,
-      const std::integral_constant<bool, true> /*is_block_vector*/)
-    {
-      output.reinit(vec.n_blocks());
+  // for block vectors, simply dispatch to the individual blocks
+  template <typename VectorType>
+  void
+  import_vector_with_ghost_elements(
+    const VectorType &vec,
+    const IndexSet &  locally_owned_elements,
+    const IndexSet &  needed_elements,
+    VectorType &      output,
+    const std::integral_constant<bool, true> /*is_block_vector*/)
+  {
+    output.reinit(vec.n_blocks());
 
-      types::global_dof_index block_start = 0;
-      for (unsigned int b = 0; b < vec.n_blocks(); ++b)
-        {
-          import_vector_with_ghost_elements(
-            vec.block(b),
-            locally_owned_elements.get_view(block_start,
-                                            block_start + vec.block(b).size()),
-            needed_elements.get_view(block_start,
-                                     block_start + vec.block(b).size()),
-            output.block(b),
-            std::integral_constant<bool, false>());
-          block_start += vec.block(b).size();
-        }
+    types::global_dof_index block_start = 0;
+    for (unsigned int b = 0; b < vec.n_blocks(); ++b)
+      {
+        import_vector_with_ghost_elements(
+          vec.block(b),
+          locally_owned_elements.get_view(block_start,
+                                          block_start + vec.block(b).size()),
+          needed_elements.get_view(block_start,
+                                   block_start + vec.block(b).size()),
+          output.block(b),
+          std::integral_constant<bool, false>());
+        block_start += vec.block(b).size();
+      }
 
-      output.collect_sizes();
-    }
-  } // namespace
+    output.collect_sizes();
+  }
 } // namespace internal
 
 template
diff --git a/include/deal.II/lac/full_matrix.templates.h b/include/deal.II/lac/full_matrix.templates.h
index da3d78b6d5..04941943f0 100644
--- a/include/deal.II/lac/full_matrix.templates.h
+++ b/include/deal.II/lac/full_matrix.templates.h
@@ -1209,54 +1209,51 @@ FullMatrix<number>::operator==(const FullMatrix<number> &M) const
 
 namespace internal
 {
-  namespace
+  // LAPACKFullMatrix is not implemented for
+  // complex numbers or long doubles
+  template <typename number, typename = void>
+  struct Determinant
   {
-    // LAPACKFullMatrix is not implemented for
-    // complex numbers or long doubles
-    template <typename number, typename = void>
-    struct Determinant
+    static number
+    value(const FullMatrix<number> &)
     {
-      static number
-      value(const FullMatrix<number> &)
-      {
-        AssertThrow(false, ExcNotImplemented());
-        return 0.0;
-      }
-    };
+      AssertThrow(false, ExcNotImplemented());
+      return 0.0;
+    }
+  };
 
-    //
LAPACKFullMatrix is only implemented for - // floats and doubles - template - struct Determinant< - number, - typename std::enable_if::value || - std::is_same::value>::type> - { + // LAPACKFullMatrix is only implemented for + // floats and doubles + template + struct Determinant< + number, + typename std::enable_if::value || + std::is_same::value>::type> + { #ifdef DEAL_II_WITH_LAPACK - static number - value(const FullMatrix &A) - { - using s_type = typename LAPACKFullMatrix::size_type; - AssertIndexRange(A.m() - 1, std::numeric_limits::max()); - AssertIndexRange(A.n() - 1, std::numeric_limits::max()); - LAPACKFullMatrix lp_A(static_cast(A.m()), - static_cast(A.n())); - lp_A = A; - lp_A.compute_lu_factorization(); - return lp_A.determinant(); - } + static number + value(const FullMatrix &A) + { + using s_type = typename LAPACKFullMatrix::size_type; + AssertIndexRange(A.m() - 1, std::numeric_limits::max()); + AssertIndexRange(A.n() - 1, std::numeric_limits::max()); + LAPACKFullMatrix lp_A(static_cast(A.m()), + static_cast(A.n())); + lp_A = A; + lp_A.compute_lu_factorization(); + return lp_A.determinant(); + } #else - static number - value(const FullMatrix &) - { - AssertThrow(false, ExcNeedsLAPACK()); - return 0.0; - } + static number + value(const FullMatrix &) + { + AssertThrow(false, ExcNeedsLAPACK()); + return 0.0; + } #endif - }; + }; - } // namespace } // namespace internal diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h index 054d6d796c..d2567d59d3 100644 --- a/include/deal.II/lac/la_parallel_block_vector.templates.h +++ b/include/deal.II/lac/la_parallel_block_vector.templates.h @@ -829,7 +829,7 @@ namespace LinearAlgebra - namespace + namespace internal { template inline void @@ -845,7 +845,7 @@ namespace LinearAlgebra else matrix.set_property(LAPACKSupport::general); } - } // namespace + } // namespace internal @@ -872,7 +872,7 @@ namespace LinearAlgebra // reset the matrix matrix = typename FullMatrixType::value_type(0.0); - set_symmetric(matrix, symmetric); + internal::set_symmetric(matrix, symmetric); if (symmetric) { Assert(m == n, ExcDimensionMismatch(m, n)); diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index 88cd2b61d4..44a45b1889 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -233,10 +233,8 @@ namespace LinearAlgebra { dealii::internal::VectorOperations::Vector_copy copier(v.values.get(), values.get()); - internal::VectorOperations::parallel_for(copier, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::parallel_for( + copier, 0, partitioner->local_size(), thread_loop_partitioner); } } @@ -372,10 +370,8 @@ namespace LinearAlgebra { dealii::internal::VectorOperations::Vector_copy copier(c.values.get(), values.get()); - internal::VectorOperations::parallel_for(copier, - 0, - this_size, - thread_loop_partitioner); + dealii::internal::VectorOperations::parallel_for( + copier, 0, this_size, thread_loop_partitioner); } if (must_update_ghost_values) @@ -397,10 +393,8 @@ namespace LinearAlgebra { dealii::internal::VectorOperations::Vector_copy copier(src.values.get(), values.get()); - internal::VectorOperations::parallel_for(copier, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::parallel_for( + copier, 0, partitioner->local_size(), thread_loop_partitioner); 
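// The Determinant specialization above relies on the classic enable_if
// idiom: the primary template carries a defaulted second type parameter,
// and the partial specialization only produces a valid type (void) when
// number is float or double. A stripped-down sketch of the mechanism,
// without LAPACK and with hypothetical return values:
#include <iostream>
#include <type_traits>

template <typename number, typename = void>
struct Determinant
{
  static number value()
  {
    std::cout << "generic fallback: not implemented\n";
    return number();
  }
};

// chosen exactly when std::enable_if<...>::type exists
template <typename number>
struct Determinant<number,
                   typename std::enable_if<
                     std::is_same<number, double>::value ||
                     std::is_same<number, float>::value>::type>
{
  static number value()
  {
    std::cout << "LAPACK-backed path\n";
    return number(1);
  }
};

int main()
{
  Determinant<int>::value();    // generic fallback
  Determinant<double>::value(); // SFINAE specialization
}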
} } @@ -771,13 +765,11 @@ namespace LinearAlgebra const size_type this_size = local_size(); if (this_size > 0) { - internal::VectorOperations::Vector_set setter(s, - values.get()); + dealii::internal::VectorOperations::Vector_set setter( + s, values.get()); - internal::VectorOperations::parallel_for(setter, - 0, - this_size, - thread_loop_partitioner); + dealii::internal::VectorOperations::parallel_for( + setter, 0, this_size, thread_loop_partitioner); } // if we call Vector::operator=0, we want to zero out all the entries @@ -816,12 +808,10 @@ namespace LinearAlgebra AssertDimension(local_size(), v.local_size()); - internal::VectorOperations::Vectorization_add_v vector_add( - values.get(), v.values.get()); - internal::VectorOperations::parallel_for(vector_add, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_add_v + vector_add(values.get(), v.values.get()); + dealii::internal::VectorOperations::parallel_for( + vector_add, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -842,12 +832,10 @@ namespace LinearAlgebra AssertDimension(local_size(), v.local_size()); - internal::VectorOperations::Vectorization_subtract_v + dealii::internal::VectorOperations::Vectorization_subtract_v vector_subtract(values.get(), v.values.get()); - internal::VectorOperations::parallel_for(vector_subtract, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::parallel_for( + vector_subtract, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -863,12 +851,10 @@ namespace LinearAlgebra { AssertIsFinite(a); - internal::VectorOperations::Vectorization_add_factor vector_add( - values.get(), a); - internal::VectorOperations::parallel_for(vector_add, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_add_factor + vector_add(values.get(), a); + dealii::internal::VectorOperations::parallel_for( + vector_add, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -893,12 +879,10 @@ namespace LinearAlgebra if (a == Number(0.)) return; - internal::VectorOperations::Vectorization_add_av vector_add( - values.get(), v.values.get(), a); - internal::VectorOperations::parallel_for(vector_add, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_add_av + vector_add(values.get(), v.values.get(), a); + dealii::internal::VectorOperations::parallel_for( + vector_add, 0, partitioner->local_size(), thread_loop_partitioner); } @@ -936,12 +920,10 @@ namespace LinearAlgebra AssertDimension(local_size(), v.local_size()); AssertDimension(local_size(), w.local_size()); - internal::VectorOperations::Vectorization_add_avpbw vector_add( - values.get(), v.values.get(), w.values.get(), a, b); - internal::VectorOperations::parallel_for(vector_add, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_add_avpbw + vector_add(values.get(), v.values.get(), w.values.get(), a, b); + dealii::internal::VectorOperations::parallel_for( + vector_add, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -969,12 +951,10 @@ namespace LinearAlgebra AssertIsFinite(x); AssertDimension(local_size(), v.local_size()); - internal::VectorOperations::Vectorization_sadd_xv 
vector_sadd( - values.get(), v.values.get(), x); - internal::VectorOperations::parallel_for(vector_sadd, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_sadd_xv + vector_sadd(values.get(), v.values.get(), x); + dealii::internal::VectorOperations::parallel_for( + vector_sadd, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -997,12 +977,10 @@ namespace LinearAlgebra AssertIsFinite(a); AssertDimension(local_size(), v.local_size()); - internal::VectorOperations::Vectorization_sadd_xav vector_sadd( - values.get(), v.values.get(), a, x); - internal::VectorOperations::parallel_for(vector_sadd, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_sadd_xav + vector_sadd(values.get(), v.values.get(), a, x); + dealii::internal::VectorOperations::parallel_for( + vector_sadd, 0, partitioner->local_size(), thread_loop_partitioner); } @@ -1036,12 +1014,10 @@ namespace LinearAlgebra AssertDimension(local_size(), v.local_size()); AssertDimension(local_size(), w.local_size()); - internal::VectorOperations::Vectorization_sadd_xavbw vector_sadd( - values.get(), v.values.get(), w.values.get(), x, a, b); - internal::VectorOperations::parallel_for(vector_sadd, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_sadd_xavbw + vector_sadd(values.get(), v.values.get(), w.values.get(), x, a, b); + dealii::internal::VectorOperations::parallel_for( + vector_sadd, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1054,13 +1030,11 @@ namespace LinearAlgebra Vector::operator*=(const Number factor) { AssertIsFinite(factor); - internal::VectorOperations::Vectorization_multiply_factor + dealii::internal::VectorOperations::Vectorization_multiply_factor vector_multiply(values.get(), factor); - internal::VectorOperations::parallel_for(vector_multiply, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::parallel_for( + vector_multiply, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1091,12 +1065,10 @@ namespace LinearAlgebra AssertDimension(local_size(), v.local_size()); - internal::VectorOperations::Vectorization_scale vector_scale( - values.get(), v.values.get()); - internal::VectorOperations::parallel_for(vector_scale, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_scale + vector_scale(values.get(), v.values.get()); + dealii::internal::VectorOperations::parallel_for( + vector_scale, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1116,12 +1088,10 @@ namespace LinearAlgebra AssertIsFinite(a); AssertDimension(local_size(), v.local_size()); - internal::VectorOperations::Vectorization_equ_au vector_equ( - values.get(), v.values.get(), a); - internal::VectorOperations::parallel_for(vector_equ, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_equ_au + vector_equ(values.get(), v.values.get(), a); + dealii::internal::VectorOperations::parallel_for( + vector_equ, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1142,12 +1112,10 @@ namespace LinearAlgebra AssertDimension(local_size(), 
v.local_size()); AssertDimension(local_size(), w.local_size()); - internal::VectorOperations::Vectorization_equ_aubv vector_equ( - values.get(), v.values.get(), w.values.get(), a, b); - internal::VectorOperations::parallel_for(vector_equ, - 0, - partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vectorization_equ_aubv + vector_equ(values.get(), v.values.get(), w.values.get(), a, b); + dealii::internal::VectorOperations::parallel_for( + vector_equ, 0, partitioner->local_size(), thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1197,10 +1165,10 @@ namespace LinearAlgebra AssertDimension(partitioner->local_size(), v.partitioner->local_size()); - Number sum; - internal::VectorOperations::Dot dot(values.get(), - v.values.get()); - internal::VectorOperations::parallel_reduce( + Number sum; + dealii::internal::VectorOperations::Dot dot( + values.get(), v.values.get()); + dealii::internal::VectorOperations::parallel_reduce( dot, 0, partitioner->local_size(), sum, thread_loop_partitioner); AssertIsFinite(sum); @@ -1231,9 +1199,10 @@ namespace LinearAlgebra typename Vector::real_type Vector::norm_sqr_local() const { - real_type sum; - internal::VectorOperations::Norm2 norm2(values.get()); - internal::VectorOperations::parallel_reduce( + real_type sum; + dealii::internal::VectorOperations::Norm2 norm2( + values.get()); + dealii::internal::VectorOperations::parallel_reduce( norm2, 0, partitioner->local_size(), sum, thread_loop_partitioner); AssertIsFinite(sum); @@ -1251,9 +1220,9 @@ namespace LinearAlgebra if (partitioner->local_size() == 0) return Number(); - Number sum; - internal::VectorOperations::MeanValue mean(values.get()); - internal::VectorOperations::parallel_reduce( + Number sum; + dealii::internal::VectorOperations::MeanValue mean(values.get()); + dealii::internal::VectorOperations::parallel_reduce( mean, 0, partitioner->local_size(), sum, thread_loop_partitioner); return sum / real_type(partitioner->local_size()); @@ -1281,9 +1250,10 @@ namespace LinearAlgebra typename Vector::real_type Vector::l1_norm_local() const { - real_type sum; - internal::VectorOperations::Norm1 norm1(values.get()); - internal::VectorOperations::parallel_reduce( + real_type sum; + dealii::internal::VectorOperations::Norm1 norm1( + values.get()); + dealii::internal::VectorOperations::parallel_reduce( norm1, 0, partitioner->local_size(), sum, thread_loop_partitioner); return sum; @@ -1332,10 +1302,10 @@ namespace LinearAlgebra typename Vector::real_type Vector::lp_norm_local(const real_type p) const { - real_type sum; - internal::VectorOperations::NormP normp(values.get(), - p); - internal::VectorOperations::parallel_reduce( + real_type sum; + dealii::internal::VectorOperations::NormP normp( + values.get(), p); + dealii::internal::VectorOperations::parallel_reduce( normp, 0, partitioner->local_size(), sum, thread_loop_partitioner); return std::pow(sum, 1. 
/ p); } @@ -1397,12 +1367,10 @@ namespace LinearAlgebra AssertDimension(vec_size, v.local_size()); AssertDimension(vec_size, w.local_size()); - Number sum; - internal::VectorOperations::AddAndDot adder(this->values.get(), - v.values.get(), - w.values.get(), - a); - internal::VectorOperations::parallel_reduce( + Number sum; + dealii::internal::VectorOperations::AddAndDot adder( + this->values.get(), v.values.get(), w.values.get(), a); + dealii::internal::VectorOperations::parallel_reduce( adder, 0, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); return sum; diff --git a/include/deal.II/lac/sparse_matrix.templates.h b/include/deal.II/lac/sparse_matrix.templates.h index 273a25abcf..446436123b 100644 --- a/include/deal.II/lac/sparse_matrix.templates.h +++ b/include/deal.II/lac/sparse_matrix.templates.h @@ -1383,35 +1383,39 @@ SparseMatrix::residual(Vector & dst, } -namespace +namespace internal { - // assert that the matrix has no zeros on the diagonal. this is important - // for preconditioners such as Jacobi or SSOR - template - void - AssertNoZerosOnDiagonal(const SparseMatrix &matrix) + namespace SparseMatrixImplementation { + // assert that the matrix has no zeros on the diagonal. this is important + // for preconditioners such as Jacobi or SSOR + template + void + AssertNoZerosOnDiagonal(const SparseMatrix &matrix) + { #ifdef DEBUG - for (typename SparseMatrix::size_type row = 0; row < matrix.m(); - ++row) - Assert(matrix.diag_element(row) != number(), - ExcMessage("There is a zero on the diagonal of this matrix " - "in row " + - Utilities::to_string(row) + - ". The preconditioner you selected cannot work if that " - "is the case because one of its steps requires " - "division by the diagonal elements of the matrix." - "\n\n" - "You should check whether you have correctly " - "assembled the matrix that you use for this " - "preconditioner. If it is correct that there are " - "zeros on the diagonal, then you will have to chose " - "a different preconditioner.")); + for (typename SparseMatrix::size_type row = 0; row < matrix.m(); + ++row) + Assert(matrix.diag_element(row) != number(), + ExcMessage( + "There is a zero on the diagonal of this matrix " + "in row " + + Utilities::to_string(row) + + ". The preconditioner you selected cannot work if that " + "is the case because one of its steps requires " + "division by the diagonal elements of the matrix." + "\n\n" + "You should check whether you have correctly " + "assembled the matrix that you use for this " + "preconditioner. 
If it is correct that there are "
+                 "zeros on the diagonal, then you will have to choose "
+                 "a different preconditioner."));
 #else
-      (void)matrix;
+        (void)matrix;
 #endif
-  }
-} // namespace
+      }
+  } // namespace SparseMatrixImplementation
+} // namespace internal
 
 
 template
@@ -1427,7 +1431,7 @@ SparseMatrix<number>::precondition_Jacobi(Vector<somenumber> &      dst,
   AssertDimension(dst.size(), n());
   AssertDimension(src.size(), n());
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   const size_type n       = src.size();
   somenumber *    dst_ptr = dst.begin();
@@ -1474,7 +1478,7 @@ SparseMatrix<number>::precondition_SSOR(
   AssertDimension(dst.size(), n());
   AssertDimension(src.size(), n());
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   const size_type    n            = src.size();
   const std::size_t *rowstart_ptr = &cols->rowstart[0];
@@ -1628,7 +1632,7 @@ SparseMatrix<number>::SOR(Vector<somenumber> &dst, const number om) const
   AssertDimension(m(), n());
   AssertDimension(dst.size(), n());
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   for (size_type row = 0; row < m(); ++row)
     {
@@ -1655,7 +1659,7 @@ SparseMatrix<number>::TSOR(Vector<somenumber> &dst, const number om) const
   AssertDimension(m(), n());
   AssertDimension(dst.size(), n());
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   size_type row = m() - 1;
   while (true)
@@ -1693,7 +1697,7 @@ SparseMatrix<number>::PSOR(Vector<somenumber> &      dst,
   Assert(m() == inverse_permutation.size(),
          ExcDimensionMismatch(m(), inverse_permutation.size()));
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   for (size_type urow = 0; urow < m(); ++urow)
     {
@@ -1732,7 +1736,7 @@ SparseMatrix<number>::TPSOR(Vector<somenumber> &      dst,
   Assert(m() == inverse_permutation.size(),
          ExcDimensionMismatch(m(), inverse_permutation.size()));
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   for (size_type urow = m(); urow != 0;)
     {
@@ -1796,7 +1800,7 @@ SparseMatrix<number>::SOR_step(Vector<somenumber> &      v,
   Assert(m() == v.size(), ExcDimensionMismatch(m(), v.size()));
   Assert(m() == b.size(), ExcDimensionMismatch(m(), b.size()));
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   for (size_type row = 0; row < m(); ++row)
     {
@@ -1824,7 +1828,7 @@ SparseMatrix<number>::TSOR_step(Vector<somenumber> &      v,
   Assert(m() == v.size(), ExcDimensionMismatch(m(), v.size()));
   Assert(m() == b.size(), ExcDimensionMismatch(m(), b.size()));
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   for (int row = m() - 1; row >= 0; --row)
     {
@@ -1866,7 +1870,7 @@ SparseMatrix<number>::SSOR(Vector<somenumber> &dst, const number om) const
   AssertDimension(m(), n());
   Assert(m() == dst.size(), ExcDimensionMismatch(m(), dst.size()));
 
-  AssertNoZerosOnDiagonal(*this);
+  internal::SparseMatrixImplementation::AssertNoZerosOnDiagonal(*this);
 
   const size_type n = dst.size();
   size_type       j;
diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h
index 9ea2cde975..dd47ec7df0 100644
--- a/include/deal.II/matrix_free/dof_info.templates.h
+++ b/include/deal.II/matrix_free/dof_info.templates.h
@@ -984,7 +984,7 @@ namespace internal
 
 
 
-  namespace
+  namespace internal
   {
     // rudimentary version of a vector that keeps entries always ordered
    class ordered_vector : public std::vector<types::global_dof_index>
@@ -1146,7 +1146,7 @@ namespace internal
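// AssertNoZerosOnDiagonal above guards all of the relaxation methods
// updated in this hunk, each of which divides by the diagonal. A scalar
// sketch of one Jacobi preconditioning step on a hypothetical dense
// row-major matrix, showing where that division happens:
#include <cstddef>
#include <vector>

void precondition_jacobi(std::vector<double> &                   dst,
                         const std::vector<double> &             src,
                         const std::vector<std::vector<double>> &A,
                         const double                            om = 1.)
{
  for (std::size_t i = 0; i < src.size(); ++i)
    dst[i] = om * src[i] / A[i][i]; // a zero diagonal entry breaks this
}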
row_entries.end()); } } - } // namespace + } // namespace internal @@ -1166,10 +1166,11 @@ namespace internal // first determine row lengths std::vector row_lengths(n_rows); - std::vector mutexes(n_rows / bucket_size_threading + 1); + std::vector mutexes( + n_rows / internal::bucket_size_threading + 1); parallel::apply_to_subranges(0, task_info.n_active_cells, - std::bind(&compute_row_lengths, + std::bind(&internal::compute_row_lengths, std::placeholders::_1, std::placeholders::_2, std::cref(*this), @@ -1191,7 +1192,7 @@ namespace internal row_lengths); parallel::apply_to_subranges(0, task_info.n_active_cells, - std::bind(&fill_connectivity_dofs, + std::bind(&internal::fill_connectivity_dofs, std::placeholders::_1, std::placeholders::_2, std::cref(*this), @@ -1212,7 +1213,7 @@ namespace internal // for cell renumbering[j] in the original ordering. parallel::apply_to_subranges(0, task_info.n_active_cells, - std::bind(&fill_connectivity, + std::bind(&internal::fill_connectivity, std::placeholders::_1, std::placeholders::_2, std::cref(*this), diff --git a/include/deal.II/matrix_free/mapping_info.templates.h b/include/deal.II/matrix_free/mapping_info.templates.h index c67600eee1..47ae5585b9 100644 --- a/include/deal.II/matrix_free/mapping_info.templates.h +++ b/include/deal.II/matrix_free/mapping_info.templates.h @@ -293,9 +293,9 @@ namespace internal /* ------------------------- initialization of cells ------------------- */ - // Anonymous namespace with implementation of extraction of values on cell + // Namespace with implementation of extraction of values on cell // range - namespace + namespace ExtractCellHelper { template double @@ -991,7 +991,7 @@ namespace internal } } - } // end of anonymous namespace + } // namespace ExtractCellHelper @@ -1039,7 +1039,7 @@ namespace internal MultithreadInfo::n_threads()); std::vector>, - CompressedCellData>> + ExtractCellHelper::CompressedCellData>> data_cells_local; // Reserve enough space to avoid re-allocation (which would break the // references to the data fields passed to the tasks!) @@ -1052,17 +1052,19 @@ namespace internal { data_cells_local.push_back(std::make_pair( std::vector>(n_quads), - CompressedCellData(get_jacobian_size(tria)))); - tasks += Threads::new_task(&initialize_cell_range, - cell_range, - tria, - cells, - active_fe_index, - mapping, - quad, - update_flags, - *this, - data_cells_local.back()); + ExtractCellHelper::CompressedCellData( + ExtractCellHelper::get_jacobian_size(tria)))); + tasks += Threads::new_task( + &ExtractCellHelper::initialize_cell_range, + cell_range, + tria, + cells, + active_fe_index, + mapping, + quad, + update_flags, + *this, + data_cells_local.back()); cell_range.first = cell_range.second; cell_range.second += work_per_chunk; } @@ -1074,9 +1076,10 @@ namespace internal std::vector> indices_compressed( data_cells_local.size()); for (unsigned int i = 0; i < data_cells_local.size(); ++i) - merge_compressed_data(data_cells_local[i].second.data, - data_cells_local[0].second.data, - indices_compressed[i]); + ExtractCellHelper::merge_compressed_data( + data_cells_local[i].second.data, + data_cells_local[0].second.data, + indices_compressed[i]); // Collect all data in the final data fields. 
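// The dof_info hunks above parallelize three passes (row lengths, dof
// connectivity, cell connectivity) with the same pattern:
// parallel::apply_to_subranges chops the cell range into chunks and hands
// each [begin, end) pair to a worker bound via std::bind. A sketch with a
// serial, hypothetical stand-in for apply_to_subranges:
#include <algorithm>
#include <functional>
#include <vector>

void apply_to_subranges(
  const unsigned int                                     begin,
  const unsigned int                                     end,
  const std::function<void(unsigned int, unsigned int)> &worker,
  const unsigned int                                     grainsize)
{
  for (unsigned int b = begin; b < end; b += grainsize)
    worker(b, std::min(b + grainsize, end)); // deal.II runs chunks via TBB
}

void count_cells(const unsigned int         begin,
                 const unsigned int         end,
                 std::vector<unsigned int> &row_lengths)
{
  for (unsigned int cell = begin; cell < end; ++cell)
    ++row_lengths[cell];
}

int main()
{
  std::vector<unsigned int> row_lengths(10, 0);
  apply_to_subranges(0,
                     10,
                     std::bind(&count_cells,
                               std::placeholders::_1,
                               std::placeholders::_2,
                               std::ref(row_lengths)),
                     4);
}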
// First allocate the memory @@ -1117,13 +1120,14 @@ namespace internal // Start tasks that copy the local data Threads::TaskGroup<> tasks; for (unsigned int i = 0; i < data_cells_local.size(); ++i) - tasks += Threads::new_task(©_data, - work_per_chunk * i, - shift[i], - indices_compressed[i], - cell_type, - data_cells_local[i].first[my_q], - cell_data[my_q]); + tasks += + Threads::new_task(&ExtractCellHelper::copy_data, + work_per_chunk * i, + shift[i], + indices_compressed[i], + cell_type, + data_cells_local[i].first[my_q], + cell_data[my_q]); // finally, insert the constant cell data at the beginning (the // other tasks can already start copying the non-constant data) @@ -1165,9 +1169,9 @@ namespace internal /* ------------------------- initialization of faces ------------------- */ - // Anonymous namespace with implementation of extraction of values on cell + // Namespace with implementation of extraction of values on face // range - namespace + namespace ExtractFaceHelper { template struct CompressedFaceData @@ -1270,7 +1274,8 @@ namespace internal fe_subface_values_container[my_q].resize( mapping_info.face_data[my_q].descriptor.size()); - LocalData face_data(get_jacobian_size(tria)); + ExtractCellHelper::LocalData face_data( + ExtractCellHelper::get_jacobian_size(tria)); const unsigned int end_face = std::min(std::size_t(face_range.second), faces.size()); @@ -1641,7 +1646,7 @@ namespace internal } } - } // end of anonymous namespace + } // namespace ExtractFaceHelper @@ -1697,7 +1702,7 @@ namespace internal std::vector< std::pair>, - CompressedFaceData>> + ExtractFaceHelper::CompressedFaceData>> data_faces_local; // Reserve enough space to avoid re-allocation (which would destroy the // references passed to the tasks!) @@ -1711,17 +1716,19 @@ namespace internal data_faces_local.push_back(std::make_pair( std::vector>( quad.size()), - CompressedFaceData(get_jacobian_size(tria)))); - tasks += Threads::new_task(&initialize_face_range, - face_range, - tria, - cells, - faces, - mapping, - update_flags_compute_boundary, - update_flags_compute_inner, - *this, - data_faces_local.back()); + ExtractFaceHelper::CompressedFaceData( + ExtractCellHelper::get_jacobian_size(tria)))); + tasks += Threads::new_task( + &ExtractFaceHelper::initialize_face_range, + face_range, + tria, + cells, + faces, + mapping, + update_flags_compute_boundary, + update_flags_compute_inner, + *this, + data_faces_local.back()); face_range.first = face_range.second; face_range.second += work_per_chunk; } @@ -1734,9 +1741,10 @@ namespace internal std::vector> indices_compressed( data_faces_local.size()); for (unsigned int i = 0; i < data_faces_local.size(); ++i) - merge_compressed_data(data_faces_local[i].second.data, - data_faces_local[0].second.data, - indices_compressed[i]); + ExtractCellHelper::merge_compressed_data( + data_faces_local[i].second.data, + data_faces_local[0].second.data, + indices_compressed[i]); // Collect all data in the final data fields. 
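// merge_compressed_data above folds each task's compressed geometry table
// into the table of task 0 and records, for every local entry, its
// position in the merged table. A sketch of that merge with a plain
// std::map<long, unsigned int> as a hypothetical stand-in for the
// compressed data map:
#include <map>
#include <utility>
#include <vector>

void merge_compressed_data(const std::map<long, unsigned int> &local,
                           std::map<long, unsigned int> &      merged,
                           std::vector<unsigned int> &         indices)
{
  indices.resize(local.size());
  for (const auto &entry : local)
    {
      // insert() leaves 'merged' unchanged if the key already exists, so
      // new keys get consecutive indices and duplicates reuse old ones
      const auto position = merged.insert(
        std::make_pair(entry.first,
                       static_cast<unsigned int>(merged.size())));
      indices[entry.second] = position.first->second;
    }
}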
// First allocate the memory @@ -1789,18 +1797,20 @@ namespace internal // start the tasks to gather the data in parallel Threads::TaskGroup<> tasks; for (unsigned int i = 0; i < data_faces_local.size(); ++i) - tasks += Threads::new_task(©_data, - work_per_chunk * i, - shift[i], - indices_compressed[i], - face_type, - data_faces_local[i].first[my_q], - face_data[my_q]); + tasks += Threads::new_task( + &ExtractCellHelper::copy_data, + work_per_chunk * i, + shift[i], + indices_compressed[i], + face_type, + data_faces_local[i].first[my_q], + face_data[my_q]); // fill the constant data fields (in parallel to the loop above) if (my_q == 0) { - const Number jac_size = get_jacobian_size(tria); + const Number jac_size = + ExtractCellHelper::get_jacobian_size(tria); for (auto &it : data_faces_local[0].second.data) { // JxW values; invert previously applied scaling @@ -1848,14 +1858,13 @@ namespace internal // finally compute the normal times the jacobian for (unsigned int i = 0; i < data_faces_local.size(); ++i) - tasks += - Threads::new_task(&compute_normal_times_jacobian, - work_per_chunk * i, - std::min(work_per_chunk * (i + 1), - (unsigned int)faces.size()), - face_type, - faces, - face_data[my_q]); + tasks += Threads::new_task( + &ExtractFaceHelper::compute_normal_times_jacobian, + work_per_chunk * i, + std::min(work_per_chunk * (i + 1), (unsigned int)faces.size()), + face_type, + faces, + face_data[my_q]); tasks.join_all(); } } @@ -1999,7 +2008,7 @@ namespace internal for (unsigned int d = 0; d < dim; ++d) for (unsigned int e = 0; e < dim; ++e) { - const unsigned int ee = + const unsigned int ee = ExtractFaceHelper:: reorder_face_derivative_indices(face, e); face_data_by_cells[my_q] .jacobians[0][offset][d][e][v] = @@ -2033,7 +2042,7 @@ namespace internal for (unsigned int d = 0; d < dim; ++d) for (unsigned int e = 0; e < dim; ++e) { - const unsigned int ee = + const unsigned int ee = ExtractFaceHelper:: reorder_face_derivative_indices(face, e); face_data_by_cells[my_q] diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h index c2e4550916..e5783ef949 100644 --- a/include/deal.II/matrix_free/matrix_free.templates.h +++ b/include/deal.II/matrix_free/matrix_free.templates.h @@ -1753,104 +1753,99 @@ MatrixFree::clear() namespace internal { - namespace + void + fill_index_subrange( + const unsigned int begin, + const unsigned int end, + const std::vector> &cell_level_index, + tbb::concurrent_unordered_map, + unsigned int> & map) { - void - fill_index_subrange( - const unsigned int begin, - const unsigned int end, - const std::vector> - & cell_level_index, - tbb::concurrent_unordered_map, - unsigned int> &map) - { - if (cell_level_index.empty()) - return; - unsigned int cell = begin; - if (cell == 0) - map.insert(std::make_pair(cell_level_index[cell++], 0U)); - for (; cell < end; ++cell) - if (cell_level_index[cell] != cell_level_index[cell - 1]) - map.insert(std::make_pair(cell_level_index[cell], cell)); - } + if (cell_level_index.empty()) + return; + unsigned int cell = begin; + if (cell == 0) + map.insert(std::make_pair(cell_level_index[cell++], 0U)); + for (; cell < end; ++cell) + if (cell_level_index[cell] != cell_level_index[cell - 1]) + map.insert(std::make_pair(cell_level_index[cell], cell)); + } - template - void - fill_connectivity_subrange( - const unsigned int begin, - const unsigned int end, - const dealii::Triangulation &tria, - const std::vector> - & cell_level_index, - const tbb::concurrent_unordered_map, - unsigned 
int> &map, - DynamicSparsityPattern & connectivity_direct) - { - std::vector new_indices; - for (unsigned int cell = begin; cell < end; ++cell) - { - new_indices.clear(); - typename dealii::Triangulation::cell_iterator dcell( - &tria, cell_level_index[cell].first, cell_level_index[cell].second); - for (unsigned int f = 0; f < GeometryInfo::faces_per_cell; ++f) - { - // Only inner faces couple different cells - if (dcell->at_boundary(f) == false && - dcell->neighbor_or_periodic_neighbor(f) - ->level_subdomain_id() == dcell->level_subdomain_id()) - { - std::pair level_index( - dcell->neighbor_or_periodic_neighbor(f)->level(), - dcell->neighbor_or_periodic_neighbor(f)->index()); - auto it = map.find(level_index); - if (it != map.end()) - { - const unsigned int neighbor_cell = it->second; - if (neighbor_cell != cell) - new_indices.push_back(neighbor_cell); - } - } - } - std::sort(new_indices.begin(), new_indices.end()); - connectivity_direct.add_entries(cell, - new_indices.begin(), - std::unique(new_indices.begin(), - new_indices.end())); - } - } + template + void + fill_connectivity_subrange( + const unsigned int begin, + const unsigned int end, + const dealii::Triangulation & tria, + const std::vector> &cell_level_index, + const tbb::concurrent_unordered_map, + unsigned int> & map, + DynamicSparsityPattern &connectivity_direct) + { + std::vector new_indices; + for (unsigned int cell = begin; cell < end; ++cell) + { + new_indices.clear(); + typename dealii::Triangulation::cell_iterator dcell( + &tria, cell_level_index[cell].first, cell_level_index[cell].second); + for (unsigned int f = 0; f < GeometryInfo::faces_per_cell; ++f) + { + // Only inner faces couple different cells + if (dcell->at_boundary(f) == false && + dcell->neighbor_or_periodic_neighbor(f)->level_subdomain_id() == + dcell->level_subdomain_id()) + { + std::pair level_index( + dcell->neighbor_or_periodic_neighbor(f)->level(), + dcell->neighbor_or_periodic_neighbor(f)->index()); + auto it = map.find(level_index); + if (it != map.end()) + { + const unsigned int neighbor_cell = it->second; + if (neighbor_cell != cell) + new_indices.push_back(neighbor_cell); + } + } + } + std::sort(new_indices.begin(), new_indices.end()); + connectivity_direct.add_entries(cell, + new_indices.begin(), + std::unique(new_indices.begin(), + new_indices.end())); + } + } - void - fill_connectivity_indirect_subrange( - const unsigned int begin, - const unsigned int end, - const DynamicSparsityPattern &connectivity_direct, - DynamicSparsityPattern & connectivity) - { - std::vector new_indices; - for (unsigned int block = begin; block < end; ++block) - { - new_indices.clear(); - for (DynamicSparsityPattern::iterator it = - connectivity_direct.begin(block); - it != connectivity_direct.end(block); - ++it) - { - new_indices.push_back(it->column()); - for (DynamicSparsityPattern::iterator it_neigh = - connectivity_direct.begin(it->column()); - it_neigh != connectivity_direct.end(it->column()); - ++it_neigh) - if (it_neigh->column() != block) - new_indices.push_back(it_neigh->column()); - } - std::sort(new_indices.begin(), new_indices.end()); - connectivity.add_entries(block, - new_indices.begin(), - std::unique(new_indices.begin(), - new_indices.end())); - } - } - } // namespace + void + fill_connectivity_indirect_subrange( + const unsigned int begin, + const unsigned int end, + const DynamicSparsityPattern &connectivity_direct, + DynamicSparsityPattern & connectivity) + { + std::vector new_indices; + for (unsigned int block = begin; block < end; ++block) + { + 
new_indices.clear(); + for (DynamicSparsityPattern::iterator it = + connectivity_direct.begin(block); + it != connectivity_direct.end(block); + ++it) + { + new_indices.push_back(it->column()); + for (DynamicSparsityPattern::iterator it_neigh = + connectivity_direct.begin(it->column()); + it_neigh != connectivity_direct.end(it->column()); + ++it_neigh) + if (it_neigh->column() != block) + new_indices.push_back(it_neigh->column()); + } + std::sort(new_indices.begin(), new_indices.end()); + connectivity.add_entries(block, + new_indices.begin(), + std::unique(new_indices.begin(), + new_indices.end())); + } + } } // namespace internal #endif diff --git a/include/deal.II/matrix_free/shape_info.templates.h b/include/deal.II/matrix_free/shape_info.templates.h index 9637efb635..18ed6fba80 100644 --- a/include/deal.II/matrix_free/shape_info.templates.h +++ b/include/deal.II/matrix_free/shape_info.templates.h @@ -40,22 +40,19 @@ namespace internal { // ----------------- actual ShapeInfo functions -------------------- - namespace + template + Number + get_first_array_element(const Number a) { - template - Number - get_first_array_element(const Number a) - { - return a; - } + return a; + } - template - Number - get_first_array_element(const VectorizedArray a) - { - return a[0]; - } - } // namespace + template + Number + get_first_array_element(const VectorizedArray a) + { + return a[0]; + } template ShapeInfo::ShapeInfo() diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h index c7ee6d88aa..7c5a13bbc5 100644 --- a/include/deal.II/multigrid/mg_transfer.templates.h +++ b/include/deal.II/multigrid/mg_transfer.templates.h @@ -43,151 +43,154 @@ DEAL_II_NAMESPACE_OPEN -namespace +namespace internal { - /** - * Adjust vectors on all levels to correct size. Here, we just count the - * numbers of degrees of freedom on each level and @p reinit each level - * vector to this length. For compatibility reasons with the next function - * the target_component is added here but is not used. - */ - template - void - reinit_vector(const dealii::DoFHandler &mg_dof, - const std::vector &, - MGLevelObject> &v) + namespace MGTransfer { - for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) - { - unsigned int n = mg_dof.n_dofs(level); - v[level].reinit(n); - } - } - - /** - * Adjust vectors on all levels to correct size. Here, we just count the - * numbers of degrees of freedom on each level and @p reinit each level - * vector to this length. The target_component is handed to - * MGTools::count_dofs_per_block. See for documentation there. 
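// Two idioms recur in the matrix_free connectivity code above. First,
// fill_index_subrange builds a (level, index) -> position lookup while
// skipping consecutive duplicates in cell_level_index; second, every
// sparsity row is compressed with sort followed by unique -- the code
// above even hands std::unique's return value directly to add_entries as
// the past-the-end iterator instead of calling erase(). A sketch, with
// std::map in place of tbb::concurrent_unordered_map:
#include <algorithm>
#include <map>
#include <utility>
#include <vector>

using CellId = std::pair<unsigned int, unsigned int>; // (level, index)

std::map<CellId, unsigned int>
build_cell_map(const std::vector<CellId> &cell_level_index)
{
  std::map<CellId, unsigned int> map;
  for (unsigned int cell = 0; cell < cell_level_index.size(); ++cell)
    // keep only the first position of consecutive duplicates
    if (cell == 0 || cell_level_index[cell] != cell_level_index[cell - 1])
      map.insert(std::make_pair(cell_level_index[cell], cell));
  return map;
}

std::vector<unsigned int> compress_row(std::vector<unsigned int> new_indices)
{
  std::sort(new_indices.begin(), new_indices.end());
  new_indices.erase(std::unique(new_indices.begin(), new_indices.end()),
                    new_indices.end());
  return new_indices;
}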
- */ - template - void - reinit_vector(const dealii::DoFHandler &mg_dof, - std::vector target_component, - MGLevelObject> & v) - { - const unsigned int n_blocks = mg_dof.get_fe().n_blocks(); - if (target_component.size() == 0) - { - target_component.resize(n_blocks); - for (unsigned int i = 0; i < n_blocks; ++i) - target_component[i] = i; - } - Assert(target_component.size() == n_blocks, - ExcDimensionMismatch(target_component.size(), n_blocks)); - const unsigned int max_block = - *std::max_element(target_component.begin(), target_component.end()); - const unsigned int n_target_blocks = max_block + 1; - - std::vector> ndofs( - mg_dof.get_triangulation().n_levels(), - std::vector(n_target_blocks)); - MGTools::count_dofs_per_block(mg_dof, ndofs, target_component); - - for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) - { - v[level].reinit(n_target_blocks); - for (unsigned int b = 0; b < n_target_blocks; ++b) - v[level].block(b).reinit(ndofs[level][b]); - v[level].collect_sizes(); - } - } + /** + * Adjust vectors on all levels to correct size. Here, we just count the + * numbers of degrees of freedom on each level and @p reinit each level + * vector to this length. For compatibility reasons with the next function + * the target_component is added here but is not used. + */ + template + void + reinit_vector(const dealii::DoFHandler &mg_dof, + const std::vector &, + MGLevelObject> &v) + { + for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) + { + unsigned int n = mg_dof.n_dofs(level); + v[level].reinit(n); + } + } - /** - * Adjust vectors on all levels to correct size. Here, we just count the - * numbers of degrees of freedom on each level and @p reinit each level - * vector to this length. - */ - template - void - reinit_vector(const dealii::DoFHandler &mg_dof, - const std::vector &, - MGLevelObject> &v) - { - const parallel::Triangulation *tria = - (dynamic_cast *>( - &mg_dof.get_triangulation())); + /** + * Adjust vectors on all levels to correct size. Here, we just count the + * numbers of degrees of freedom on each level and @p reinit each level + * vector to this length. The target_component is handed to + * MGTools::count_dofs_per_block. See for documentation there. 
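// The block-vector overload of reinit_vector in this hunk normalizes
// target_component before counting: an empty list means the identity
// mapping, and the number of target blocks is one more than the largest
// target index. A sketch of just that bookkeeping:
#include <algorithm>
#include <vector>

std::vector<unsigned int>
normalize_target_component(std::vector<unsigned int> target_component,
                           const unsigned int        n_blocks)
{
  if (target_component.empty())
    {
      target_component.resize(n_blocks);
      for (unsigned int i = 0; i < n_blocks; ++i)
        target_component[i] = i; // identity: block i stays block i
    }
  return target_component;
}

unsigned int
n_target_blocks(const std::vector<unsigned int> &target_component)
{
  return *std::max_element(target_component.begin(),
                           target_component.end()) +
         1;
}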
+ */ + template + void + reinit_vector(const dealii::DoFHandler &mg_dof, + std::vector target_component, + MGLevelObject> & v) + { + const unsigned int n_blocks = mg_dof.get_fe().n_blocks(); + if (target_component.size() == 0) + { + target_component.resize(n_blocks); + for (unsigned int i = 0; i < n_blocks; ++i) + target_component[i] = i; + } + Assert(target_component.size() == n_blocks, + ExcDimensionMismatch(target_component.size(), n_blocks)); + const unsigned int max_block = + *std::max_element(target_component.begin(), target_component.end()); + const unsigned int n_target_blocks = max_block + 1; + + std::vector> ndofs( + mg_dof.get_triangulation().n_levels(), + std::vector(n_target_blocks)); + MGTools::count_dofs_per_block(mg_dof, ndofs, target_component); + + for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) + { + v[level].reinit(n_target_blocks); + for (unsigned int b = 0; b < n_target_blocks; ++b) + v[level].block(b).reinit(ndofs[level][b]); + v[level].collect_sizes(); + } + } - for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) - { - if (v[level].size() != mg_dof.locally_owned_mg_dofs(level).size() || - v[level].local_size() != - mg_dof.locally_owned_mg_dofs(level).n_elements()) - v[level].reinit(mg_dof.locally_owned_mg_dofs(level), - tria != nullptr ? tria->get_communicator() : - MPI_COMM_SELF); - else - v[level] = 0.; - } - } + /** + * Adjust vectors on all levels to correct size. Here, we just count the + * numbers of degrees of freedom on each level and @p reinit each level + * vector to this length. + */ + template + void + reinit_vector(const dealii::DoFHandler &mg_dof, + const std::vector &, + MGLevelObject> &v) + { + const parallel::Triangulation *tria = + (dynamic_cast *>( + &mg_dof.get_triangulation())); + + for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) + { + if (v[level].size() != mg_dof.locally_owned_mg_dofs(level).size() || + v[level].local_size() != + mg_dof.locally_owned_mg_dofs(level).n_elements()) + v[level].reinit(mg_dof.locally_owned_mg_dofs(level), + tria != nullptr ? tria->get_communicator() : + MPI_COMM_SELF); + else + v[level] = 0.; + } + } #ifdef DEAL_II_WITH_TRILINOS - /** - * Adjust vectors on all levels to correct size. Here, we just count the - * numbers of degrees of freedom on each level and @p reinit each level - * vector to this length. - */ - template - void - reinit_vector(const dealii::DoFHandler &mg_dof, - const std::vector &, - MGLevelObject &v) - { - const dealii::parallel::Triangulation *tria = - (dynamic_cast *>( - &mg_dof.get_triangulation())); - AssertThrow( - tria != nullptr, - ExcMessage( - "multigrid with Trilinos vectors only works with a parallel Triangulation!")); - - for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) - { - v[level].reinit(mg_dof.locally_owned_mg_dofs(level), - tria->get_communicator()); - } - } + /** + * Adjust vectors on all levels to correct size. Here, we just count the + * numbers of degrees of freedom on each level and @p reinit each level + * vector to this length. 
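// The LinearAlgebra::distributed::Vector overload above only reallocates
// a level vector when its parallel layout changed and otherwise merely
// zeroes it. The same decision with hypothetical plain types:
#include <algorithm>
#include <cstddef>
#include <vector>

void reinit_level(std::vector<double> &v, const std::size_t n_level_dofs)
{
  if (v.size() != n_level_dofs)
    v.assign(n_level_dofs, 0.); // layout changed: reallocate
  else
    std::fill(v.begin(), v.end(), 0.); // layout unchanged: just zero out
}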
+ */ + template + void + reinit_vector(const dealii::DoFHandler &mg_dof, + const std::vector &, + MGLevelObject &v) + { + const dealii::parallel::Triangulation *tria = + (dynamic_cast *>( + &mg_dof.get_triangulation())); + AssertThrow( + tria != nullptr, + ExcMessage( + "multigrid with Trilinos vectors only works with a parallel Triangulation!")); + + for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) + { + v[level].reinit(mg_dof.locally_owned_mg_dofs(level), + tria->get_communicator()); + } + } #endif #ifdef DEAL_II_WITH_PETSC - /** - * Adjust vectors on all levels to correct size. Here, we just count the - * numbers of degrees of freedom on each level and @p reinit each level - * vector to this length. - */ - template - void - reinit_vector(const dealii::DoFHandler &mg_dof, - const std::vector &, - MGLevelObject &v) - { - const dealii::parallel::Triangulation *tria = - (dynamic_cast *>( - &mg_dof.get_triangulation())); - AssertThrow( - tria != nullptr, - ExcMessage( - "multigrid with parallel PETSc vectors only works with a parallel Triangulation!")); - - for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) - { - v[level].reinit(mg_dof.locally_owned_mg_dofs(level), - tria->get_communicator()); - } - } + /** + * Adjust vectors on all levels to correct size. Here, we just count the + * numbers of degrees of freedom on each level and @p reinit each level + * vector to this length. + */ + template + void + reinit_vector(const dealii::DoFHandler &mg_dof, + const std::vector &, + MGLevelObject &v) + { + const dealii::parallel::Triangulation *tria = + (dynamic_cast *>( + &mg_dof.get_triangulation())); + AssertThrow( + tria != nullptr, + ExcMessage( + "multigrid with parallel PETSc vectors only works with a parallel Triangulation!")); + + for (unsigned int level = v.min_level(); level <= v.max_level(); ++level) + { + v[level].reinit(mg_dof.locally_owned_mg_dofs(level), + tria->get_communicator()); + } + } #endif -} // namespace + } // namespace MGTransfer +} // namespace internal @@ -241,7 +244,9 @@ MGLevelGlobalTransfer::copy_to_mg( AssertIndexRange(dst.max_level(), mg_dof_handler.get_triangulation().n_global_levels()); AssertIndexRange(dst.min_level(), dst.max_level() + 1); - reinit_vector(mg_dof_handler, component_to_block_map, dst); + internal::MGTransfer::reinit_vector(mg_dof_handler, + component_to_block_map, + dst); #ifdef DEBUG_OUTPUT std::cout << "copy_to_mg src " << src.l2_norm() << std::endl; int ierr = MPI_Barrier(MPI_COMM_WORLD); @@ -456,7 +461,9 @@ MGLevelGlobalTransfer>::copy_to_mg( AssertIndexRange(dst.max_level(), mg_dof_handler.get_triangulation().n_global_levels()); AssertIndexRange(dst.min_level(), dst.max_level() + 1); - reinit_vector(mg_dof_handler, component_to_block_map, dst); + internal::MGTransfer::reinit_vector(mg_dof_handler, + component_to_block_map, + dst); if (perform_plain_copy) { diff --git a/include/deal.II/numerics/data_out_dof_data.templates.h b/include/deal.II/numerics/data_out_dof_data.templates.h index 0b9bafe28d..32d4874ef2 100644 --- a/include/deal.II/numerics/data_out_dof_data.templates.h +++ b/include/deal.II/numerics/data_out_dof_data.templates.h @@ -629,23 +629,19 @@ namespace internal {} - namespace + template + inline typename VectorType::value_type + get_vector_element(const VectorType &vector, const unsigned int cell_number) { - template - inline typename VectorType::value_type - get_vector_element(const VectorType & vector, - const unsigned int cell_number) - { - return 
internal::ElementAccess::get(vector, cell_number);
-    }
-
-
-    inline double
-    get_vector_element(const IndexSet &is, const unsigned int cell_number)
-    {
-      return (is.is_element(cell_number) ? 1 : 0);
-    }
-  } // namespace
+    return internal::ElementAccess::get(vector, cell_number);
+  }
+
+
+  inline double
+  get_vector_element(const IndexSet &is, const unsigned int cell_number)
+  {
+    return (is.is_element(cell_number) ? 1 : 0);
+  }
diff --git a/include/deal.II/numerics/error_estimator.templates.h b/include/deal.II/numerics/error_estimator.templates.h
index ee5fdc2aa6..6357100cb1 100644
--- a/include/deal.II/numerics/error_estimator.templates.h
+++ b/include/deal.II/numerics/error_estimator.templates.h
@@ -61,1010 +61,999 @@ DEAL_II_NAMESPACE_OPEN

 namespace internal
 {
-  namespace
+  /**
+   * All small temporary data objects that are needed once per thread by the
+   * several functions of the error estimator are gathered in this struct.
+   * The reason for this structure is mainly that we have a number of
+   * functions that operate on cells or faces and need a number of small
+   * temporary data objects. Since these functions may run in parallel, we
+   * cannot make these objects member variables of the enclosing class. On
+   * the other hand, declaring them locally in each of these functions would
+   * require reallocating them every time we visit the next cell or face,
+   * which we found can take a significant amount of time if it happens
+   * often even in the single threaded case (10-20 per cent in our
+   * measurements); however, most importantly, memory allocation requires
+   * synchronization in multithreaded mode. While that is done by the C++
+   * library and does not have to be handcoded, it nevertheless seriously
+   * damages the ability to efficiently run the functions of this class in
+   * parallel, since they are quite often blocked by these synchronization
+   * points, slowing everything down by a factor of two or three.
+   *
+   * Thus, every thread gets an instance of this class to work with and
+   * need not allocate memory itself or synchronize with other threads.
+   *
+   * The sizes of the arrays are initialized with the maximal number of
+   * entries necessary for the hp case. Within the loop over individual
+   * cells, we then resize the arrays as necessary. Since for std::vector
+   * resizing to a smaller size doesn't imply memory allocation, this is
+   * fast.
+   */
+  template
+  struct ParallelData
+  {
+    static const unsigned int dim      = DoFHandlerType::dimension;
+    static const unsigned int spacedim = DoFHandlerType::space_dimension;
+
     /**
-     * All small temporary data objects that are needed once per thread by the
-     * several functions of the error estimator are gathered in this struct.
-     * The reason for this structure is mainly that we have a number of
-     * functions that operate on cells or faces and need a number of small
-     * temporary data objects. Since these functions may run in parallel, we
-     * cannot make these objects member variables of the enclosing class. On
-     * the other hand, declaring them locally in each of these functions would
-     * require their reallocating every time we visit the next cell or face,
-     * which we found can take a significant amount of time if it happens
-     * often even in the single threaded case (10-20 per cent in our
-     * measurements); however, most importantly, memory allocation requires
-     * synchronization in multithreaded mode.
While that is done by the C++ - * library and has not to be handcoded, it nevertheless seriously damages - * the ability to efficiently run the functions of this class in parallel, - * since they are quite often blocked by these synchronization points, - * slowing everything down by a factor of two or three. - * - * Thus, every thread gets an instance of this class to work with and - * needs not allocate memory itself, or synchronize with other threads. - * - * The sizes of the arrays are initialized with the maximal number of - * entries necessary for the hp case. Within the loop over individual - * cells, we then resize the arrays as necessary. Since for std::vector - * resizing to a smaller size doesn't imply memory allocation, this is - * fast. + * The finite element to be used. */ - template - struct ParallelData - { - static const unsigned int dim = DoFHandlerType::dimension; - static const unsigned int spacedim = DoFHandlerType::space_dimension; - - /** - * The finite element to be used. - */ - const dealii::hp::FECollection finite_element; - - /** - * The quadrature formulas to be used for the faces. - */ - const dealii::hp::QCollection face_quadratures; - - /** - * FEFaceValues objects to integrate over the faces of the current and - * potentially of neighbor cells. - */ - dealii::hp::FEFaceValues fe_face_values_cell; - dealii::hp::FEFaceValues fe_face_values_neighbor; - dealii::hp::FESubfaceValues fe_subface_values; - - /** - * A vector to store the jump of the normal vectors in the quadrature - * points for each of the solution vectors (i.e. a temporary value). - * This vector is not allocated inside the functions that use it, but - * rather globally, since memory allocation is slow, in particular in - * presence of multiple threads where synchronization makes things even - * slower. - */ - std::vector>> phi; - - /** - * A vector for the gradients of the finite element function on one cell - * - * Let psi be a short name for a grad u_h, where the third - * index be the component of the finite element, and the second index - * the number of the quadrature point. The first index denotes the index - * of the solution vector. - */ - std::vector>>> psi; - - /** - * The same vector for a neighbor cell - */ - std::vector>>> - neighbor_psi; - - /** - * The normal vectors of the finite element function on one face - */ - std::vector> normal_vectors; - - /** - * Normal vectors of the opposing face. - */ - std::vector> neighbor_normal_vectors; - - /** - * Two arrays needed for the values of coefficients in the jumps, if - * they are given. - */ - std::vector coefficient_values1; - std::vector> coefficient_values; - - /** - * Array for the products of Jacobian determinants and weights of - * quadraturs points. - */ - std::vector JxW_values; - - /** - * The subdomain id we are to care for. - */ - const types::subdomain_id subdomain_id; - /** - * The material id we are to care for. - */ - const types::material_id material_id; - - /** - * Some more references to input data to the - * KellyErrorEstimator::estimate() function. - */ - const std::map *> - * neumann_bc; - const ComponentMask component_mask; - const Function *coefficients; - - /** - * Constructor. 
- */ - template - ParallelData( - const FE & fe, - const dealii::hp::QCollection & face_quadratures, - const dealii::hp::MappingCollection &mapping, - const bool need_quadrature_points, - const unsigned int n_solution_vectors, - const types::subdomain_id subdomain_id, - const types::material_id material_id, - const std::map *> - * neumann_bc, - const ComponentMask & component_mask, - const Function *coefficients); - - /** - * Resize the arrays so that they fit the number of quadrature points - * associated with the given finite element index into the hp - * collections. - */ - void - resize(const unsigned int active_fe_index); - }; - - - template - template - ParallelData::ParallelData( - const FE & fe, - const dealii::hp::QCollection & face_quadratures, - const dealii::hp::MappingCollection &mapping, - const bool need_quadrature_points, - const unsigned int n_solution_vectors, - const types::subdomain_id subdomain_id, - const types::material_id material_id, - const std::map *> - * neumann_bc, - const ComponentMask & component_mask, - const Function *coefficients) - : finite_element(fe) - , face_quadratures(face_quadratures) - , fe_face_values_cell(mapping, - finite_element, - face_quadratures, - update_gradients | update_JxW_values | - (need_quadrature_points ? - update_quadrature_points : - UpdateFlags()) | - update_normal_vectors) - , fe_face_values_neighbor(mapping, - finite_element, - face_quadratures, - update_gradients | update_normal_vectors) - , fe_subface_values(mapping, - finite_element, - face_quadratures, - update_gradients | update_normal_vectors) - , phi(n_solution_vectors, - std::vector>( - face_quadratures.max_n_quadrature_points(), - std::vector(fe.n_components()))) - , psi(n_solution_vectors, - std::vector>>( - face_quadratures.max_n_quadrature_points(), - std::vector>(fe.n_components()))) - , neighbor_psi(n_solution_vectors, - std::vector>>( - face_quadratures.max_n_quadrature_points(), - std::vector>( - fe.n_components()))) - , normal_vectors(face_quadratures.max_n_quadrature_points()) - , neighbor_normal_vectors(face_quadratures.max_n_quadrature_points()) - , coefficient_values1(face_quadratures.max_n_quadrature_points()) - , coefficient_values(face_quadratures.max_n_quadrature_points(), - dealii::Vector(fe.n_components())) - , JxW_values(face_quadratures.max_n_quadrature_points()) - , subdomain_id(subdomain_id) - , material_id(material_id) - , neumann_bc(neumann_bc) - , component_mask(component_mask) - , coefficients(coefficients) - {} - - - - template - void - ParallelData::resize( - const unsigned int active_fe_index) - { - const unsigned int n_q_points = face_quadratures[active_fe_index].size(); - const unsigned int n_components = finite_element.n_components(); - - normal_vectors.resize(n_q_points); - neighbor_normal_vectors.resize(n_q_points); - coefficient_values1.resize(n_q_points); - coefficient_values.resize(n_q_points); - JxW_values.resize(n_q_points); - - for (unsigned int i = 0; i < phi.size(); ++i) - { - phi[i].resize(n_q_points); - psi[i].resize(n_q_points); - neighbor_psi[i].resize(n_q_points); - - for (unsigned int qp = 0; qp < n_q_points; ++qp) - { - phi[i][qp].resize(n_components); - psi[i][qp].resize(n_components); - neighbor_psi[i][qp].resize(n_components); - } - } - - for (unsigned int qp = 0; qp < n_q_points; ++qp) - coefficient_values[qp].reinit(n_components); - } - + const dealii::hp::FECollection finite_element; + /** + * The quadrature formulas to be used for the faces. 
+ */ + const dealii::hp::QCollection face_quadratures; /** - * Copy data from the local_face_integrals map of a single ParallelData - * object into a global such map. This is the copier stage of a WorkStream - * pipeline. + * FEFaceValues objects to integrate over the faces of the current and + * potentially of neighbor cells. */ - template - void - copy_local_to_global( - const std::map> &local_face_integrals, - std::map> - &face_integrals) - { - // now copy locally computed elements into the global map - for (typename std::map>::const_iterator p = - local_face_integrals.begin(); - p != local_face_integrals.end(); - ++p) - { - // double check that the element does not already exists in the - // global map - Assert(face_integrals.find(p->first) == face_integrals.end(), - ExcInternalError()); - - for (unsigned int i = 0; i < p->second.size(); ++i) - { - Assert(numbers::is_finite(p->second[i]), ExcInternalError()); - Assert(p->second[i] >= 0, ExcInternalError()); - } - - face_integrals[p->first] = p->second; - } - } + dealii::hp::FEFaceValues fe_face_values_cell; + dealii::hp::FEFaceValues fe_face_values_neighbor; + dealii::hp::FESubfaceValues fe_subface_values; + /** + * A vector to store the jump of the normal vectors in the quadrature + * points for each of the solution vectors (i.e. a temporary value). + * This vector is not allocated inside the functions that use it, but + * rather globally, since memory allocation is slow, in particular in + * presence of multiple threads where synchronization makes things even + * slower. + */ + std::vector>> phi; /** - * Actually do the computation based on the evaluated gradients in - * ParallelData. + * A vector for the gradients of the finite element function on one cell + * + * Let psi be a short name for a grad u_h, where the third + * index be the component of the finite element, and the second index + * the number of the quadrature point. The first index denotes the index + * of the solution vector. */ - template - std::vector - integrate_over_face( - ParallelData & parallel_data, - const typename DoFHandlerType::face_iterator &face, - dealii::hp::FEFaceValues - &fe_face_values_cell) - { - const unsigned int n_q_points = parallel_data.psi[0].size(), - n_components = - parallel_data.finite_element.n_components(), - n_solution_vectors = parallel_data.psi.size(); - - // now psi contains the following: - // - for an internal face, psi=[grad u] - // - for a neumann boundary face, psi=grad u - // each component being the mentioned value at one of the quadrature - // points - - // next we have to multiply this with the normal vector. Since we have - // taken the difference of gradients for internal faces, we may chose - // the normal vector of one cell, taking that of the neighbor would only - // change the sign. We take the outward normal. 
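
// The (removed) loop that follows condenses to the following self-contained
// sketch; it is illustrative only, with deal.II types replaced by stand-ins
// (std::array instead of Tensor<1, spacedim>, plain vectors instead of the
// ParallelData fields), and none of the names below are deal.II API. At each
// quadrature point the normal component of the gradient is taken from both
// sides of the face; since the two outward normals of an interior face are
// opposite, the sum is the jump of the normal derivative.
#include <array>
#include <vector>

namespace jump_sketch
{
  using Tensor1 = std::array<double, 3>; // stand-in for Tensor<1, spacedim>

  inline double dot(const Tensor1 &a, const Tensor1 &b)
  {
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
  }

  // phi[q] = grad u_h|_K . n_K + grad u_h|_K' . n_K' at quadrature point q.
  // On an interior face n_K' == -n_K, so phi[q] is the jump [du_h/dn].
  std::vector<double>
  normal_jump(const std::vector<Tensor1> &psi,              // gradients on K
              const std::vector<Tensor1> &neighbor_psi,     // gradients on K'
              const std::vector<Tensor1> &normals,          // n_K
              const std::vector<Tensor1> &neighbor_normals) // n_K'
  {
    std::vector<double> phi(psi.size(), 0.);
    for (std::size_t q = 0; q < psi.size(); ++q)
      phi[q] =
        dot(psi[q], normals[q]) + dot(neighbor_psi[q], neighbor_normals[q]);
    return phi;
  }
} // namespace jump_sketch
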
- - parallel_data.normal_vectors = - fe_face_values_cell.get_present_fe_values().get_all_normal_vectors(); - - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int component = 0; component < n_components; ++component) - for (unsigned int point = 0; point < n_q_points; ++point) - parallel_data.phi[n][point][component] = - (parallel_data.psi[n][point][component] * - parallel_data.normal_vectors[point]); - - if (face->at_boundary() == false) - { - // compute the jump in the gradients - - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int component = 0; component < n_components; - ++component) - for (unsigned int p = 0; p < n_q_points; ++p) - parallel_data.phi[n][p][component] += - (parallel_data.neighbor_psi[n][p][component] * - parallel_data.neighbor_normal_vectors[p]); - } - - // if a coefficient was given: use that to scale the jump in the - // gradient - if (parallel_data.coefficients != nullptr) - { - // scalar coefficient - if (parallel_data.coefficients->n_components == 1) - { - parallel_data.coefficients->value_list( - fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), - parallel_data.coefficient_values1); - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int component = 0; component < n_components; - ++component) - for (unsigned int point = 0; point < n_q_points; ++point) - parallel_data.phi[n][point][component] *= - parallel_data.coefficient_values1[point]; - } - else - // vector-valued coefficient - { - parallel_data.coefficients->vector_value_list( - fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), - parallel_data.coefficient_values); - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int component = 0; component < n_components; - ++component) - for (unsigned int point = 0; point < n_q_points; ++point) - parallel_data.phi[n][point][component] *= - parallel_data.coefficient_values[point](component); - } - } - - - if (face->at_boundary() == true) - // neumann boundary face. 
compute difference between normal derivative - // and boundary function - { - const types::boundary_id boundary_id = face->boundary_id(); - - Assert(parallel_data.neumann_bc->find(boundary_id) != - parallel_data.neumann_bc->end(), - ExcInternalError()); - // get the values of the boundary function at the quadrature points - if (n_components == 1) - { - std::vector g(n_q_points); - parallel_data.neumann_bc->find(boundary_id) - ->second->value_list(fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), - g); - - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int point = 0; point < n_q_points; ++point) - parallel_data.phi[n][point][0] -= g[point]; - } - else - { - std::vector> g( - n_q_points, dealii::Vector(n_components)); - parallel_data.neumann_bc->find(boundary_id) - ->second->vector_value_list(fe_face_values_cell - .get_present_fe_values() - .get_quadrature_points(), - g); - - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int component = 0; component < n_components; - ++component) - for (unsigned int point = 0; point < n_q_points; ++point) - parallel_data.phi[n][point][component] -= - g[point](component); - } - } - - - - // now phi contains the following: - // - for an internal face, phi=[a du/dn] - // - for a neumann boundary face, phi=a du/dn-g - // each component being the mentioned value at one of the quadrature - // points - - parallel_data.JxW_values = - fe_face_values_cell.get_present_fe_values().get_JxW_values(); - - // take the square of the phi[i] for integration, and sum up - std::vector face_integral(n_solution_vectors, 0); - for (unsigned int n = 0; n < n_solution_vectors; ++n) - for (unsigned int component = 0; component < n_components; ++component) - if (parallel_data.component_mask[component] == true) - for (unsigned int p = 0; p < n_q_points; ++p) - face_integral[n] += numbers::NumberTraits::abs_square( - parallel_data.phi[n][p][component]) * - parallel_data.JxW_values[p]; + std::vector>>> psi; - return face_integral; - } + /** + * The same vector for a neighbor cell + */ + std::vector>>> + neighbor_psi; /** - * A factor to scale the integral for the face at the boundary. Used for - * Neumann BC. + * The normal vectors of the finite element function on one face */ - template - double - boundary_face_factor( - const typename DoFHandlerType::active_cell_iterator &cell, - const unsigned int face_no, - const dealii::hp::FEFaceValues - &fe_face_values_cell, - const typename KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::Strategy strategy) - { - switch (strategy) - { - case KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::cell_diameter_over_24: - { - return 1.0; - } - case KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::cell_diameter: - { - return 1.0; - } - case KellyErrorEstimator:: - face_diameter_over_twice_max_degree: - { - const double cell_degree = - fe_face_values_cell.get_fe_collection()[cell->active_fe_index()] - .degree; - return cell->face(face_no)->diameter() / cell_degree; - } - default: - { - Assert(false, ExcNotImplemented()); - return -std::numeric_limits::max(); - } - } - } + std::vector> normal_vectors; + /** + * Normal vectors of the opposing face. + */ + std::vector> neighbor_normal_vectors; /** - * A factor to scale the integral for the regular face. + * Two arrays needed for the values of coefficients in the jumps, if + * they are given. 
     */
-    template
-    double
-    regular_face_factor(
-      const typename DoFHandlerType::active_cell_iterator &cell,
-      const unsigned int                                   face_no,
-      const dealii::hp::FEFaceValues
-        &fe_face_values_cell,
-      const dealii::hp::FEFaceValues
-        &fe_face_values_neighbor,
-      const typename KellyErrorEstimator<
-        DoFHandlerType::dimension,
-        DoFHandlerType::space_dimension>::Strategy strategy)
-    {
-      switch (strategy)
-        {
-          case KellyErrorEstimator<
-            DoFHandlerType::dimension,
-            DoFHandlerType::space_dimension>::cell_diameter_over_24:
-            {
-              return 1.0;
-            }
-          case KellyErrorEstimator<
-            DoFHandlerType::dimension,
-            DoFHandlerType::space_dimension>::cell_diameter:
-            {
-              return 1.0;
-            }
-          case KellyErrorEstimator::
-            face_diameter_over_twice_max_degree:
-            {
-              const double cell_degree =
-                fe_face_values_cell.get_fe_collection()[cell->active_fe_index()]
-                  .degree;
-              const double neighbor_degree =
-                fe_face_values_neighbor
-                  .get_fe_collection()[cell->neighbor(face_no)
-                                         ->active_fe_index()]
-                  .degree;
-              return cell->face(face_no)->diameter() /
-                     std::max(cell_degree, neighbor_degree) / 2.0;
-            }
-          default:
-            {
-              Assert(false, ExcNotImplemented());
-              return -std::numeric_limits::max();
-            }
-        }
-    }
+    std::vector          coefficient_values1;
+    std::vector> coefficient_values;

     /**
-     * A factor to scale the integral for the irregular face.
+     * Array for the products of Jacobian determinants and weights of
+     * quadrature points.
      */
-    template
-    double
-    irregular_face_factor(
-      const typename DoFHandlerType::active_cell_iterator &cell,
-      const typename DoFHandlerType::active_cell_iterator &neighbor_child,
-      const unsigned int                                   face_no,
-      const unsigned int                                   subface_no,
-      const dealii::hp::FEFaceValues
-        &fe_face_values,
-      dealii::hp::FESubfaceValues
-        &fe_subface_values,
-      const typename KellyErrorEstimator<
-        DoFHandlerType::dimension,
-        DoFHandlerType::space_dimension>::Strategy strategy)
-    {
-      switch (strategy)
-        {
-          case KellyErrorEstimator<
-            DoFHandlerType::dimension,
-            DoFHandlerType::space_dimension>::cell_diameter_over_24:
-            {
-              return 1.0;
-            }
-          case KellyErrorEstimator<
-            DoFHandlerType::dimension,
-            DoFHandlerType::space_dimension>::cell_diameter:
-            {
-              return 1.0;
-            }
-          case KellyErrorEstimator::
-            face_diameter_over_twice_max_degree:
-            {
-              const double cell_degree =
-                fe_face_values.get_fe_collection()[cell->active_fe_index()]
-                  .degree;
-              const double neighbor_child_degree =
-                fe_subface_values
-                  .get_fe_collection()[neighbor_child->active_fe_index()]
-                  .degree;
-              return cell->face(face_no)->child(subface_no)->diameter() /
-                     std::max(neighbor_child_degree, cell_degree) / 2.0;
-            }
-          default:
-            {
-              Assert(false, ExcNotImplemented());
-              return -std::numeric_limits::max();
-            }
-        }
-    }
+    std::vector JxW_values;

     /**
-     * A factor used when summing up all the contributions from different faces
-     * of each cell.
+     * The subdomain id we are to care for.
*/ - template - double - cell_factor(const typename DoFHandlerType::active_cell_iterator &cell, - const unsigned int /*face_no*/, - const DoFHandlerType & /*dof_handler*/, - const typename KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::Strategy strategy) - { - switch (strategy) - { - case KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::cell_diameter_over_24: - { - return cell->diameter() / 24; - } - case KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::cell_diameter: - { - return cell->diameter(); - } - case KellyErrorEstimator:: - face_diameter_over_twice_max_degree: - { - return 1.0; - } - default: - { - Assert(false, ExcNotImplemented()); - return -std::numeric_limits::max(); - } - } - } + const types::subdomain_id subdomain_id; + /** + * The material id we are to care for. + */ + const types::material_id material_id; + /** + * Some more references to input data to the + * KellyErrorEstimator::estimate() function. + */ + const std::map *> + * neumann_bc; + const ComponentMask component_mask; + const Function *coefficients; + /** + * Constructor. + */ + template + ParallelData(const FE & fe, + const dealii::hp::QCollection &face_quadratures, + const dealii::hp::MappingCollection &mapping, + const bool need_quadrature_points, + const unsigned int n_solution_vectors, + const types::subdomain_id subdomain_id, + const types::material_id material_id, + const std::map *> *neumann_bc, + const ComponentMask & component_mask, + const Function *coefficients); /** - * Actually do the computation on a face which has no hanging nodes (it is - * regular), i.e. either on the other side there is nirvana (face is at - * boundary), or the other side's refinement level is the same as that of - * this side, then handle the integration of these both cases together. + * Resize the arrays so that they fit the number of quadrature points + * associated with the given finite element index into the hp + * collections. 
*/ - template void - integrate_over_regular_face( - const std::vector &solutions, - ParallelData - ¶llel_data, - std::map> - & local_face_integrals, - const typename DoFHandlerType::active_cell_iterator &cell, - const unsigned int face_no, - dealii::hp::FEFaceValues - &fe_face_values_cell, - dealii::hp::FEFaceValues - &fe_face_values_neighbor, - const typename KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::Strategy strategy) - { - const unsigned int dim = DoFHandlerType::dimension; - (void)dim; - - const typename DoFHandlerType::face_iterator face = cell->face(face_no); - const unsigned int n_solution_vectors = solutions.size(); - - - // initialize data of the restriction - // of this cell to the present face - fe_face_values_cell.reinit(cell, face_no, cell->active_fe_index()); - - // get gradients of the finite element - // function on this cell - for (unsigned int n = 0; n < n_solution_vectors; ++n) - fe_face_values_cell.get_present_fe_values().get_function_gradients( - *solutions[n], parallel_data.psi[n]); - - double factor; - // now compute over the other side of the face - if (face->at_boundary() == false) - // internal face; integrate jump of gradient across this face - { - Assert(cell->neighbor(face_no).state() == IteratorState::valid, - ExcInternalError()); - - const typename DoFHandlerType::active_cell_iterator neighbor = - cell->neighbor(face_no); - - // find which number the current face has relative to the - // neighboring cell - const unsigned int neighbor_neighbor = - cell->neighbor_of_neighbor(face_no); - Assert(neighbor_neighbor < GeometryInfo::faces_per_cell, - ExcInternalError()); - - // get restriction of finite element function of @p{neighbor} to the - // common face. in the hp case, use the quadrature formula that - // matches the one we would use for the present cell - fe_face_values_neighbor.reinit(neighbor, - neighbor_neighbor, - cell->active_fe_index()); - - factor = regular_face_factor(cell, - face_no, - fe_face_values_cell, - fe_face_values_neighbor, - strategy); - - // get gradients on neighbor cell - for (unsigned int n = 0; n < n_solution_vectors; ++n) - { - fe_face_values_neighbor.get_present_fe_values() - .get_function_gradients(*solutions[n], - parallel_data.neighbor_psi[n]); - } - - parallel_data.neighbor_normal_vectors = + resize(const unsigned int active_fe_index); + }; + + + template + template + ParallelData::ParallelData( + const FE & fe, + const dealii::hp::QCollection & face_quadratures, + const dealii::hp::MappingCollection &mapping, + const bool need_quadrature_points, + const unsigned int n_solution_vectors, + const types::subdomain_id subdomain_id, + const types::material_id material_id, + const std::map *> + * neumann_bc, + const ComponentMask & component_mask, + const Function *coefficients) + : finite_element(fe) + , face_quadratures(face_quadratures) + , fe_face_values_cell(mapping, + finite_element, + face_quadratures, + update_gradients | update_JxW_values | + (need_quadrature_points ? 
update_quadrature_points : + UpdateFlags()) | + update_normal_vectors) + , fe_face_values_neighbor(mapping, + finite_element, + face_quadratures, + update_gradients | update_normal_vectors) + , fe_subface_values(mapping, + finite_element, + face_quadratures, + update_gradients | update_normal_vectors) + , phi(n_solution_vectors, + std::vector>( + face_quadratures.max_n_quadrature_points(), + std::vector(fe.n_components()))) + , psi(n_solution_vectors, + std::vector>>( + face_quadratures.max_n_quadrature_points(), + std::vector>(fe.n_components()))) + , neighbor_psi(n_solution_vectors, + std::vector>>( + face_quadratures.max_n_quadrature_points(), + std::vector>( + fe.n_components()))) + , normal_vectors(face_quadratures.max_n_quadrature_points()) + , neighbor_normal_vectors(face_quadratures.max_n_quadrature_points()) + , coefficient_values1(face_quadratures.max_n_quadrature_points()) + , coefficient_values(face_quadratures.max_n_quadrature_points(), + dealii::Vector(fe.n_components())) + , JxW_values(face_quadratures.max_n_quadrature_points()) + , subdomain_id(subdomain_id) + , material_id(material_id) + , neumann_bc(neumann_bc) + , component_mask(component_mask) + , coefficients(coefficients) + {} + + + + template + void + ParallelData::resize( + const unsigned int active_fe_index) + { + const unsigned int n_q_points = face_quadratures[active_fe_index].size(); + const unsigned int n_components = finite_element.n_components(); + + normal_vectors.resize(n_q_points); + neighbor_normal_vectors.resize(n_q_points); + coefficient_values1.resize(n_q_points); + coefficient_values.resize(n_q_points); + JxW_values.resize(n_q_points); + + for (unsigned int i = 0; i < phi.size(); ++i) + { + phi[i].resize(n_q_points); + psi[i].resize(n_q_points); + neighbor_psi[i].resize(n_q_points); + + for (unsigned int qp = 0; qp < n_q_points; ++qp) + { + phi[i][qp].resize(n_components); + psi[i][qp].resize(n_components); + neighbor_psi[i][qp].resize(n_components); + } + } + + for (unsigned int qp = 0; qp < n_q_points; ++qp) + coefficient_values[qp].reinit(n_components); + } + + + + /** + * Copy data from the local_face_integrals map of a single ParallelData + * object into a global such map. This is the copier stage of a WorkStream + * pipeline. + */ + template + void + copy_local_to_global( + const std::map> + &local_face_integrals, + std::map> + &face_integrals) + { + // now copy locally computed elements into the global map + for (typename std::map>::const_iterator p = + local_face_integrals.begin(); + p != local_face_integrals.end(); + ++p) + { + // double check that the element does not already exists in the + // global map + Assert(face_integrals.find(p->first) == face_integrals.end(), + ExcInternalError()); + + for (unsigned int i = 0; i < p->second.size(); ++i) + { + Assert(numbers::is_finite(p->second[i]), ExcInternalError()); + Assert(p->second[i] >= 0, ExcInternalError()); + } + + face_integrals[p->first] = p->second; + } + } + + + /** + * Actually do the computation based on the evaluated gradients in + * ParallelData. 
+   */
+  template
+  std::vector
+  integrate_over_face(ParallelData &parallel_data,
+                      const typename DoFHandlerType::face_iterator &face,
+                      dealii::hp::FEFaceValues
+                        &fe_face_values_cell)
+  {
+    const unsigned int n_q_points = parallel_data.psi[0].size(),
+                       n_components =
+                         parallel_data.finite_element.n_components(),
+                       n_solution_vectors = parallel_data.psi.size();
+
+    // now psi contains the following:
+    // - for an internal face, psi=[grad u]
+    // - for a neumann boundary face, psi=grad u
+    // each component being the mentioned value at one of the quadrature
+    // points
+
+    // next we have to multiply this with the normal vector. Since we have
+    // taken the difference of gradients for internal faces, we may choose
+    // the normal vector of one cell; taking that of the neighbor would only
+    // change the sign. We take the outward normal.
+
+    parallel_data.normal_vectors =
+      fe_face_values_cell.get_present_fe_values().get_all_normal_vectors();
+
+    for (unsigned int n = 0; n < n_solution_vectors; ++n)
+      for (unsigned int component = 0; component < n_components; ++component)
+        for (unsigned int point = 0; point < n_q_points; ++point)
+          parallel_data.phi[n][point][component] =
+            (parallel_data.psi[n][point][component] *
+             parallel_data.normal_vectors[point]);
+
+    if (face->at_boundary() == false)
+      {
+        // compute the jump in the gradients
+
+        for (unsigned int n = 0; n < n_solution_vectors; ++n)
+          for (unsigned int component = 0; component < n_components;
+               ++component)
+            for (unsigned int p = 0; p < n_q_points; ++p)
+              parallel_data.phi[n][p][component] +=
+                (parallel_data.neighbor_psi[n][p][component] *
+                 parallel_data.neighbor_normal_vectors[p]);
+      }
+
+    // if a coefficient was given: use that to scale the jump in the
+    // gradient
+    if (parallel_data.coefficients != nullptr)
+      {
+        // scalar coefficient
+        if (parallel_data.coefficients->n_components == 1)
+          {
+            parallel_data.coefficients->value_list(
+              fe_face_values_cell.get_present_fe_values()
+                .get_quadrature_points(),
+              parallel_data.coefficient_values1);
+            for (unsigned int n = 0; n < n_solution_vectors; ++n)
+              for (unsigned int component = 0; component < n_components;
+                   ++component)
+                for (unsigned int point = 0; point < n_q_points; ++point)
+                  parallel_data.phi[n][point][component] *=
+                    parallel_data.coefficient_values1[point];
+          }
+        else
+          // vector-valued coefficient
+          {
+            parallel_data.coefficients->vector_value_list(
+              fe_face_values_cell.get_present_fe_values()
+                .get_quadrature_points(),
+              parallel_data.coefficient_values);
+            for (unsigned int n = 0; n < n_solution_vectors; ++n)
+              for (unsigned int component = 0; component < n_components;
+                   ++component)
+                for (unsigned int point = 0; point < n_q_points; ++point)
+                  parallel_data.phi[n][point][component] *=
+                    parallel_data.coefficient_values[point](component);
+          }
+      }
+
+
+    if (face->at_boundary() == true)
+      // neumann boundary face.
compute difference between normal derivative + // and boundary function + { + const types::boundary_id boundary_id = face->boundary_id(); + + Assert(parallel_data.neumann_bc->find(boundary_id) != + parallel_data.neumann_bc->end(), + ExcInternalError()); + // get the values of the boundary function at the quadrature points + if (n_components == 1) + { + std::vector g(n_q_points); + parallel_data.neumann_bc->find(boundary_id) + ->second->value_list(fe_face_values_cell.get_present_fe_values() + .get_quadrature_points(), + g); + + for (unsigned int n = 0; n < n_solution_vectors; ++n) + for (unsigned int point = 0; point < n_q_points; ++point) + parallel_data.phi[n][point][0] -= g[point]; + } + else + { + std::vector> g( + n_q_points, dealii::Vector(n_components)); + parallel_data.neumann_bc->find(boundary_id) + ->second->vector_value_list(fe_face_values_cell + .get_present_fe_values() + .get_quadrature_points(), + g); + + for (unsigned int n = 0; n < n_solution_vectors; ++n) + for (unsigned int component = 0; component < n_components; + ++component) + for (unsigned int point = 0; point < n_q_points; ++point) + parallel_data.phi[n][point][component] -= g[point](component); + } + } + + + + // now phi contains the following: + // - for an internal face, phi=[a du/dn] + // - for a neumann boundary face, phi=a du/dn-g + // each component being the mentioned value at one of the quadrature + // points + + parallel_data.JxW_values = + fe_face_values_cell.get_present_fe_values().get_JxW_values(); + + // take the square of the phi[i] for integration, and sum up + std::vector face_integral(n_solution_vectors, 0); + for (unsigned int n = 0; n < n_solution_vectors; ++n) + for (unsigned int component = 0; component < n_components; ++component) + if (parallel_data.component_mask[component] == true) + for (unsigned int p = 0; p < n_q_points; ++p) + face_integral[n] += numbers::NumberTraits::abs_square( + parallel_data.phi[n][p][component]) * + parallel_data.JxW_values[p]; + + return face_integral; + } + + /** + * A factor to scale the integral for the face at the boundary. Used for + * Neumann BC. + */ + template + double + boundary_face_factor( + const typename DoFHandlerType::active_cell_iterator &cell, + const unsigned int face_no, + const dealii::hp::FEFaceValues + &fe_face_values_cell, + const typename KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::Strategy strategy) + { + switch (strategy) + { + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter_over_24: + { + return 1.0; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter: + { + return 1.0; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::face_diameter_over_twice_max_degree: + { + const double cell_degree = + fe_face_values_cell.get_fe_collection()[cell->active_fe_index()] + .degree; + return cell->face(face_no)->diameter() / cell_degree; + } + default: + { + Assert(false, ExcNotImplemented()); + return -std::numeric_limits::max(); + } + } + } + + + /** + * A factor to scale the integral for the regular face. 
+ */ + template + double + regular_face_factor( + const typename DoFHandlerType::active_cell_iterator &cell, + const unsigned int face_no, + const dealii::hp::FEFaceValues + &fe_face_values_cell, + const dealii::hp::FEFaceValues + &fe_face_values_neighbor, + const typename KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::Strategy strategy) + { + switch (strategy) + { + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter_over_24: + { + return 1.0; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter: + { + return 1.0; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::face_diameter_over_twice_max_degree: + { + const double cell_degree = + fe_face_values_cell.get_fe_collection()[cell->active_fe_index()] + .degree; + const double neighbor_degree = + fe_face_values_neighbor + .get_fe_collection()[cell->neighbor(face_no)->active_fe_index()] + .degree; + return cell->face(face_no)->diameter() / + std::max(cell_degree, neighbor_degree) / 2.0; + } + default: + { + Assert(false, ExcNotImplemented()); + return -std::numeric_limits::max(); + } + } + } + + /** + * A factor to scale the integral for the irregular face. + */ + template + double + irregular_face_factor( + const typename DoFHandlerType::active_cell_iterator &cell, + const typename DoFHandlerType::active_cell_iterator &neighbor_child, + const unsigned int face_no, + const unsigned int subface_no, + const dealii::hp::FEFaceValues + &fe_face_values, + dealii::hp::FESubfaceValues + &fe_subface_values, + const typename KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::Strategy strategy) + { + switch (strategy) + { + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter_over_24: + { + return 1.0; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter: + { + return 1.0; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::face_diameter_over_twice_max_degree: + { + const double cell_degree = + fe_face_values.get_fe_collection()[cell->active_fe_index()] + .degree; + const double neighbor_child_degree = + fe_subface_values + .get_fe_collection()[neighbor_child->active_fe_index()] + .degree; + return cell->face(face_no)->child(subface_no)->diameter() / + std::max(neighbor_child_degree, cell_degree) / 2.0; + } + default: + { + Assert(false, ExcNotImplemented()); + return -std::numeric_limits::max(); + } + } + } + + /** + * A factor used when summing up all the contribution from different faces + * of each cell. 
+ */ + template + double + cell_factor(const typename DoFHandlerType::active_cell_iterator &cell, + const unsigned int /*face_no*/, + const DoFHandlerType & /*dof_handler*/, + const typename KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::Strategy strategy) + { + switch (strategy) + { + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter_over_24: + { + return cell->diameter() / 24; + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::cell_diameter: + { + return cell->diameter(); + } + case KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::face_diameter_over_twice_max_degree: + { + return 1.0; + } + default: + { + Assert(false, ExcNotImplemented()); + return -std::numeric_limits::max(); + } + } + } + + + + /** + * Actually do the computation on a face which has no hanging nodes (it is + * regular), i.e. either on the other side there is nirvana (face is at + * boundary), or the other side's refinement level is the same as that of + * this side, then handle the integration of these both cases together. + */ + template + void + integrate_over_regular_face( + const std::vector &solutions, + ParallelData + ¶llel_data, + std::map> + & local_face_integrals, + const typename DoFHandlerType::active_cell_iterator &cell, + const unsigned int face_no, + dealii::hp::FEFaceValues + &fe_face_values_cell, + dealii::hp::FEFaceValues + &fe_face_values_neighbor, + const typename KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::Strategy strategy) + { + const unsigned int dim = DoFHandlerType::dimension; + (void)dim; + + const typename DoFHandlerType::face_iterator face = cell->face(face_no); + const unsigned int n_solution_vectors = solutions.size(); + + + // initialize data of the restriction + // of this cell to the present face + fe_face_values_cell.reinit(cell, face_no, cell->active_fe_index()); + + // get gradients of the finite element + // function on this cell + for (unsigned int n = 0; n < n_solution_vectors; ++n) + fe_face_values_cell.get_present_fe_values().get_function_gradients( + *solutions[n], parallel_data.psi[n]); + + double factor; + // now compute over the other side of the face + if (face->at_boundary() == false) + // internal face; integrate jump of gradient across this face + { + Assert(cell->neighbor(face_no).state() == IteratorState::valid, + ExcInternalError()); + + const typename DoFHandlerType::active_cell_iterator neighbor = + cell->neighbor(face_no); + + // find which number the current face has relative to the + // neighboring cell + const unsigned int neighbor_neighbor = + cell->neighbor_of_neighbor(face_no); + Assert(neighbor_neighbor < GeometryInfo::faces_per_cell, + ExcInternalError()); + + // get restriction of finite element function of @p{neighbor} to the + // common face. 
in the hp case, use the quadrature formula that + // matches the one we would use for the present cell + fe_face_values_neighbor.reinit(neighbor, + neighbor_neighbor, + cell->active_fe_index()); + + factor = regular_face_factor(cell, + face_no, + fe_face_values_cell, + fe_face_values_neighbor, + strategy); + + // get gradients on neighbor cell + for (unsigned int n = 0; n < n_solution_vectors; ++n) + { fe_face_values_neighbor.get_present_fe_values() - .get_all_normal_vectors(); - } - else - { - factor = boundary_face_factor(cell, - face_no, - fe_face_values_cell, - strategy); - } - - // now go to the generic function that does all the other things - local_face_integrals[face] = - integrate_over_face(parallel_data, face, fe_face_values_cell); - - for (unsigned int i = 0; i < local_face_integrals[face].size(); i++) - local_face_integrals[face][i] *= factor; - } + .get_function_gradients(*solutions[n], + parallel_data.neighbor_psi[n]); + } + parallel_data.neighbor_normal_vectors = + fe_face_values_neighbor.get_present_fe_values() + .get_all_normal_vectors(); + } + else + { + factor = boundary_face_factor(cell, + face_no, + fe_face_values_cell, + strategy); + } + // now go to the generic function that does all the other things + local_face_integrals[face] = + integrate_over_face(parallel_data, face, fe_face_values_cell); + + for (unsigned int i = 0; i < local_face_integrals[face].size(); i++) + local_face_integrals[face][i] *= factor; + } + + + + /** + * The same applies as for the function above, except that integration is + * over face @p face_no of @p cell, where the respective neighbor is + * refined, so that the integration is a bit more complex. + */ + template + void + integrate_over_irregular_face( + const std::vector &solutions, + ParallelData + ¶llel_data, + std::map> + & local_face_integrals, + const typename DoFHandlerType::active_cell_iterator &cell, + const unsigned int face_no, + dealii::hp::FEFaceValues &fe_face_values, + dealii::hp::FESubfaceValues + &fe_subface_values, + const typename KellyErrorEstimator< + DoFHandlerType::dimension, + DoFHandlerType::space_dimension>::Strategy strategy) + { + const unsigned int dim = DoFHandlerType::dimension; + (void)dim; + + const typename DoFHandlerType::cell_iterator neighbor = + cell->neighbor(face_no); + (void)neighbor; + const unsigned int n_solution_vectors = solutions.size(); + const typename DoFHandlerType::face_iterator face = cell->face(face_no); + + Assert(neighbor.state() == IteratorState::valid, ExcInternalError()); + Assert(face->has_children(), ExcInternalError()); + + // set up a vector of the gradients of the finite element function on + // this cell at the quadrature points + // + // let psi be a short name for [a grad u_h], where the second index be + // the component of the finite element, and the first index the number + // of the quadrature point + + // store which number @p{cell} has in the list of neighbors of + // @p{neighbor} + const unsigned int neighbor_neighbor = cell->neighbor_of_neighbor(face_no); + Assert(neighbor_neighbor < GeometryInfo::faces_per_cell, + ExcInternalError()); + + // loop over all subfaces + for (unsigned int subface_no = 0; subface_no < face->n_children(); + ++subface_no) + { + // get an iterator pointing to the cell behind the present subface + const typename DoFHandlerType::active_cell_iterator neighbor_child = + cell->neighbor_child_on_subface(face_no, subface_no); + Assert(!neighbor_child->has_children(), ExcInternalError()); + + // restrict the finite element on the present cell to 
the subface + fe_subface_values.reinit(cell, + face_no, + subface_no, + cell->active_fe_index()); + + // restrict the finite element on the neighbor cell to the common + // @p{subface}. + fe_face_values.reinit(neighbor_child, + neighbor_neighbor, + cell->active_fe_index()); + + const double factor = + irregular_face_factor(cell, + neighbor_child, + face_no, + subface_no, + fe_face_values, + fe_subface_values, + strategy); + + // store the gradient of the solution in psi + for (unsigned int n = 0; n < n_solution_vectors; ++n) + fe_subface_values.get_present_fe_values().get_function_gradients( + *solutions[n], parallel_data.psi[n]); - /** - * The same applies as for the function above, except that integration is - * over face @p face_no of @p cell, where the respective neighbor is - * refined, so that the integration is a bit more complex. - */ - template - void - integrate_over_irregular_face( - const std::vector &solutions, - ParallelData - ¶llel_data, - std::map> - & local_face_integrals, - const typename DoFHandlerType::active_cell_iterator &cell, - const unsigned int face_no, - dealii::hp::FEFaceValues &fe_face_values, - dealii::hp::FESubfaceValues - &fe_subface_values, - const typename KellyErrorEstimator< - DoFHandlerType::dimension, - DoFHandlerType::space_dimension>::Strategy strategy) - { - const unsigned int dim = DoFHandlerType::dimension; - (void)dim; - - const typename DoFHandlerType::cell_iterator neighbor = - cell->neighbor(face_no); - (void)neighbor; - const unsigned int n_solution_vectors = solutions.size(); - const typename DoFHandlerType::face_iterator face = cell->face(face_no); - - Assert(neighbor.state() == IteratorState::valid, ExcInternalError()); - Assert(face->has_children(), ExcInternalError()); - - // set up a vector of the gradients of the finite element function on - // this cell at the quadrature points - // - // let psi be a short name for [a grad u_h], where the second index be - // the component of the finite element, and the first index the number - // of the quadrature point - - // store which number @p{cell} has in the list of neighbors of - // @p{neighbor} - const unsigned int neighbor_neighbor = - cell->neighbor_of_neighbor(face_no); - Assert(neighbor_neighbor < GeometryInfo::faces_per_cell, - ExcInternalError()); - - // loop over all subfaces - for (unsigned int subface_no = 0; subface_no < face->n_children(); - ++subface_no) - { - // get an iterator pointing to the cell behind the present subface - const typename DoFHandlerType::active_cell_iterator neighbor_child = - cell->neighbor_child_on_subface(face_no, subface_no); - Assert(!neighbor_child->has_children(), ExcInternalError()); - - // restrict the finite element on the present cell to the subface - fe_subface_values.reinit(cell, - face_no, - subface_no, - cell->active_fe_index()); - - // restrict the finite element on the neighbor cell to the common - // @p{subface}. 
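
// The collection step further below ("finally loop over all subfaces to
// collect the contributions ...") reduces to this self-contained sketch.
// Face iterators are replaced by plain integer ids and every name here is
// an illustrative stand-in, not deal.II API.
#include <map>
#include <vector>

namespace subface_sketch
{
  using FaceId = unsigned int;

  // Once every child of 'mother' has an entry in 'local', add the children's
  // integrals component-wise and store the total under the mother face, so
  // that the later summation over a cell's faces can treat the refined face
  // like a regular one.
  inline void
  sum_subfaces(std::map<FaceId, std::vector<double>> &local,
               const FaceId                           mother,
               const std::vector<FaceId> &            children)
  {
    const std::size_t n_solution_vectors = local.at(children.front()).size();
    std::vector<double> sum(n_solution_vectors, 0.);
    for (const FaceId child : children)
      for (std::size_t k = 0; k < n_solution_vectors; ++k)
        sum[k] += local.at(child)[k];
    local[mother] = sum;
  }
} // namespace subface_sketch
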
- fe_face_values.reinit(neighbor_child, - neighbor_neighbor, - cell->active_fe_index()); - - const double factor = - irregular_face_factor(cell, - neighbor_child, - face_no, - subface_no, - fe_face_values, - fe_subface_values, - strategy); - - // store the gradient of the solution in psi - for (unsigned int n = 0; n < n_solution_vectors; ++n) - fe_subface_values.get_present_fe_values().get_function_gradients( - *solutions[n], parallel_data.psi[n]); - - // store the gradient from the neighbor's side in @p{neighbor_psi} - for (unsigned int n = 0; n < n_solution_vectors; ++n) - fe_face_values.get_present_fe_values().get_function_gradients( - *solutions[n], parallel_data.neighbor_psi[n]); - - // call generic evaluate function - parallel_data.neighbor_normal_vectors = - fe_subface_values.get_present_fe_values().get_all_normal_vectors(); - - local_face_integrals[neighbor_child->face(neighbor_neighbor)] = - integrate_over_face(parallel_data, face, fe_face_values); - for (unsigned int i = 0; - i < local_face_integrals[neighbor_child->face(neighbor_neighbor)] - .size(); - i++) - local_face_integrals[neighbor_child->face(neighbor_neighbor)][i] *= - factor; - } - - // finally loop over all subfaces to collect the contributions of the - // subfaces and store them with the mother face - std::vector sum(n_solution_vectors, 0); - for (unsigned int subface_no = 0; subface_no < face->n_children(); - ++subface_no) - { - Assert(local_face_integrals.find(face->child(subface_no)) != - local_face_integrals.end(), - ExcInternalError()); - Assert(local_face_integrals[face->child(subface_no)][0] >= 0, - ExcInternalError()); - - for (unsigned int n = 0; n < n_solution_vectors; ++n) - sum[n] += local_face_integrals[face->child(subface_no)][n]; - } - - local_face_integrals[face] = sum; - } + // store the gradient from the neighbor's side in @p{neighbor_psi} + for (unsigned int n = 0; n < n_solution_vectors; ++n) + fe_face_values.get_present_fe_values().get_function_gradients( + *solutions[n], parallel_data.neighbor_psi[n]); + + // call generic evaluate function + parallel_data.neighbor_normal_vectors = + fe_subface_values.get_present_fe_values().get_all_normal_vectors(); + + local_face_integrals[neighbor_child->face(neighbor_neighbor)] = + integrate_over_face(parallel_data, face, fe_face_values); + for (unsigned int i = 0; + i < local_face_integrals[neighbor_child->face(neighbor_neighbor)] + .size(); + i++) + local_face_integrals[neighbor_child->face(neighbor_neighbor)][i] *= + factor; + } + // finally loop over all subfaces to collect the contributions of the + // subfaces and store them with the mother face + std::vector sum(n_solution_vectors, 0); + for (unsigned int subface_no = 0; subface_no < face->n_children(); + ++subface_no) + { + Assert(local_face_integrals.find(face->child(subface_no)) != + local_face_integrals.end(), + ExcInternalError()); + Assert(local_face_integrals[face->child(subface_no)][0] >= 0, + ExcInternalError()); - /** - * Computate the error on the faces of a single cell. - * - * This function is only needed in two or three dimensions. The error - * estimator in one dimension is implemented separately. 
-   */
-    template
-    void
-    estimate_one_cell(
-      const typename DoFHandlerType::active_cell_iterator &cell,
-      ParallelData
-        &parallel_data,
-      std::map>
-        & local_face_integrals,
-      const std::vector &solutions,
-      const typename KellyErrorEstimator<
-        DoFHandlerType::dimension,
-        DoFHandlerType::space_dimension>::Strategy strategy)
-    {
-      const unsigned int dim = DoFHandlerType::dimension;
-      const unsigned int n_solution_vectors = solutions.size();
-
-      const types::subdomain_id subdomain_id = parallel_data.subdomain_id;
-      const unsigned int material_id = parallel_data.material_id;
-
-      // empty our own copy of the local face integrals
-      local_face_integrals.clear();
-
-      // loop over all faces of this cell
-      for (unsigned int face_no = 0;
-           face_no < GeometryInfo::faces_per_cell;
-           ++face_no)
-        {
-          const typename DoFHandlerType::face_iterator face =
-            cell->face(face_no);
-
-          // make sure we do work only once: this face may either be regular
-          // or irregular. if it is regular and has a neighbor, then we visit
-          // the face twice, once from every side. let the one with the lower
-          // index do the work. if it is at the boundary, or if the face is
-          // irregular, then do the work below
-          if ((face->has_children() == false) && !cell->at_boundary(face_no) &&
-              (!cell->neighbor_is_coarser(face_no) &&
-               (cell->neighbor(face_no)->index() < cell->index() ||
-                (cell->neighbor(face_no)->index() == cell->index() &&
-                 cell->neighbor(face_no)->level() < cell->level()))))
+      for (unsigned int n = 0; n < n_solution_vectors; ++n)
+        sum[n] += local_face_integrals[face->child(subface_no)][n];
+      }
+
+    local_face_integrals[face] = sum;
+  }
+
+
+  /**
+   * Compute the error on the faces of a single cell.
+   *
+   * This function is only needed in two or three dimensions. The error
+   * estimator in one dimension is implemented separately.
+   */
+  template
+  void
+  estimate_one_cell(
+    const typename DoFHandlerType::active_cell_iterator &cell,
+    ParallelData
+      &parallel_data,
+    std::map>
+      & local_face_integrals,
+    const std::vector &solutions,
+    const typename KellyErrorEstimator<
+      DoFHandlerType::dimension,
+      DoFHandlerType::space_dimension>::Strategy strategy)
+  {
+    const unsigned int dim = DoFHandlerType::dimension;
+    const unsigned int n_solution_vectors = solutions.size();
+
+    const types::subdomain_id subdomain_id = parallel_data.subdomain_id;
+    const unsigned int material_id = parallel_data.material_id;
+
+    // empty our own copy of the local face integrals
+    local_face_integrals.clear();
+
+    // loop over all faces of this cell
+    for (unsigned int face_no = 0; face_no < GeometryInfo::faces_per_cell;
+         ++face_no)
+      {
+        const typename DoFHandlerType::face_iterator face = cell->face(face_no);
+
+        // make sure we do work only once: this face may either be regular
+        // or irregular. if it is regular and has a neighbor, then we visit
+        // the face twice, once from every side. let the one with the lower
+        // index do the work. if it is at the boundary, or if the face is
+        // irregular, then do the work below
+        if ((face->has_children() == false) && !cell->at_boundary(face_no) &&
+            (!cell->neighbor_is_coarser(face_no) &&
+             (cell->neighbor(face_no)->index() < cell->index() ||
+              (cell->neighbor(face_no)->index() == cell->index() &&
+               cell->neighbor(face_no)->level() < cell->level()))))
+          continue;
+
+        // if the neighboring cell is less refined than the present one,
+        // then do nothing since we integrate over the subfaces when we
+        // visit the coarse cells.
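
// The guard above ("make sure we do work only once") can be read as the
// following predicate. CellKey is an illustrative stand-in for the
// (index, level) pair of a cell iterator, not a deal.II type.
namespace ownership_sketch
{
  struct CellKey
  {
    int index;
    int level;
  };

  // For a regular interior face shared by two equally refined cells, the
  // cell whose neighbor has the smaller index (or, at equal indices, the
  // smaller level) skips the face; the other cell integrates it. Each such
  // face is therefore worked on exactly once.
  inline bool
  skips_shared_face(const CellKey cell, const CellKey neighbor)
  {
    return neighbor.index < cell.index ||
           (neighbor.index == cell.index && neighbor.level < cell.level);
  }
} // namespace ownership_sketch
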
+          if (face->at_boundary() == false)
+            if (cell->neighbor_is_coarser(face_no))
+              continue;
+
+          // if this face is part of the boundary but not of the neumann
+          // boundary -> nothing to do. However, to make things easier when
+          // summing up the contributions of the faces of cells, we enter this
+          // face into the list of faces with contribution zero.
+          if (face->at_boundary() &&
+              (parallel_data.neumann_bc->find(face->boundary_id()) ==
+               parallel_data.neumann_bc->end()))
+            {
+              local_face_integrals[face] =
+                std::vector<double>(n_solution_vectors, 0.);
               continue;
+            }
 
-          // if the neighboring cell is less refined than the present one,
-          // then do nothing since we integrate over the subfaces when we
-          // visit the coarse cells.
-          if (face->at_boundary() == false)
-            if (cell->neighbor_is_coarser(face_no))
+          // finally: note that we only have to do something if either the
+          // present cell is on the subdomain we care for (and the same for
+          // material_id), or if one of the neighbors behind the face is on
+          // the subdomain we care for
+          if (!(((subdomain_id == numbers::invalid_subdomain_id) ||
+                 (cell->subdomain_id() == subdomain_id)) &&
+                ((material_id == numbers::invalid_material_id) ||
+                 (cell->material_id() == material_id))))
+            {
+              // ok, cell is unwanted, but maybe its neighbor behind the face
+              // we presently work on? oh is there a face at all?
+              if (face->at_boundary())
                continue;
 
-          // if this face is part of the boundary but not of the neumann
-          // boundary -> nothing to do. However, to make things easier when
-          // summing up the contributions of the faces of cells, we enter this
-          // face into the list of faces with contribution zero.
-          if (face->at_boundary() &&
-              (parallel_data.neumann_bc->find(face->boundary_id()) ==
-               parallel_data.neumann_bc->end()))
-            {
-              local_face_integrals[face] =
-                std::vector<double>(n_solution_vectors, 0.);
+              bool care_for_cell = false;
+              if (face->has_children() == false)
+                care_for_cell |=
+                  ((cell->neighbor(face_no)->subdomain_id() == subdomain_id) ||
+                   (subdomain_id == numbers::invalid_subdomain_id)) &&
+                  ((cell->neighbor(face_no)->material_id() == material_id) ||
+                   (material_id == numbers::invalid_material_id));
+              else
+                {
+                  for (unsigned int sf = 0; sf < face->n_children(); ++sf)
+                    if (((cell->neighbor_child_on_subface(face_no, sf)
+                            ->subdomain_id() == subdomain_id) &&
+                         (material_id == numbers::invalid_material_id)) ||
+                        ((cell->neighbor_child_on_subface(face_no, sf)
+                            ->material_id() == material_id) &&
+                         (subdomain_id == numbers::invalid_subdomain_id)))
+                      {
+                        care_for_cell = true;
+                        break;
+                      }
+                }
+
+              // so if none of the neighbors cares for this subdomain or
+              // material either, then try next face
+              if (care_for_cell == false)
                continue;
-            }
-
-          // finally: note that we only have to do something if either the
-          // present cell is on the subdomain we care for (and the same for
-          // material_id), or if one of the neighbors behind the face is on
-          // the subdomain we care for
-          if (!(((subdomain_id == numbers::invalid_subdomain_id) ||
-                 (cell->subdomain_id() == subdomain_id)) &&
-                ((material_id == numbers::invalid_material_id) ||
-                 (cell->material_id() == material_id))))
-            {
-              // ok, cell is unwanted, but maybe its neighbor behind the face
-              // we presently work on? oh is there a face at all?
- if (face->at_boundary()) - continue; - - bool care_for_cell = false; - if (face->has_children() == false) - care_for_cell |= - ((cell->neighbor(face_no)->subdomain_id() == subdomain_id) || - (subdomain_id == numbers::invalid_subdomain_id)) && - ((cell->neighbor(face_no)->material_id() == material_id) || - (material_id == numbers::invalid_material_id)); - else - { - for (unsigned int sf = 0; sf < face->n_children(); ++sf) - if (((cell->neighbor_child_on_subface(face_no, sf) - ->subdomain_id() == subdomain_id) && - (material_id == numbers::invalid_material_id)) || - ((cell->neighbor_child_on_subface(face_no, sf) - ->material_id() == material_id) && - (subdomain_id == numbers::invalid_subdomain_id))) - { - care_for_cell = true; - break; - } - } - - // so if none of the neighbors cares for this subdomain or - // material either, then try next face - if (care_for_cell == false) - continue; - } - - // so now we know that we care for this face, let's do something - // about it. first re-size the arrays we may use to the correct - // size: - parallel_data.resize(cell->active_fe_index()); - - - // then do the actual integration - if (face->has_children() == false) - // if the face is a regular one, i.e. either on the other side - // there is nirvana (face is at boundary), or the other side's - // refinement level is the same as that of this side, then handle - // the integration of these both cases together - integrate_over_regular_face(solutions, + } + + // so now we know that we care for this face, let's do something + // about it. first re-size the arrays we may use to the correct + // size: + parallel_data.resize(cell->active_fe_index()); + + + // then do the actual integration + if (face->has_children() == false) + // if the face is a regular one, i.e. 
either on the other side
+          // there is nirvana (face is at boundary), or the other side's
+          // refinement level is the same as that of this side, then handle
+          // the integration of both of these cases together
+          integrate_over_regular_face(solutions,
+                                      parallel_data,
+                                      local_face_integrals,
+                                      cell,
+                                      face_no,
+                                      parallel_data.fe_face_values_cell,
+                                      parallel_data.fe_face_values_neighbor,
+                                      strategy);
+
+          else
+            // otherwise we need to do some special computations which do not
+            // fit into the framework of the above function
+            integrate_over_irregular_face(solutions,
                                           parallel_data,
                                           local_face_integrals,
                                           cell,
                                           face_no,
                                           parallel_data.fe_face_values_cell,
-                                          parallel_data.fe_face_values_neighbor,
+                                          parallel_data.fe_subface_values,
                                           strategy);
-
-          else
-            // otherwise we need to do some special computations which do not
-            // fit into the framework of the above function
-            integrate_over_irregular_face(solutions,
-                                          parallel_data,
-                                          local_face_integrals,
-                                          cell,
-                                          face_no,
-                                          parallel_data.fe_face_values_cell,
-                                          parallel_data.fe_subface_values,
-                                          strategy);
-        }
-    }
-  } // namespace
+        }
+    }
+  } // namespace internal
diff --git a/include/deal.II/numerics/matrix_creator.templates.h b/include/deal.II/numerics/matrix_creator.templates.h
index 5c195fda90..e11890cd7c 100644
--- a/include/deal.II/numerics/matrix_creator.templates.h
+++ b/include/deal.II/numerics/matrix_creator.templates.h
@@ -1437,7 +1437,7 @@ namespace MatrixCreator
 
 
 
-  namespace
+  namespace internal
   {
     template <int dim, int spacedim, typename number>
     void
@@ -1799,7 +1799,7 @@ namespace MatrixCreator
         }
       }
     }
-  } // namespace
+  } // namespace internal
 
 
 
@@ -1888,25 +1888,27 @@ namespace MatrixCreator
         MatrixCreator::internal::AssemblerBoundary::Scratch const &,
         MatrixCreator::internal::AssemblerBoundary::
           CopyData<hp::DoFHandler<dim, spacedim>, number> &)>>(
-        std::bind(&create_hp_boundary_mass_matrix_1<dim, spacedim, number>,
-                  std::placeholders::_1,
-                  std::placeholders::_2,
-                  std::placeholders::_3,
-                  std::cref(mapping),
-                  std::cref(fe_collection),
-                  std::cref(q),
-                  std::cref(boundary_functions),
-                  coefficient,
-                  std::cref(component_mapping))),
+        std::bind(
+          &internal::create_hp_boundary_mass_matrix_1<dim, spacedim, number>,
+          std::placeholders::_1,
+          std::placeholders::_2,
+          std::placeholders::_3,
+          std::cref(mapping),
+          std::cref(fe_collection),
+          std::cref(q),
+          std::cref(boundary_functions),
+          coefficient,
+          std::cref(component_mapping))),
      static_cast<std::function<void(
        MatrixCreator::internal::AssemblerBoundary ::CopyData<
          hp::DoFHandler<dim, spacedim>,
          number> const &)>>(
-        std::bind(&copy_hp_boundary_mass_matrix_1<dim, spacedim, number>,
-                  std::placeholders::_1,
-                  std::cref(boundary_functions),
-                  std::cref(dof_to_boundary_mapping),
-                  std::ref(matrix),
-                  std::ref(rhs_vector))),
+        std::bind(
+          &internal::copy_hp_boundary_mass_matrix_1<dim, spacedim, number>,
+          std::placeholders::_1,
+          std::cref(boundary_functions),
+          std::cref(dof_to_boundary_mapping),
+          std::ref(matrix),
+          std::ref(rhs_vector))),
      scratch,
      copy_data);
  }
diff --git a/include/deal.II/numerics/vector_tools.templates.h b/include/deal.II/numerics/vector_tools.templates.h
index 437bb13a2a..b1f9a0b8c8 100644
--- a/include/deal.II/numerics/vector_tools.templates.h
+++ b/include/deal.II/numerics/vector_tools.templates.h
@@ -97,10 +97,10 @@ DEAL_II_NAMESPACE_OPEN
 namespace VectorTools
 {
-  // This anonymous namespace contains the actual implementation called
+  // This namespace contains the actual implementation called
   // by VectorTools::interpolate and variants (such as
   // VectorTools::interpolate_by_material_id).
- namespace + namespace internal { // A small helper function to transform a component range starting // at offset from the real to the unit cell according to the @@ -527,7 +527,7 @@ namespace VectorTools vec.compress(VectorOperation::insert); } - } // namespace + } // namespace internal @@ -556,7 +556,8 @@ namespace VectorTools return &function; }; - interpolate(mapping, dof_handler, function_map, vec, component_mask); + internal::interpolate( + mapping, dof_handler, function_map, vec, component_mask); } @@ -670,7 +671,8 @@ namespace VectorTools return nullptr; }; - interpolate(mapping, dof_handler, function_map, vec, component_mask); + internal::interpolate( + mapping, dof_handler, function_map, vec, component_mask); } @@ -885,7 +887,7 @@ namespace VectorTools } - namespace + namespace internal { /** * Compute the boundary values to be used in the project() functions. @@ -1666,7 +1668,7 @@ namespace VectorTools vec_result); vec_result.compress(VectorOperation::insert); } - } // namespace + } // namespace internal @@ -1684,19 +1686,19 @@ namespace VectorTools switch (dof.get_fe().degree) { case 1: - project_parallel( + internal::project_parallel( mapping, dof, constraints, quadrature, func, vec_result); break; case 2: - project_parallel( + internal::project_parallel( mapping, dof, constraints, quadrature, func, vec_result); break; case 3: - project_parallel( + internal::project_parallel( mapping, dof, constraints, quadrature, func, vec_result); break; default: - project_parallel( + internal::project_parallel( mapping, dof, constraints, quadrature, func, vec_result); } } @@ -1722,23 +1724,23 @@ namespace VectorTools switch (fe_degree) { case 1: - project_parallel( + internal::project_parallel( matrix_free, constraints, func, vec_result, fe_component); break; case 2: - project_parallel( + internal::project_parallel( matrix_free, constraints, func, vec_result, fe_component); break; case 3: - project_parallel( + internal::project_parallel( matrix_free, constraints, func, vec_result, fe_component); break; default: - project_parallel( + internal::project_parallel( matrix_free, constraints, func, vec_result, fe_component); } else - project_parallel( + internal::project_parallel( matrix_free, constraints, func, vec_result, fe_component); } @@ -1789,30 +1791,30 @@ namespace VectorTools &function); Assert(mapping_ptr != nullptr, ExcInternalError()); Assert(dof_ptr != nullptr, ExcInternalError()); - project(*mapping_ptr, - *dof_ptr, - constraints, - quadrature, - *function_ptr, - vec_result, - enforce_zero_boundary, - q_boundary, - project_to_boundary_first); + internal::project(*mapping_ptr, + *dof_ptr, + constraints, + quadrature, + *function_ptr, + vec_result, + enforce_zero_boundary, + q_boundary, + project_to_boundary_first); } else { Assert((dynamic_cast *>( &(dof.get_triangulation())) == nullptr), ExcNotImplemented()); - do_project(mapping, - dof, - constraints, - quadrature, - function, - vec_result, - enforce_zero_boundary, - q_boundary, - project_to_boundary_first); + internal::do_project(mapping, + dof, + constraints, + quadrature, + function, + vec_result, + enforce_zero_boundary, + q_boundary, + project_to_boundary_first); } } @@ -1864,15 +1866,15 @@ namespace VectorTools &(dof.get_triangulation())) == nullptr), ExcNotImplemented()); - do_project(mapping, - dof, - constraints, - quadrature, - function, - vec_result, - enforce_zero_boundary, - q_boundary, - project_to_boundary_first); + internal::do_project(mapping, + dof, + constraints, + quadrature, + function, + vec_result, + 
enforce_zero_boundary, + q_boundary, + project_to_boundary_first); } @@ -2737,7 +2739,7 @@ namespace VectorTools // ----------- interpolate_boundary_values for std::map -------------------- - namespace + namespace internal { template &boundary_values, const ComponentMask & component_mask_) { - do_interpolate_boundary_values( + internal::do_interpolate_boundary_values( mapping, dof, function_map, boundary_values, component_mask_); } @@ -3121,7 +3123,7 @@ namespace VectorTools std::map &boundary_values, const ComponentMask & component_mask_) { - do_interpolate_boundary_values( + internal::do_interpolate_boundary_values( mapping, dof, function_map, boundary_values, component_mask_); } @@ -3275,7 +3277,7 @@ namespace VectorTools // -------- implementation for project_boundary_values with std::map -------- - namespace + namespace internal { // keep the first argument non-reference since we use it // with 1e-8 * number @@ -3606,7 +3608,7 @@ namespace VectorTools boundary_projection(dof_to_boundary_mapping[i]); } } - } // namespace + } // namespace internal template void @@ -3619,7 +3621,7 @@ namespace VectorTools std::map &boundary_values, std::vector component_mapping) { - do_project_boundary_values( + internal::do_project_boundary_values( mapping, dof, boundary_functions, q, boundary_values, component_mapping); } @@ -3656,7 +3658,7 @@ namespace VectorTools std::map &boundary_values, std::vector component_mapping) { - do_project_boundary_values( + internal::do_project_boundary_values( mapping, dof, boundary_functions, q, boundary_values, component_mapping); } @@ -7501,7 +7503,7 @@ namespace VectorTools - namespace + namespace internal { template struct PointComparator @@ -7516,7 +7518,7 @@ namespace VectorTools return false; } }; - } // namespace + } // namespace internal @@ -7594,7 +7596,8 @@ namespace VectorTools // Extract a list that collects all vector components that belong to the // same node (scalar basis function). When creating that list, we use an // array of dim components that stores the global degree of freedom. 
- std::set, PointComparator> + std::set, + internal::PointComparator> vector_dofs; std::vector face_dofs; @@ -7673,7 +7676,7 @@ namespace VectorTools // can find constrained ones unsigned int n_total_constraints_found = 0; for (typename std::set, - PointComparator>::const_iterator it = + internal::PointComparator>::const_iterator it = vector_dofs.begin(); it != vector_dofs.end(); ++it) @@ -7836,7 +7839,7 @@ namespace VectorTools n_q_points, std::vector>(n_components)); } - namespace + namespace internal { template double @@ -7858,7 +7861,7 @@ namespace VectorTools "Mean value norm is not implemented for complex-valued vectors")); return mean_value.real(); } - } // namespace + } // namespace internal // avoid compiling inner function for many vector types when we always @@ -8143,7 +8146,7 @@ namespace VectorTools } if (norm == mean) - diff = mean_to_double(diff_mean); + diff = internal::mean_to_double(diff_mean); // append result of this cell to the end of the vector AssertIsFinite(diff); @@ -8911,13 +8914,12 @@ namespace VectorTools return gradient[0]; } - namespace + namespace internal { template typename std::enable_if::value == true>::type - internal_subtract_mean_value(VectorType & v, - const std::vector &p_select) + subtract_mean_value(VectorType &v, const std::vector &p_select) { if (p_select.size() == 0) { @@ -8958,25 +8960,24 @@ namespace VectorTools template typename std::enable_if::value == false>::type - internal_subtract_mean_value(VectorType & v, - const std::vector &p_select) + subtract_mean_value(VectorType &v, const std::vector &p_select) { (void)p_select; Assert(p_select.size() == 0, ExcNotImplemented()); // In case of an empty boolean mask operate on the whole vector: v.add(-v.mean_value()); } - } // namespace + } // namespace internal template void subtract_mean_value(VectorType &v, const std::vector &p_select) { - internal_subtract_mean_value(v, p_select); + internal::subtract_mean_value(v, p_select); } - namespace + namespace internal { template void @@ -8995,7 +8996,7 @@ namespace VectorTools { n = std::complex(r, i); } - } // namespace + } // namespace internal template @@ -9060,7 +9061,9 @@ namespace VectorTools p_triangulation->get_communicator()); AssertThrowMPI(ierr); - set_possibly_complex_number(global_values[0], global_values[1], mean); + internal::set_possibly_complex_number(global_values[0], + global_values[1], + mean); area = global_values[2]; } #endif -- 2.39.5
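
A note on the pattern applied throughout this patch: an unnamed namespace gives
its members internal linkage, so every translation unit that includes one of
these *.templates.h headers receives its own copy of each helper. The inline
function templates defined in the same headers odr-use those helpers, and their
definitions then differ between translation units, which violates the
one-definition rule. A minimal sketch of the hazard and of the fix, using
hypothetical names (none of this is deal.II code):

// sketch.h -- hypothetical header illustrating the problem.

// Hazardous: internal linkage, i.e. one distinct 'helper' per translation
// unit. The template below is required to have an identical definition
// everywhere, yet it refers to a different 'helper' in every translation
// unit that includes this header -- an ODR violation, undefined behavior.
namespace
{
  inline int
  helper(const int x)
  {
    return 2 * x;
  }
} // namespace

template <typename T>
T
twice(const T t)
{
  return helper(t);
}

// The fix this patch applies: a named nested namespace. 'internal::helper'
// has external linkage, so all translation units share a single entity and
// 'twice_fixed' has one consistent definition.
namespace internal
{
  inline int
  helper(const int x)
  {
    return 2 * x;
  }
} // namespace internal

template <typename T>
T
twice_fixed(const T t)
{
  return internal::helper(t);
}

The mechanical consequence, visible in every hunk above, is that call sites
outside the moved code must now qualify the names, e.g. internal::interpolate,
internal::do_project, internal::mean_to_double.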
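
The matrix_creator hunks show the same qualification inside std::bind: once a
worker function moves into namespace internal, its address must be taken with
the qualified name. A reduced, compilable sketch of that call shape; 'worker'
and 'weight' are hypothetical stand-ins, not the patch's functions:

#include <functional>
#include <iostream>

namespace internal
{
  // Stand-in for a worker such as create_hp_boundary_mass_matrix_1: the
  // first argument stays a placeholder, the trailing one is bound up front.
  void
  worker(const int item, const double &weight)
  {
    std::cout << item * weight << std::endl;
  }
} // namespace internal

int
main()
{
  const double weight = 2.5;

  // std::cref binds by reference and avoids copying; the placeholder is
  // filled in at call time, much as the enclosing assembly loop supplies
  // its iterator and scratch/copy objects.
  auto bound = std::bind(&internal::worker,
                         std::placeholders::_1,
                         std::cref(weight));
  bound(4); // prints 10
}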
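
The project() changes route every call through a switch that converts the
runtime polynomial degree into a template argument of
internal::project_parallel, with a generic fallback for degrees that have no
specialized instantiation. The idiom, reduced to a self-contained sketch with
hypothetical kernel names:

#include <iostream>

// Hypothetical compute kernel: with the degree as a template parameter the
// compiler can unroll loops and vectorize; -1 selects a generic code path.
template <int fe_degree>
void
run_kernel()
{
  if (fe_degree == -1)
    std::cout << "generic path" << std::endl;
  else
    std::cout << "optimized path, degree " << fe_degree << std::endl;
}

// Runtime-to-compile-time dispatch, mirroring the switch on
// dof.get_fe().degree in front of the internal::project_parallel calls.
void
run(const unsigned int degree)
{
  switch (degree)
    {
      case 1:
        run_kernel<1>();
        break;
      case 2:
        run_kernel<2>();
        break;
      case 3:
        run_kernel<3>();
        break;
      default:
        run_kernel<-1>();
    }
}

int
main()
{
  run(2); // optimized path, degree 2
  run(7); // generic path
}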
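
internal::PointComparator, used above as the ordering of a std::set of
per-node arrays of DoF indices, only needs to supply a strict weak ordering.
A self-contained sketch of such a lexicographic comparator; unsigned int
stands in for types::global_dof_index, and the body mirrors the general shape
of such comparators rather than deal.II's exact code:

#include <array>
#include <iostream>
#include <set>

template <int dim>
struct PointComparator
{
  // Lexicographic comparison: the first differing component decides;
  // returning false for equal keys makes this a strict weak ordering,
  // which is what std::set needs to recognize duplicates.
  bool
  operator()(const std::array<unsigned int, dim> &a,
             const std::array<unsigned int, dim> &b) const
  {
    for (unsigned int d = 0; d < dim; ++d)
      if (a[d] != b[d])
        return a[d] < b[d];
    return false;
  }
};

int
main()
{
  std::set<std::array<unsigned int, 2>, PointComparator<2>> vector_dofs;
  vector_dofs.insert({{3, 1}});
  vector_dofs.insert({{0, 5}});
  vector_dofs.insert({{3, 1}}); // duplicate node, ignored by the set
  std::cout << vector_dofs.size() << std::endl; // prints 2
}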
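
The two subtract_mean_value overloads in namespace internal select an
implementation with std::enable_if on a type trait, and the public function
simply forwards. A compilable sketch of the same dispatch; the trait below is
a stand-in that merely marks std::vector<double> as serial, whereas deal.II
uses its is_serial_vector trait and a boolean component mask:

#include <iostream>
#include <numeric>
#include <type_traits>
#include <vector>

template <typename V>
struct is_serial_vector : std::false_type
{};
template <>
struct is_serial_vector<std::vector<double>> : std::true_type
{};

namespace internal
{
  // Enabled when the trait is true: element-wise access is cheap.
  template <typename VectorType>
  typename std::enable_if<is_serial_vector<VectorType>::value == true>::type
  subtract_mean_value(VectorType &v)
  {
    const double mean = std::accumulate(v.begin(), v.end(), 0.0) / v.size();
    for (auto &x : v)
      x -= mean;
  }

  // Enabled when the trait is false: fall back to the vector's own global
  // operations (sketched here as members 'add' and 'mean_value').
  template <typename VectorType>
  typename std::enable_if<is_serial_vector<VectorType>::value == false>::type
  subtract_mean_value(VectorType &v)
  {
    v.add(-v.mean_value());
  }
} // namespace internal

// Public entry point, forwarding just as the patch makes the public
// subtract_mean_value call internal::subtract_mean_value.
template <typename VectorType>
void
subtract_mean_value(VectorType &v)
{
  internal::subtract_mean_value(v);
}

int
main()
{
  std::vector<double> v = {1.0, 2.0, 3.0};
  subtract_mean_value(v);
  std::cout << v[0] << ' ' << v[1] << ' ' << v[2] << std::endl; // -1 0 1
}

Renaming internal_subtract_mean_value to internal::subtract_mean_value is safe
here because the call in the public function is qualified, so it cannot
accidentally recurse into itself.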
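
Finally, internal::set_possibly_complex_number is an overload pair that lets
one MPI-reduction code path assign a (real, imaginary) pair of doubles to
either a real or a complex scalar; overload resolution prefers the
std::complex form whenever it applies. A sketch following the same shape (the
deal.II variant for real targets asserts that the imaginary part vanishes;
that check is omitted here):

#include <complex>
#include <iostream>

namespace internal
{
  // Generic overload: a real target keeps only the real part.
  template <typename number>
  void
  set_possibly_complex_number(const double r, const double, number &n)
  {
    n = r;
  }

  // More specialized overload, chosen for std::complex targets: both parts
  // are kept.
  template <typename number>
  void
  set_possibly_complex_number(const double &r,
                              const double &i,
                              std::complex<number> &n)
  {
    n = std::complex<number>(r, i);
  }
} // namespace internal

int
main()
{
  double               real_mean;
  std::complex<double> complex_mean;
  internal::set_possibly_complex_number(1.0, 2.0, real_mean);
  internal::set_possibly_complex_number(1.0, 2.0, complex_mean);
  std::cout << real_mean << ' ' << complex_mean << std::endl; // 1 (1,2)
}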