From: Martin Kronbichler Date: Wed, 26 Jun 2024 20:13:27 +0000 (+0200) Subject: Implement new multigrid transfer (for p-multigrid) X-Git-Tag: v9.6.0-rc1~52^2~3 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fc6dd2757fd0ee7cfc0a714c03ee4b250d49fd84;p=dealii.git Implement new multigrid transfer (for p-multigrid) --- diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.h index 8d00a58819..689fd70bef 100644 --- a/include/deal.II/multigrid/mg_transfer_global_coarsening.h +++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.h @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -399,9 +400,29 @@ protected: /** - * Class for transfer between two multigrid levels for p- or global coarsening. + * Class for transfer between two multigrid levels for p- or global + * coarsening. It relies on a list of DoF indices associated with the cells on + * the coarse and fine side of the transfer, and implements a cell-by-cell + * (matrix-free) interpolation setup with the reference-cell embedding + * matrices. * * The implementation of this class is explained in detail in @cite munch2022gc. + * + * There are two possible ways to use this class. In the first option, the + * transfer is built from the underlying DoFHandler and AffineConstraints + * objects on the coarse and fine side, collecting an explicit copy of all + * indices on both sides. This works for a relatively wide set of + * FiniteElement combinations, including p-adaptive schemes using + * hp::FECollection. The second, more setup-efficient approach is to build the + * transfer between two multigrid levels for polynomial coarsening + * (p-coarsening) from two MatrixFree objects that might already exist from + * other parts of the code. In this case, we require that both objects share + * the same triangulation (but differ through their DoFHandler descriptions) + * and are described by the respective DoFHandler/AffineConstraints pair. This + * second variant is more efficient because no queries to the DoFHandler need + * to be made, reducing both the setup time and the overall memory + * consumption. Note that not all options are supported for the second entry + * point, and we fall back to the first option in such a case. */ template class MGTwoLevelTransfer : public MGTwoLevelTransferBase @@ -462,7 +483,7 @@ public: const unsigned int mg_level_coarse = numbers::invalid_unsigned_int); /** - * Set up transfer operator between the given DoFHandler objects ( + * Set up the transfer operator between the given DoFHandler objects ( * @p dof_handler_fine and @p dof_handler_coarse). Depending on the * underlying Triangulation objects polynomial or geometrical global * coarsening is performed. @@ -484,6 +505,21 @@ public: const unsigned int mg_level_fine = numbers::invalid_unsigned_int, const unsigned int mg_level_coarse = numbers::invalid_unsigned_int); + /** + * Set up polynomial coarsening between the DoFHandler objects underlying + * two MatrixFree objects and the respective numbers for the DoFHandler + * objects within MatrixFree. This reinit() function allows for a more + * efficient setup of the transfer operator and reduces the overall memory + * consumption of a multigrid cycle in case the same MatrixFree objects are + * also used for smoothers and residual evaluation on the two involved + * levels. 
+ */ + void + reinit(const MatrixFree &matrix_free_fine, + const unsigned int dof_no_fine, + const MatrixFree &matrix_free_coarse, + const unsigned int dof_no_coarse); + /** * Check if a fast templated version of the polynomial transfer between * @p fe_degree_fine and @p fe_degree_coarse is available. @@ -572,25 +608,16 @@ private: unsigned int degree_fine; /** - * Prolongation matrix for non-tensor-product elements. + * Prolongation matrix used for the prolongate_and_add() and + * restrict_and_add() functions. */ AlignedVector prolongation_matrix; /** - * 1d prolongation matrix for tensor-product elements. - */ - AlignedVector prolongation_matrix_1d; - - /** - * Restriction matrix for non-tensor-product elements. + * Restriction matrix used for the interpolate() function. */ AlignedVector restriction_matrix; - /** - * 1d restriction matrix for tensor-product elements. - */ - AlignedVector restriction_matrix_1d; - /** * ShapeInfo description of the coarse cell. Needed during the * fast application of hanging-node constraints. @@ -618,16 +645,63 @@ private: ConstraintInfo constraint_info_fine; + struct MatrixFreeRelatedData + { + /** + * Matrix-free object on the fine side. + */ + SmartPointer> matrix_free_fine; + + /** + * Index within the list of DoFHandler objects in the matrix_free_fine + * object. + */ + unsigned int dof_handler_index_fine; + + /** + * Matrix-free object on the coarse side. + */ + SmartPointer> matrix_free_coarse; + + /** + * Index within the list of DoFHandler objects in the matrix_free_coarse + * object. + */ + unsigned int dof_handler_index_coarse; + + /** + * The two matrix-free objects will in general not agree on the order the + * cells are traversed. Thus, the loop will be run by the matrix-free object + * on the fine side, and the coarse side will adapt to those cell indices. + */ + std::vector> + cell_list_fine_to_coarse; + }; + + /** + * In case this class is built with MatrixFree objects (see the respective + * reinit() function), we set up this data structure and skip the other + * fields of the class. + */ + std::unique_ptr matrix_free_data; + /** - * Weights for continuous elements. + * CRS-like pointer to the start into the weights array, as that array can + * be compressed or in full format. */ - std::vector weights; // TODO: vectorize + std::vector weights_start; /** - * Weights for continuous elements, compressed into 3^dim doubles per - * cell if possible. + * Weights for continuous elements, either in full format or compressed into + * 3^dim doubles per cell if possible. */ - AlignedVector weights_compressed; + AlignedVector weights; + + /** + * Store whether the weights are in compressed format or not, in the + * ordering of the weights_start array. + */ + std::vector weights_are_compressed; /** * Number of components. @@ -640,7 +714,7 @@ private: SmartPointer> dof_handler_fine; /** - * Muligird level used during initialization. + * Multigrid level used during initialization. 
*/ unsigned int mg_level_fine; diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h index 2fa991609c..6df7bcd789 100644 --- a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h +++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h @@ -41,7 +41,9 @@ #include #include +#include #include +#include #include #include @@ -120,11 +122,9 @@ namespace { public: CellProlongator(const AlignedVector &prolongation_matrix, - const AlignedVector &prolongation_matrix_1d, const Number2 *evaluation_data_coarse, Number2 *evaluation_data_fine) : prolongation_matrix(prolongation_matrix) - , prolongation_matrix_1d(prolongation_matrix_1d) , evaluation_data_coarse(evaluation_data_coarse) , evaluation_data_fine(evaluation_data_fine) {} @@ -134,7 +134,7 @@ namespace run(const unsigned int degree_fine_ = numbers::invalid_unsigned_int, const unsigned int degree_coarse_ = numbers::invalid_unsigned_int) { - Assert(prolongation_matrix_1d.size() > 0, ExcNotImplemented()); + Assert(prolongation_matrix.size() > 0, ExcNotImplemented()); internal::FEEvaluationImplBasisChange< internal::evaluate_general, @@ -142,7 +142,7 @@ namespace dim, degree_coarse + 1, degree_fine + 1>::do_forward(1, - prolongation_matrix_1d, + prolongation_matrix, evaluation_data_coarse, evaluation_data_fine, degree_coarse_ + 1, @@ -169,7 +169,6 @@ namespace private: const AlignedVector &prolongation_matrix; - const AlignedVector &prolongation_matrix_1d; const Number2 *evaluation_data_coarse; Number2 *evaluation_data_fine; }; @@ -182,11 +181,9 @@ namespace { public: CellRestrictor(const AlignedVector &prolongation_matrix, - const AlignedVector &prolongation_matrix_1d, Number2 *evaluation_data_fine, Number2 *evaluation_data_coarse) : prolongation_matrix(prolongation_matrix) - , prolongation_matrix_1d(prolongation_matrix_1d) , evaluation_data_fine(evaluation_data_fine) , evaluation_data_coarse(evaluation_data_coarse) {} @@ -196,7 +193,7 @@ namespace run(const unsigned int degree_fine_ = numbers::invalid_unsigned_int, const unsigned int degree_coarse_ = numbers::invalid_unsigned_int) { - Assert(prolongation_matrix_1d.size() > 0, ExcNotImplemented()); + Assert(prolongation_matrix.size() > 0, ExcNotImplemented()); internal::FEEvaluationImplBasisChange< internal::evaluate_general, @@ -205,7 +202,7 @@ namespace degree_coarse == 0 ? -1 : (degree_coarse + 1), degree_fine == 0 ? -1 : (degree_fine + 1)>:: do_backward(1, - prolongation_matrix_1d, + prolongation_matrix, false, evaluation_data_fine, evaluation_data_coarse, @@ -234,7 +231,6 @@ namespace private: const AlignedVector &prolongation_matrix; - const AlignedVector &prolongation_matrix_1d; Number2 *evaluation_data_fine; Number2 *evaluation_data_coarse; }; @@ -1492,8 +1488,8 @@ namespace internal setup_weights( const dealii::AffineConstraints &constraints_fine, MGTwoLevelTransfer> - &transfer, - bool is_feq) + &transfer, + const bool is_feq) { if (transfer.fine_element_is_continuous == false) return; // nothing to do @@ -1510,8 +1506,9 @@ namespace internal // ... invert valence for (unsigned int i = 0; i < weight_vector.locally_owned_size(); ++i) - weight_vector.local_element(i) = - Number(1.) / weight_vector.local_element(i); + if (weight_vector.local_element(i) > 0.) + weight_vector.local_element(i) = + Number(1.) / weight_vector.local_element(i); // ... 
clear constrained indices for (const auto &i : constraints_fine.get_lines()) @@ -1521,61 +1518,65 @@ namespace internal weight_vector.update_ghost_values(); // 2) store data cell-wise a DG format and try to compress - transfer.weights.resize(transfer.constraint_info_fine.dof_indices.size()); + const unsigned int n_lanes = VectorizedArray::size(); + std::size_t n_batches = 0; + for (const auto &scheme : transfer.schemes) + n_batches += (scheme.n_coarse_cells + n_lanes - 1) / n_lanes; + + transfer.weights.clear(); + transfer.weights.reserve(n_batches * Utilities::pow(3, dim)); + transfer.weights_start.clear(); + transfer.weights_start.reserve(n_batches + 1); + transfer.weights_are_compressed.clear(); + transfer.weights_are_compressed.resize(n_batches); - const unsigned int n_lanes = VectorizedArray::size(); - unsigned int offset = 0; + AlignedVector> local_weights; std::array, Utilities::pow(3, dim)> - mask_vectorized; - std::array mask; + local_weights_compressed; + unsigned int offset = 0; // ... loop over cells for (const auto &scheme : transfer.schemes) - for (unsigned int cell = 0; cell < scheme.n_coarse_cells; - cell += n_lanes) - { - const unsigned int n_lanes_filled = - (cell + n_lanes > scheme.n_coarse_cells) ? - (scheme.n_coarse_cells - cell) : - n_lanes; + { + local_weights.resize(scheme.n_dofs_per_cell_fine); + for (unsigned int cell = 0; cell < scheme.n_coarse_cells; + cell += n_lanes) + { + transfer.weights_start.push_back(transfer.weights.size()); - if (is_feq) - mask_vectorized.fill(VectorizedArray()); + const unsigned int n_lanes_filled = + (cell + n_lanes > scheme.n_coarse_cells) ? + (scheme.n_coarse_cells - cell) : + n_lanes; - for (unsigned int v = 0; v < n_lanes_filled; - ++v, offset += scheme.n_dofs_per_cell_fine) - { - // ... store data cell-wise a DG format + for (unsigned int v = 0; v < n_lanes_filled; + ++v, offset += scheme.n_dofs_per_cell_fine) for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) - transfer.weights[offset + i] = weight_vector.local_element( + local_weights[i][v] = weight_vector.local_element( transfer.constraint_info_fine.dof_indices[offset + i]); - if (is_feq) - { - // ... try to compress - is_feq = - compute_weights_fe_q_dofs_by_entity( - transfer.weights.data() + offset, - transfer.n_components, - scheme.degree_fine + 1, - mask.data()); - - // ... 
vectorize data - for (unsigned int j = 0; j < mask_vectorized.size(); ++j) - mask_vectorized[j][v] = mask[j]; - } - } - - if (is_feq) - transfer.weights_compressed.insert_back(mask_vectorized.begin(), - mask_vectorized.end()); - } + const bool can_compress_weights = + is_feq && compute_weights_fe_q_dofs_by_entity( + local_weights.data(), + transfer.n_components, + scheme.degree_fine + 1, + local_weights_compressed.data()); - // 3) clean up - if (is_feq) - transfer.weights.clear(); - else - transfer.weights_compressed.clear(); + if (can_compress_weights && scheme.degree_fine > 1) + { + for (const VectorizedArray weight : + local_weights_compressed) + transfer.weights.push_back(weight); + transfer + .weights_are_compressed[transfer.weights_start.size() - 1] = + 1; + } + else + for (const VectorizedArray weight : local_weights) + transfer.weights.push_back(weight); + } + } + AssertDimension(transfer.weights_start.size(), n_batches); } @@ -2073,8 +2074,8 @@ namespace internal { transfer.schemes[transfer_scheme_index] - .prolongation_matrix_1d.resize(fe->n_dofs_per_cell() * - n_child_dofs_1d); + .prolongation_matrix.resize(fe->n_dofs_per_cell() * + n_child_dofs_1d); for (unsigned int c = 0; c < GeometryInfo<1>::max_children_per_cell; @@ -2082,15 +2083,15 @@ namespace internal for (unsigned int i = 0; i < fe->n_dofs_per_cell(); ++i) for (unsigned int j = 0; j < fe->n_dofs_per_cell(); ++j) transfer.schemes[transfer_scheme_index] - .prolongation_matrix_1d[i * n_child_dofs_1d + j + - c * shift] = + .prolongation_matrix[i * n_child_dofs_1d + j + + c * shift] = fe->get_prolongation_matrix(c)(renumbering[j], renumbering[i]); } { transfer.schemes[transfer_scheme_index] - .restriction_matrix_1d.resize(fe->n_dofs_per_cell() * - n_child_dofs_1d); + .restriction_matrix.resize(fe->n_dofs_per_cell() * + n_child_dofs_1d); for (unsigned int c = 0; c < GeometryInfo<1>::max_children_per_cell; @@ -2100,8 +2101,8 @@ namespace internal for (unsigned int i = 0; i < fe->n_dofs_per_cell(); ++i) for (unsigned int j = 0; j < fe->n_dofs_per_cell(); ++j) transfer.schemes[transfer_scheme_index] - .restriction_matrix_1d[i * n_child_dofs_1d + j + - c * shift] += + .restriction_matrix[i * n_child_dofs_1d + j + + c * shift] += matrix(renumbering[i], renumbering[j]); } } @@ -2165,9 +2166,121 @@ namespace internal } } - // ------------------------------- weights ------------------------------- - setup_weights(constraints_fine, transfer, is_feq); + if (transfer.fine_element_is_continuous) + setup_weights(constraints_fine, transfer, is_feq); + } + + + + template + static void + compute_prolongation_and_restriction_matrices( + const FiniteElement &fe_fine, + const FiniteElement &fe_coarse, + typename MGTwoLevelTransfer< + dim, + LinearAlgebra::distributed::Vector>::MGTransferScheme &scheme) + { + AssertDimension(fe_fine.n_base_elements(), 1); + AssertDimension(fe_coarse.n_base_elements(), 1); + + const FiniteElement &fe_fine_scalar = fe_fine.base_element(0); + const FiniteElement &fe_coarse_scalar = fe_coarse.base_element(0); + + const auto reference_cell = fe_fine.reference_cell(); + + Assert(reference_cell == fe_coarse.reference_cell(), ExcNotImplemented()); + + if (reference_cell == ReferenceCells::get_hypercube() && + (fe_coarse != fe_fine) && + (fe_coarse.n_dofs_per_cell() != 0 && fe_fine.n_dofs_per_cell() != 0)) + { + const auto fe_fine_1d = create_1D_fe(fe_fine_scalar); + + std::vector renumbering_fine( + fe_fine_1d->n_dofs_per_cell()); + AssertIndexRange(fe_fine_1d->n_dofs_per_vertex(), 2); + renumbering_fine[0] = 0; + for 
(unsigned int i = 0; i < fe_fine_1d->dofs_per_line; ++i) + renumbering_fine[i + fe_fine_1d->n_dofs_per_vertex()] = + GeometryInfo<1>::vertices_per_cell * + fe_fine_1d->n_dofs_per_vertex() + + i; + if (fe_fine_1d->n_dofs_per_vertex() > 0) + renumbering_fine[fe_fine_1d->n_dofs_per_cell() - + fe_fine_1d->n_dofs_per_vertex()] = + fe_fine_1d->n_dofs_per_vertex(); + + const auto fe_coarse_1d = create_1D_fe(fe_coarse_scalar); + + std::vector renumbering_coarse( + fe_coarse_1d->n_dofs_per_cell()); + AssertIndexRange(fe_coarse_1d->n_dofs_per_vertex(), 2); + renumbering_coarse[0] = 0; + for (unsigned int i = 0; i < fe_coarse_1d->dofs_per_line; ++i) + renumbering_coarse[i + fe_coarse_1d->n_dofs_per_vertex()] = + GeometryInfo<1>::vertices_per_cell * + fe_coarse_1d->n_dofs_per_vertex() + + i; + if (fe_coarse_1d->n_dofs_per_vertex() > 0) + renumbering_coarse[fe_coarse_1d->n_dofs_per_cell() - + fe_coarse_1d->n_dofs_per_vertex()] = + fe_coarse_1d->n_dofs_per_vertex(); + + FullMatrix matrix(fe_fine_1d->n_dofs_per_cell(), + fe_coarse_1d->n_dofs_per_cell()); + FETools::get_projection_matrix(*fe_coarse_1d, *fe_fine_1d, matrix); + scheme.prolongation_matrix.resize(fe_fine_1d->n_dofs_per_cell() * + fe_coarse_1d->n_dofs_per_cell()); + + for (unsigned int i = 0, k = 0; i < fe_coarse_1d->n_dofs_per_cell(); + ++i) + for (unsigned int j = 0; j < fe_fine_1d->n_dofs_per_cell(); + ++j, ++k) + scheme.prolongation_matrix[k] = + matrix(renumbering_fine[j], renumbering_coarse[i]); + + matrix.reinit(fe_coarse_1d->n_dofs_per_cell(), + fe_fine_1d->n_dofs_per_cell()); + FETools::get_projection_matrix(*fe_fine_1d, *fe_coarse_1d, matrix); + scheme.restriction_matrix.resize(fe_fine_1d->n_dofs_per_cell() * + fe_coarse_1d->n_dofs_per_cell()); + + for (unsigned int i = 0, k = 0; i < fe_coarse_1d->n_dofs_per_cell(); + ++i) + for (unsigned int j = 0; j < fe_fine_1d->n_dofs_per_cell(); + ++j, ++k) + scheme.restriction_matrix[k] = + matrix(renumbering_coarse[i], renumbering_fine[j]); + } + else if (reference_cell != ReferenceCells::get_hypercube() && + (fe_fine != fe_coarse) && + (fe_fine.n_dofs_per_cell() != 0 && + fe_coarse.n_dofs_per_cell() != 0)) + { + FullMatrix matrix(fe_fine_scalar.n_dofs_per_cell(), + fe_coarse_scalar.n_dofs_per_cell()); + FETools::get_projection_matrix(fe_coarse_scalar, + fe_fine_scalar, + matrix); + scheme.prolongation_matrix.resize(matrix.m() * matrix.n()); + + for (unsigned int i = 0, k = 0; i < matrix.n(); ++i) + for (unsigned int j = 0; j < matrix.m(); ++j, ++k) + scheme.prolongation_matrix[k] = matrix(j, i); + + matrix.reinit(fe_coarse_scalar.n_dofs_per_cell(), + fe_fine_scalar.n_dofs_per_cell()); + FETools::get_projection_matrix(fe_fine_scalar, + fe_coarse_scalar, + matrix); + scheme.restriction_matrix.resize(matrix.m() * matrix.n()); + + for (unsigned int i = 0, k = 0; i < matrix.m(); ++i) + for (unsigned int j = 0; j < matrix.n(); ++j, ++k) + scheme.restriction_matrix[k] = matrix(i, j); + } } @@ -2184,22 +2297,18 @@ namespace internal MGTwoLevelTransfer> &transfer) { - Assert( - mg_level_fine == numbers::invalid_unsigned_int || - mg_level_fine <= MGTools::max_level_for_coarse_mesh( - dof_handler_fine.get_triangulation()), - ExcMessage( - "Polynomial transfer is only allowed on the active level " - "(numbers::invalid_unsigned_int) or on refinement levels without " - "hanging nodes.")); - Assert( - mg_level_coarse == numbers::invalid_unsigned_int || - mg_level_coarse <= MGTools::max_level_for_coarse_mesh( - dof_handler_coarse.get_triangulation()), - ExcMessage( - "Polynomial transfer is only allowed on 
the active level " - "(numbers::invalid_unsigned_int) or on refinement levels without " - "hanging nodes.")); + Assert(mg_level_fine == numbers::invalid_unsigned_int || + mg_level_fine <= MGTools::max_level_for_coarse_mesh( + dof_handler_fine.get_triangulation()), + ExcMessage("Polynomial transfer is only allowed on the active " + "level (numbers::invalid_unsigned_int) or on " + "refinement levels without hanging nodes.")); + Assert(mg_level_coarse == numbers::invalid_unsigned_int || + mg_level_coarse <= MGTools::max_level_for_coarse_mesh( + dof_handler_coarse.get_triangulation()), + ExcMessage("Polynomial transfer is only allowed on the active " + "level (numbers::invalid_unsigned_int) or on " + "refinement levels without hanging nodes.")); AssertDimension(constraints_fine.n_inhomogeneities(), 0); AssertDimension(constraints_coarse.n_inhomogeneities(), 0); @@ -2257,7 +2366,6 @@ namespace internal fine_element_is_discontinuous, ExcNotImplemented()); #endif - const bool is_feq = std::all_of(dof_handler_fine.get_fe_collection().begin(), dof_handler_fine.get_fe_collection().end(), @@ -2267,6 +2375,7 @@ namespace internal &fe.base_element(0)) != nullptr); }); + const auto process_cells = [&](const auto &fu) { loop_over_active_or_level_cells( dof_handler_coarse, mg_level_coarse, [&](const auto &cell_coarse) { @@ -2290,7 +2399,7 @@ namespace internal for (auto &f : fe_index_pairs) f.second = counter++; - transfer.schemes.resize(fe_index_pairs.size()); + transfer.schemes.resize(counter); // extract number of coarse cells { @@ -2484,151 +2593,14 @@ namespace internal // ------------------------- prolongation matrix ------------------------- for (const auto &fe_index_pair_ : fe_index_pairs) - { - const auto &fe_index_pair = fe_index_pair_.first; - const auto &fe_index_no = fe_index_pair_.second; - - AssertDimension( - dof_handler_fine.get_fe(fe_index_pair.second).n_base_elements(), 1); - AssertDimension( - dof_handler_coarse.get_fe(fe_index_pair.first).n_base_elements(), - 1); - - const auto reference_cell = - dof_handler_fine.get_fe(fe_index_pair_.first.second) - .reference_cell(); - - Assert(reference_cell == - dof_handler_coarse.get_fe(fe_index_pair_.first.first) - .reference_cell(), - ExcNotImplemented()); - - if (reference_cell == ReferenceCells::get_hypercube() && - (dof_handler_coarse.get_fe(fe_index_pair.first) != - dof_handler_fine.get_fe(fe_index_pair.second)) && - (dof_handler_coarse.get_fe(fe_index_pair.first) - .n_dofs_per_cell() != 0 && - dof_handler_fine.get_fe(fe_index_pair.second) - .n_dofs_per_cell() != 0)) - { - const auto fe_fine = create_1D_fe( - dof_handler_fine.get_fe(fe_index_pair.second).base_element(0)); - - std::vector renumbering_fine( - fe_fine->n_dofs_per_cell()); - { - AssertIndexRange(fe_fine->n_dofs_per_vertex(), 2); - renumbering_fine[0] = 0; - for (unsigned int i = 0; i < fe_fine->dofs_per_line; ++i) - renumbering_fine[i + fe_fine->n_dofs_per_vertex()] = - GeometryInfo<1>::vertices_per_cell * - fe_fine->n_dofs_per_vertex() + - i; - if (fe_fine->n_dofs_per_vertex() > 0) - renumbering_fine[fe_fine->n_dofs_per_cell() - - fe_fine->n_dofs_per_vertex()] = - fe_fine->n_dofs_per_vertex(); - } - - const auto fe_coarse = create_1D_fe( - dof_handler_coarse.get_fe(fe_index_pair.first).base_element(0)); - - std::vector renumbering_coarse( - fe_coarse->n_dofs_per_cell()); - { - AssertIndexRange(fe_coarse->n_dofs_per_vertex(), 2); - renumbering_coarse[0] = 0; - for (unsigned int i = 0; i < fe_coarse->dofs_per_line; ++i) - renumbering_coarse[i + fe_coarse->n_dofs_per_vertex()] = 
- GeometryInfo<1>::vertices_per_cell * - fe_coarse->n_dofs_per_vertex() + - i; - if (fe_coarse->n_dofs_per_vertex() > 0) - renumbering_coarse[fe_coarse->n_dofs_per_cell() - - fe_coarse->n_dofs_per_vertex()] = - fe_coarse->n_dofs_per_vertex(); - } - - { - FullMatrix matrix(fe_fine->n_dofs_per_cell(), - fe_coarse->n_dofs_per_cell()); - FETools::get_projection_matrix(*fe_coarse, *fe_fine, matrix); - transfer.schemes[fe_index_no].prolongation_matrix_1d.resize( - fe_fine->n_dofs_per_cell() * fe_coarse->n_dofs_per_cell()); - - for (unsigned int i = 0, k = 0; - i < fe_coarse->n_dofs_per_cell(); - ++i) - for (unsigned int j = 0; j < fe_fine->n_dofs_per_cell(); - ++j, ++k) - transfer.schemes[fe_index_no].prolongation_matrix_1d[k] = - matrix(renumbering_fine[j], renumbering_coarse[i]); - } - - { - FullMatrix matrix(fe_coarse->n_dofs_per_cell(), - fe_fine->n_dofs_per_cell()); - FETools::get_projection_matrix(*fe_fine, *fe_coarse, matrix); - transfer.schemes[fe_index_no].restriction_matrix_1d.resize( - fe_fine->n_dofs_per_cell() * fe_coarse->n_dofs_per_cell()); - - for (unsigned int i = 0, k = 0; - i < fe_coarse->n_dofs_per_cell(); - ++i) - for (unsigned int j = 0; j < fe_fine->n_dofs_per_cell(); - ++j, ++k) - transfer.schemes[fe_index_no].restriction_matrix_1d[k] = - matrix(renumbering_coarse[i], renumbering_fine[j]); - } - } - else if (reference_cell != ReferenceCells::get_hypercube() && - (dof_handler_coarse.get_fe(fe_index_pair.first) != - dof_handler_fine.get_fe(fe_index_pair.second)) && - (dof_handler_coarse.get_fe(fe_index_pair.first) - .n_dofs_per_cell() != 0 && - dof_handler_fine.get_fe(fe_index_pair.second) - .n_dofs_per_cell() != 0)) - { - const auto &fe_fine = - dof_handler_fine.get_fe(fe_index_pair.second).base_element(0); - - const auto &fe_coarse = - dof_handler_coarse.get_fe(fe_index_pair.first).base_element(0); - - { - FullMatrix matrix(fe_fine.n_dofs_per_cell(), - fe_coarse.n_dofs_per_cell()); - FETools::get_projection_matrix(fe_coarse, fe_fine, matrix); - transfer.schemes[fe_index_no].prolongation_matrix.resize( - fe_fine.n_dofs_per_cell() * fe_coarse.n_dofs_per_cell()); - - for (unsigned int i = 0, k = 0; i < fe_coarse.n_dofs_per_cell(); - ++i) - for (unsigned int j = 0; j < fe_fine.n_dofs_per_cell(); - ++j, ++k) - transfer.schemes[fe_index_no].prolongation_matrix[k] = - matrix(j, i); - } - - { - FullMatrix matrix(fe_coarse.n_dofs_per_cell(), - fe_fine.n_dofs_per_cell()); - FETools::get_projection_matrix(fe_fine, fe_coarse, matrix); - transfer.schemes[fe_index_no].restriction_matrix.resize( - fe_fine.n_dofs_per_cell() * fe_coarse.n_dofs_per_cell()); - - for (unsigned int i = 0, k = 0; i < fe_coarse.n_dofs_per_cell(); - ++i) - for (unsigned int j = 0; j < fe_fine.n_dofs_per_cell(); - ++j, ++k) - transfer.schemes[fe_index_no].restriction_matrix[k] = - matrix(i, j); - } - } - } + compute_prolongation_and_restriction_matrices( + dof_handler_fine.get_fe(fe_index_pair_.first.second), + dof_handler_coarse.get_fe(fe_index_pair_.first.first), + transfer.schemes[fe_index_pair_.second]); // ------------------------------- weights ------------------------------- - setup_weights(constraints_fine, transfer, is_feq); + if (transfer.fine_element_is_continuous) + setup_weights(constraints_fine, transfer, is_feq); } }; @@ -3070,123 +3042,181 @@ MGTwoLevelTransfer::prolongate_and_add_internal( VectorType &dst, const VectorType &src) const { - const unsigned int n_lanes = VectorizedArrayType::size(); - - AlignedVector evaluation_data_fine; - AlignedVector evaluation_data_coarse; + if 
(matrix_free_data.get() != nullptr) + { + matrix_free_data->matrix_free_fine->template cell_loop( + [&](const MatrixFree &data, + VectorType &dst, + const int &, + const std::pair &range) { + FEEvaluation eval_fine( + data, matrix_free_data->dof_handler_index_fine); + FEEvaluation eval_coarse( + *matrix_free_data->matrix_free_coarse, + matrix_free_data->dof_handler_index_coarse); + + CellTransferFactory cell_transfer( + eval_fine.get_shape_info().data[0].fe_degree, + eval_coarse.get_shape_info().data[0].fe_degree); + for (unsigned int cell = range.first; cell < range.second; ++cell) + { + eval_fine.reinit(cell); + eval_coarse.reinit( + matrix_free_data->cell_list_fine_to_coarse[cell]); - const Number *weights = nullptr; - const VectorizedArrayType *weights_compressed = nullptr; + eval_coarse.read_dof_values(src); + if (schemes[0].prolongation_matrix.empty() == false) + { + CellProlongator + cell_prolongator(schemes[0].prolongation_matrix, + eval_coarse.begin_dof_values(), + eval_fine.begin_dof_values()); + + if (schemes[0].prolongation_matrix.size() < + eval_fine.dofs_per_cell * eval_coarse.dofs_per_cell) + cell_transfer.run(cell_prolongator); + else + cell_prolongator.run_full(eval_fine.dofs_per_cell, + eval_coarse.dofs_per_cell); + } + else + for (unsigned int i = 0; i < eval_coarse.dofs_per_cell; ++i) + eval_fine.begin_dof_values()[i] = + eval_coarse.begin_dof_values()[i]; - if (this->fine_element_is_continuous) - { - weights = this->weights.data(); - weights_compressed = this->weights_compressed.data(); + if (weights.size() > 0) + { + const VectorizedArrayType *cell_weights = + weights.data() + weights_start[cell]; + if (weights_are_compressed[cell]) + internal:: + weight_fe_q_dofs_by_entity( + cell_weights, + 1, + eval_fine.get_shape_info().data[0].fe_degree + 1, + eval_fine.begin_dof_values()); + else + for (unsigned int i = 0; i < eval_fine.dofs_per_cell; ++i) + eval_fine.begin_dof_values()[i] *= cell_weights[i]; + } + eval_fine.distribute_local_to_global(dst); + } + }, + dst, + 0); } + else + { + const unsigned int n_lanes = VectorizedArrayType::size(); - unsigned int cell_counter = 0; + AlignedVector evaluation_data_fine; + AlignedVector evaluation_data_coarse; - for (const auto &scheme : schemes) - { - if (scheme.n_coarse_cells == 0) - continue; + unsigned int cell_counter = 0; + unsigned int batch_counter = 0; - const bool needs_interpolation = - (scheme.prolongation_matrix.empty() && - scheme.prolongation_matrix_1d.empty()) == false; + for (const auto &scheme : schemes) + { + if (scheme.n_coarse_cells == 0) + continue; - evaluation_data_fine.clear(); - evaluation_data_coarse.clear(); + const bool needs_interpolation = + scheme.prolongation_matrix.empty() == false; - const unsigned int max_n_dofs_per_cell = - std::max(scheme.n_dofs_per_cell_fine, scheme.n_dofs_per_cell_coarse); - evaluation_data_fine.resize(max_n_dofs_per_cell); - evaluation_data_coarse.resize(max_n_dofs_per_cell); + evaluation_data_fine.clear(); + evaluation_data_coarse.clear(); - CellTransferFactory cell_transfer(scheme.degree_fine, - scheme.degree_coarse); + const unsigned int max_n_dofs_per_cell = + std::max(scheme.n_dofs_per_cell_fine, + scheme.n_dofs_per_cell_coarse); + evaluation_data_fine.resize(max_n_dofs_per_cell); + evaluation_data_coarse.resize(max_n_dofs_per_cell); - const unsigned int n_scalar_dofs_fine = - scheme.n_dofs_per_cell_fine / n_components; - const unsigned int n_scalar_dofs_coarse = - scheme.n_dofs_per_cell_coarse / n_components; + CellTransferFactory cell_transfer(scheme.degree_fine, + 
scheme.degree_coarse); - for (unsigned int cell = 0; cell < scheme.n_coarse_cells; cell += n_lanes) - { - const unsigned int n_lanes_filled = - (cell + n_lanes > scheme.n_coarse_cells) ? - (scheme.n_coarse_cells - cell) : - n_lanes; - - // read from src vector (similar to FEEvaluation::read_dof_values()) - internal::VectorReader reader; - constraint_info_coarse.read_write_operation( - reader, - src, - evaluation_data_coarse.data(), - cell_counter, - n_lanes_filled, - scheme.n_dofs_per_cell_coarse, - true); - constraint_info_coarse.apply_hanging_node_constraints( - cell_counter, n_lanes_filled, false, evaluation_data_coarse); - - // ---------------------------- coarse ------------------------------- - if (needs_interpolation) - for (int c = n_components - 1; c >= 0; --c) - { - CellProlongator - cell_prolongator(scheme.prolongation_matrix, - scheme.prolongation_matrix_1d, - evaluation_data_coarse.begin() + - c * n_scalar_dofs_coarse, - evaluation_data_fine.begin() + - c * n_scalar_dofs_fine); - - if (scheme.prolongation_matrix_1d.size() > 0) - cell_transfer.run(cell_prolongator); - else - cell_prolongator.run_full(n_scalar_dofs_fine, - n_scalar_dofs_coarse); - } - else - evaluation_data_fine = evaluation_data_coarse; // TODO - // ------------------------------ fine ------------------------------- + const unsigned int n_scalar_dofs_fine = + scheme.n_dofs_per_cell_fine / n_components; + const unsigned int n_scalar_dofs_coarse = + scheme.n_dofs_per_cell_coarse / n_components; - // weight - if (this->fine_element_is_continuous && - this->weights_compressed.size() > 0) + for (unsigned int cell = 0; cell < scheme.n_coarse_cells; + cell += n_lanes, ++batch_counter) { - internal:: - weight_fe_q_dofs_by_entity( - weights_compressed, - n_components, - scheme.degree_fine + 1, - evaluation_data_fine.begin()); - weights_compressed += Utilities::pow(3, dim); - } - else if (this->fine_element_is_continuous) - { - for (unsigned int v = 0; v < n_lanes_filled; ++v) + const unsigned int n_lanes_filled = + (cell + n_lanes > scheme.n_coarse_cells) ? 
+ (scheme.n_coarse_cells - cell) : + n_lanes; + + // read from src vector (similar to + // FEEvaluation::read_dof_values()) + internal::VectorReader reader; + constraint_info_coarse.read_write_operation( + reader, + src, + evaluation_data_coarse.data(), + cell_counter, + n_lanes_filled, + scheme.n_dofs_per_cell_coarse, + true); + constraint_info_coarse.apply_hanging_node_constraints( + cell_counter, n_lanes_filled, false, evaluation_data_coarse); + + // ---------------------------- coarse --------------------------- + if (needs_interpolation) + for (int c = n_components - 1; c >= 0; --c) + { + CellProlongator + cell_prolongator(scheme.prolongation_matrix, + evaluation_data_coarse.begin() + + c * n_scalar_dofs_coarse, + evaluation_data_fine.begin() + + c * n_scalar_dofs_fine); + + if (scheme.prolongation_matrix.size() < + n_scalar_dofs_fine * n_scalar_dofs_coarse) + cell_transfer.run(cell_prolongator); + else + cell_prolongator.run_full(n_scalar_dofs_fine, + n_scalar_dofs_coarse); + } + else + evaluation_data_fine = evaluation_data_coarse; // TODO + // ------------------------------ fine --------------------------- + + // weight + if (weights.size() > 0) { - for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) - evaluation_data_fine[i][v] *= weights[i]; - weights += scheme.n_dofs_per_cell_fine; + const VectorizedArrayType *cell_weights = + weights.data() + weights_start[batch_counter]; + if (weights_are_compressed[batch_counter]) + internal:: + weight_fe_q_dofs_by_entity( + cell_weights, + n_components, + scheme.degree_fine + 1, + evaluation_data_fine.begin()); + else + for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; + ++i) + evaluation_data_fine[i] *= cell_weights[i]; } - } - // add into dst vector - internal::VectorDistributorLocalToGlobal - writer; - constraint_info_fine.read_write_operation(writer, - dst, - evaluation_data_fine.data(), - cell_counter, - n_lanes_filled, - scheme.n_dofs_per_cell_fine, - false); - - cell_counter += n_lanes_filled; + // add into dst vector + internal::VectorDistributorLocalToGlobal + writer; + constraint_info_fine.read_write_operation( + writer, + dst, + evaluation_data_fine.data(), + cell_counter, + n_lanes_filled, + scheme.n_dofs_per_cell_fine, + false); + + cell_counter += n_lanes_filled; + } } } } @@ -3222,9 +3252,8 @@ MGTwoLevelTransferBase::restrict_and_add( if (use_dst_inplace == false) *vec_coarse_ptr = Number(0.0); - this->zero_out_ghost_values( - *vec_coarse_ptr); // since we might add into the - // ghost values and call compress + // since we might add into the ghost values and call compress + this->zero_out_ghost_values(*vec_coarse_ptr); this->restrict_and_add_internal(*vec_coarse_ptr, *vec_fine_ptr); @@ -3250,124 +3279,183 @@ MGTwoLevelTransfer::restrict_and_add_internal( VectorType &dst, const VectorType &src) const { - const unsigned int n_lanes = VectorizedArrayType::size(); + if (matrix_free_data.get() != nullptr) + { + int dummy = 0; + matrix_free_data->matrix_free_fine->template cell_loop( + [&](const MatrixFree &data, + int &, + const VectorType &src, + const std::pair &range) { + FEEvaluation eval_fine( + data, matrix_free_data->dof_handler_index_fine); + FEEvaluation eval_coarse( + *matrix_free_data->matrix_free_coarse, + matrix_free_data->dof_handler_index_coarse); + + CellTransferFactory cell_transfer( + eval_fine.get_shape_info().data[0].fe_degree, + eval_coarse.get_shape_info().data[0].fe_degree); + for (unsigned int cell = range.first; cell < range.second; ++cell) + { + eval_fine.reinit(cell); + 
eval_coarse.reinit( + matrix_free_data->cell_list_fine_to_coarse[cell]); - AlignedVector evaluation_data_fine; - AlignedVector evaluation_data_coarse; + eval_fine.read_dof_values(src); + if (weights.size() > 0) + { + const VectorizedArrayType *cell_weights = + weights.data() + weights_start[cell]; + if (weights_are_compressed[cell]) + internal:: + weight_fe_q_dofs_by_entity( + cell_weights, + 1, + eval_fine.get_shape_info().data[0].fe_degree + 1, + eval_fine.begin_dof_values()); + else + for (unsigned int i = 0; i < eval_fine.dofs_per_cell; ++i) + eval_fine.begin_dof_values()[i] *= cell_weights[i]; + } - const Number *weights = nullptr; - const VectorizedArrayType *weights_compressed = nullptr; + if (schemes[0].prolongation_matrix.empty() == false) + { + CellRestrictor + cell_restrictor(schemes[0].prolongation_matrix, + eval_fine.begin_dof_values(), + eval_coarse.begin_dof_values()); + + if (schemes[0].prolongation_matrix.size() < + eval_fine.dofs_per_cell * eval_coarse.dofs_per_cell) + cell_transfer.run(cell_restrictor); + else + cell_restrictor.run_full(eval_fine.dofs_per_cell, + eval_coarse.dofs_per_cell); + } + else + for (unsigned int i = 0; i < eval_fine.dofs_per_cell; ++i) + eval_coarse.begin_dof_values()[i] = + eval_fine.begin_dof_values()[i]; - if (this->fine_element_is_continuous) - { - weights = this->weights.data(); - weights_compressed = this->weights_compressed.data(); + eval_coarse.distribute_local_to_global(dst); + } + }, + dummy, + src); } + else + { + const unsigned int n_lanes = VectorizedArrayType::size(); - unsigned int cell_counter = 0; + AlignedVector evaluation_data_fine; + AlignedVector evaluation_data_coarse; - for (const auto &scheme : schemes) - { - if (scheme.n_coarse_cells == 0) - continue; + unsigned int cell_counter = 0; + unsigned int batch_counter = 0; - const bool needs_interpolation = - (scheme.prolongation_matrix.empty() && - scheme.prolongation_matrix_1d.empty()) == false; + for (const auto &scheme : schemes) + { + if (scheme.n_coarse_cells == 0) + continue; - evaluation_data_fine.clear(); - evaluation_data_coarse.clear(); + const bool needs_interpolation = + scheme.prolongation_matrix.empty() == false; - const unsigned int max_n_dofs_per_cell = - std::max(scheme.n_dofs_per_cell_fine, scheme.n_dofs_per_cell_coarse); - evaluation_data_fine.resize(max_n_dofs_per_cell); - evaluation_data_coarse.resize(max_n_dofs_per_cell); + evaluation_data_fine.clear(); + evaluation_data_coarse.clear(); - CellTransferFactory cell_transfer(scheme.degree_fine, - scheme.degree_coarse); + const unsigned int max_n_dofs_per_cell = + std::max(scheme.n_dofs_per_cell_fine, + scheme.n_dofs_per_cell_coarse); + evaluation_data_fine.resize(max_n_dofs_per_cell); + evaluation_data_coarse.resize(max_n_dofs_per_cell); - const unsigned int n_scalar_dofs_fine = - scheme.n_dofs_per_cell_fine / n_components; - const unsigned int n_scalar_dofs_coarse = - scheme.n_dofs_per_cell_coarse / n_components; + CellTransferFactory cell_transfer(scheme.degree_fine, + scheme.degree_coarse); - for (unsigned int cell = 0; cell < scheme.n_coarse_cells; cell += n_lanes) - { - const unsigned int n_lanes_filled = - (cell + n_lanes > scheme.n_coarse_cells) ? 
- (scheme.n_coarse_cells - cell) : - n_lanes; - - // read from source vector - internal::VectorReader reader; - constraint_info_fine.read_write_operation(reader, - src, - evaluation_data_fine.data(), - cell_counter, - n_lanes_filled, - scheme.n_dofs_per_cell_fine, - false); - - // weight - if (this->fine_element_is_continuous && - this->weights_compressed.size() > 0) - { - internal:: - weight_fe_q_dofs_by_entity( - weights_compressed, - n_components, - scheme.degree_fine + 1, - evaluation_data_fine.begin()); - weights_compressed += Utilities::pow(3, dim); - } - else if (this->fine_element_is_continuous) + const unsigned int n_scalar_dofs_fine = + scheme.n_dofs_per_cell_fine / n_components; + const unsigned int n_scalar_dofs_coarse = + scheme.n_dofs_per_cell_coarse / n_components; + + for (unsigned int cell = 0; cell < scheme.n_coarse_cells; + cell += n_lanes, ++batch_counter) { - for (unsigned int v = 0; v < n_lanes_filled; ++v) + const unsigned int n_lanes_filled = + (cell + n_lanes > scheme.n_coarse_cells) ? + (scheme.n_coarse_cells - cell) : + n_lanes; + + // read from source vector + internal::VectorReader reader; + constraint_info_fine.read_write_operation( + reader, + src, + evaluation_data_fine.data(), + cell_counter, + n_lanes_filled, + scheme.n_dofs_per_cell_fine, + false); + + // weight + if (weights.size() > 0) { - for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; ++i) - evaluation_data_fine[i][v] *= weights[i]; - weights += scheme.n_dofs_per_cell_fine; + const VectorizedArrayType *cell_weights = + weights.data() + weights_start[batch_counter]; + if (weights_are_compressed[batch_counter]) + internal:: + weight_fe_q_dofs_by_entity( + cell_weights, + n_components, + scheme.degree_fine + 1, + evaluation_data_fine.data()); + else + for (unsigned int i = 0; i < scheme.n_dofs_per_cell_fine; + ++i) + evaluation_data_fine[i] *= cell_weights[i]; } - } - // ------------------------------ fine ------------------------------- - if (needs_interpolation) - for (int c = n_components - 1; c >= 0; --c) - { - CellRestrictor - cell_restrictor(scheme.prolongation_matrix, - scheme.prolongation_matrix_1d, - evaluation_data_fine.begin() + - c * n_scalar_dofs_fine, - evaluation_data_coarse.begin() + - c * n_scalar_dofs_coarse); - - if (scheme.prolongation_matrix_1d.size() > 0) - cell_transfer.run(cell_restrictor); - else - cell_restrictor.run_full(n_scalar_dofs_fine, - n_scalar_dofs_coarse); - } - else - evaluation_data_coarse = evaluation_data_fine; // TODO - // ----------------------------- coarse ------------------------------ - - // write into dst vector (similar to - // FEEvaluation::distribute_global_to_local()) - internal::VectorDistributorLocalToGlobal - writer; - constraint_info_coarse.apply_hanging_node_constraints( - cell_counter, n_lanes_filled, true, evaluation_data_coarse); - constraint_info_coarse.read_write_operation( - writer, - dst, - evaluation_data_coarse.data(), - cell_counter, - n_lanes_filled, - scheme.n_dofs_per_cell_coarse, - true); - - cell_counter += n_lanes_filled; + // ------------------------------ fine --------------------------- + if (needs_interpolation) + for (int c = n_components - 1; c >= 0; --c) + { + CellRestrictor + cell_restrictor(scheme.prolongation_matrix, + evaluation_data_fine.begin() + + c * n_scalar_dofs_fine, + evaluation_data_coarse.begin() + + c * n_scalar_dofs_coarse); + + if (scheme.prolongation_matrix.size() < + n_scalar_dofs_fine * n_scalar_dofs_coarse) + cell_transfer.run(cell_restrictor); + else + 
cell_restrictor.run_full(n_scalar_dofs_fine, + n_scalar_dofs_coarse); + } + else + evaluation_data_coarse = evaluation_data_fine; // TODO + // ----------------------------- coarse -------------------------- + + // write into dst vector (similar to + // FEEvaluation::distribute_global_to_local()) + internal::VectorDistributorLocalToGlobal + writer; + constraint_info_coarse.apply_hanging_node_constraints( + cell_counter, n_lanes_filled, true, evaluation_data_coarse); + constraint_info_coarse.read_write_operation( + writer, + dst, + evaluation_data_coarse.data(), + cell_counter, + n_lanes_filled, + scheme.n_dofs_per_cell_coarse, + true); + + cell_counter += n_lanes_filled; + } } } } @@ -3379,128 +3467,179 @@ void MGTwoLevelTransfer::interpolate(VectorType &dst, const VectorType &src) const { - const unsigned int n_lanes = VectorizedArrayType::size(); + if (matrix_free_data.get() != nullptr) + { + FEEvaluation eval_fine( + *matrix_free_data->matrix_free_fine, + matrix_free_data->dof_handler_index_fine); + FEEvaluation eval_coarse( + *matrix_free_data->matrix_free_coarse, + matrix_free_data->dof_handler_index_coarse); + src.update_ghost_values(); + + CellTransferFactory cell_transfer( + eval_fine.get_shape_info().data[0].fe_degree, + eval_coarse.get_shape_info().data[0].fe_degree); + for (unsigned int cell = 0; + cell < matrix_free_data->matrix_free_fine->n_cell_batches(); + ++cell) + { + eval_fine.reinit(cell); + eval_coarse.reinit(matrix_free_data->cell_list_fine_to_coarse[cell]); - const bool use_src_inplace = this->vec_fine.size() == 0; - const auto *const vec_fine_ptr = use_src_inplace ? &src : &this->vec_fine; - Assert(vec_fine_ptr->get_partitioner().get() == this->partitioner_fine.get(), - ExcInternalError()); + eval_fine.read_dof_values(src); - const bool use_dst_inplace = this->vec_coarse.size() == 0; - auto *const vec_coarse_ptr = use_dst_inplace ? &dst : &this->vec_coarse; - Assert(vec_coarse_ptr->get_partitioner().get() == - this->partitioner_coarse.get(), - ExcInternalError()); + if (schemes[0].restriction_matrix.empty() == false) + { + CellRestrictor cell_restrictor( + schemes[0].restriction_matrix, + eval_fine.begin_dof_values(), + eval_coarse.begin_dof_values()); + + if (schemes[0].prolongation_matrix.size() < + eval_fine.dofs_per_cell * eval_coarse.dofs_per_cell) + cell_transfer.run(cell_restrictor); + else + cell_restrictor.run_full(eval_fine.dofs_per_cell, + eval_coarse.dofs_per_cell); + } + else + for (unsigned int i = 0; i < eval_fine.dofs_per_cell; ++i) + eval_coarse.begin_dof_values()[i] = + eval_fine.begin_dof_values()[i]; - const bool src_ghosts_have_been_set = src.has_ghost_elements(); + eval_coarse.set_dof_values(dst); + } + src.zero_out_ghost_values(); + dst.zero_out_ghost_values(); + } + else + { + const unsigned int n_lanes = VectorizedArrayType::size(); - if (use_src_inplace == false) - this->vec_fine.copy_locally_owned_data_from(src); + const bool use_src_inplace = this->vec_fine.size() == 0; + const auto *const vec_fine_ptr = use_src_inplace ? &src : &this->vec_fine; + Assert(vec_fine_ptr->get_partitioner().get() == + this->partitioner_fine.get(), + ExcInternalError()); - if ((use_src_inplace == false) || (this->vec_fine_needs_ghost_update && - (src_ghosts_have_been_set == false))) - this->update_ghost_values(*vec_fine_ptr); + const bool use_dst_inplace = this->vec_coarse.size() == 0; + auto *const vec_coarse_ptr = use_dst_inplace ? 
&dst : &this->vec_coarse; + Assert(vec_coarse_ptr->get_partitioner().get() == + this->partitioner_coarse.get(), + ExcInternalError()); - *vec_coarse_ptr = Number(0.0); + const bool src_ghosts_have_been_set = src.has_ghost_elements(); - AlignedVector evaluation_data_fine; - AlignedVector evaluation_data_coarse; + if (use_src_inplace == false) + this->vec_fine.copy_locally_owned_data_from(src); - unsigned int cell_counter = 0; + if ((use_src_inplace == false) || (this->vec_fine_needs_ghost_update && + (src_ghosts_have_been_set == false))) + this->update_ghost_values(*vec_fine_ptr); - for (const auto &scheme : schemes) - { - if (scheme.n_coarse_cells == 0) - continue; + *vec_coarse_ptr = Number(0.0); + + AlignedVector evaluation_data_fine; + AlignedVector evaluation_data_coarse; - if (scheme.n_dofs_per_cell_fine == 0 || - scheme.n_dofs_per_cell_coarse == 0) + unsigned int cell_counter = 0; + + for (const auto &scheme : schemes) { - cell_counter += scheme.n_coarse_cells; + if (scheme.n_coarse_cells == 0) + continue; - continue; - } + if (scheme.n_dofs_per_cell_fine == 0 || + scheme.n_dofs_per_cell_coarse == 0) + { + cell_counter += scheme.n_coarse_cells; - const bool needs_interpolation = - (scheme.restriction_matrix.empty() && - scheme.restriction_matrix_1d.empty()) == false; + continue; + } - // general case -> local restriction is needed - evaluation_data_fine.resize(scheme.n_dofs_per_cell_fine); - evaluation_data_coarse.resize(scheme.n_dofs_per_cell_fine); + const bool needs_interpolation = + scheme.restriction_matrix.empty() == false; - CellTransferFactory cell_transfer(scheme.degree_fine, - scheme.degree_coarse); + // general case -> local restriction is needed + evaluation_data_fine.resize(scheme.n_dofs_per_cell_fine); + evaluation_data_coarse.resize(scheme.n_dofs_per_cell_fine); - const unsigned int n_scalar_dofs_fine = - scheme.n_dofs_per_cell_fine / n_components; - const unsigned int n_scalar_dofs_coarse = - scheme.n_dofs_per_cell_coarse / n_components; + CellTransferFactory cell_transfer(scheme.degree_fine, + scheme.degree_coarse); - for (unsigned int cell = 0; cell < scheme.n_coarse_cells; cell += n_lanes) - { - const unsigned int n_lanes_filled = - (cell + n_lanes > scheme.n_coarse_cells) ? 
- (scheme.n_coarse_cells - cell) : - n_lanes; - - // read from source vector - internal::VectorReader reader; - constraint_info_fine.read_write_operation(reader, - *vec_fine_ptr, - evaluation_data_fine.data(), - cell_counter, - n_lanes_filled, - scheme.n_dofs_per_cell_fine, - false); - - // ------------------------------ fine ------------------------------- - if (needs_interpolation) - for (int c = n_components - 1; c >= 0; --c) - { - CellRestrictor - cell_restrictor(scheme.restriction_matrix, - scheme.restriction_matrix_1d, - evaluation_data_fine.begin() + - c * n_scalar_dofs_fine, - evaluation_data_coarse.begin() + - c * n_scalar_dofs_coarse); - - if (scheme.restriction_matrix_1d.size() > 0) - cell_transfer.run(cell_restrictor); - else - cell_restrictor.run_full(n_scalar_dofs_fine, - n_scalar_dofs_coarse); - } - else - evaluation_data_coarse = evaluation_data_fine; // TODO - // ----------------------------- coarse ------------------------------ - - // write into dst vector (similar to - // FEEvaluation::set_dof_values_plain()) - internal::VectorSetter writer; - constraint_info_coarse.read_write_operation( - writer, - *vec_coarse_ptr, - evaluation_data_coarse.data(), - cell_counter, - n_lanes_filled, - scheme.n_dofs_per_cell_coarse, - false); - - cell_counter += n_lanes_filled; + const unsigned int n_scalar_dofs_fine = + scheme.n_dofs_per_cell_fine / n_components; + const unsigned int n_scalar_dofs_coarse = + scheme.n_dofs_per_cell_coarse / n_components; + + for (unsigned int cell = 0; cell < scheme.n_coarse_cells; + cell += n_lanes) + { + const unsigned int n_lanes_filled = + (cell + n_lanes > scheme.n_coarse_cells) ? + (scheme.n_coarse_cells - cell) : + n_lanes; + + // read from source vector + internal::VectorReader reader; + constraint_info_fine.read_write_operation( + reader, + *vec_fine_ptr, + evaluation_data_fine.data(), + cell_counter, + n_lanes_filled, + scheme.n_dofs_per_cell_fine, + false); + + // ------------------------------ fine --------------------------- + if (needs_interpolation) + for (int c = n_components - 1; c >= 0; --c) + { + CellRestrictor + cell_restrictor(scheme.restriction_matrix, + evaluation_data_fine.begin() + + c * n_scalar_dofs_fine, + evaluation_data_coarse.begin() + + c * n_scalar_dofs_coarse); + + if (scheme.restriction_matrix.size() < + n_scalar_dofs_fine * n_scalar_dofs_coarse) + cell_transfer.run(cell_restrictor); + else + cell_restrictor.run_full(n_scalar_dofs_fine, + n_scalar_dofs_coarse); + } + else + evaluation_data_coarse = evaluation_data_fine; // TODO + // ----------------------------- coarse -------------------------- + + // write into dst vector (similar to + // FEEvaluation::set_dof_values_plain()) + internal::VectorSetter writer; + constraint_info_coarse.read_write_operation( + writer, + *vec_coarse_ptr, + evaluation_data_coarse.data(), + cell_counter, + n_lanes_filled, + scheme.n_dofs_per_cell_coarse, + false); + + cell_counter += n_lanes_filled; + } } - } - // clean up related to update_ghost_values() - if (use_src_inplace == false) - vec_fine_ptr->set_ghost_state(false); // internal vector - else if (this->fine_element_is_continuous && - (src_ghosts_have_been_set == false)) - this->zero_out_ghost_values(*vec_fine_ptr); // external vector + // clean up related to update_ghost_values() + if (use_src_inplace == false) + vec_fine_ptr->set_ghost_state(false); // internal vector + else if (this->fine_element_is_continuous && + (src_ghosts_have_been_set == false)) + this->zero_out_ghost_values(*vec_fine_ptr); // external vector - if 
(use_dst_inplace == false) - dst.copy_locally_owned_data_from(this->vec_coarse); + if (use_dst_inplace == false) + dst.copy_locally_owned_data_from(this->vec_coarse); + } } @@ -3552,6 +3691,49 @@ namespace internal return embedded_partitioner; } + + // Helper class to compute correct weights, which works by simply using + // the degrees of freedom stored in MatrixFree, bypassing the hanging node + // interpolation matrices. + template + class FEEvaluationNoConstraints : public FEEvaluation + { + public: + FEEvaluationNoConstraints(const MatrixFree &data, + const unsigned int dof_index) + : FEEvaluation(data, dof_index) + {} + + template + void + read_dof_values_unconstrained(VectorType &src) + { + std::array src_vector{{&src}}; + internal::VectorReader> reader; + this->template read_write_operation( + reader, + src_vector, + {}, + std::bitset::size()>().flip(), + false); + } + + template + void + distribute_local_to_global_unconstrained(VectorType &dst) + { + std::array dst_vector{{&dst}}; + internal::VectorDistributorLocalToGlobal> + writer; + this->template read_write_operation( + writer, + dst_vector, + {}, + std::bitset::size()>().flip(), + false); + } + }; } // namespace } // namespace internal @@ -3643,12 +3825,15 @@ MGTwoLevelTransfer::enable_inplace_operations_if_possible( const std::shared_ptr &external_partitioner_fine) { - return this->internal_enable_inplace_operations_if_possible( - external_partitioner_coarse, - external_partitioner_fine, - this->vec_fine_needs_ghost_update, - constraint_info_coarse, - constraint_info_fine.dof_indices); + if (matrix_free_data.get() != nullptr) + return std::make_pair(true, true); + else + return this->internal_enable_inplace_operations_if_possible( + external_partitioner_coarse, + external_partitioner_fine, + this->vec_fine_needs_ghost_update, + constraint_info_coarse, + constraint_info_fine.dof_indices); } @@ -3663,6 +3848,7 @@ MGTwoLevelTransfer::reinit_geometric_transfer( const unsigned int mg_level_fine, const unsigned int mg_level_coarse) { + matrix_free_data.reset(); internal::MGTwoLevelTransferImplementation::reinit_geometric_transfer( dof_handler_fine, dof_handler_coarse, @@ -3685,6 +3871,7 @@ MGTwoLevelTransfer::reinit_polynomial_transfer( const unsigned int mg_level_fine, const unsigned int mg_level_coarse) { + matrix_free_data.reset(); internal::MGTwoLevelTransferImplementation::reinit_polynomial_transfer( dof_handler_fine, dof_handler_coarse, @@ -3790,6 +3977,161 @@ MGTwoLevelTransfer::reinit( +template +void +MGTwoLevelTransfer::reinit( + const MatrixFree &matrix_free_fine, + const unsigned int dof_no_fine, + const MatrixFree &matrix_free_coarse, + const unsigned int dof_no_coarse) +{ + matrix_free_data = std::make_unique(); + + MatrixFreeRelatedData &data = *matrix_free_data; + data.matrix_free_fine = &matrix_free_fine; + data.matrix_free_coarse = &matrix_free_coarse; + AssertIndexRange(dof_no_fine, matrix_free_fine.n_components()); + data.dof_handler_index_fine = dof_no_fine; + AssertIndexRange(dof_no_coarse, matrix_free_coarse.n_components()); + data.dof_handler_index_coarse = dof_no_coarse; + + const DoFHandler &dof_fine = + matrix_free_fine.get_dof_handler(dof_no_fine); + const DoFHandler &dof_coarse = + matrix_free_coarse.get_dof_handler(dof_no_coarse); + + Assert(&dof_fine.get_triangulation() == &dof_coarse.get_triangulation(), + ExcMessage("You can only use this class if both MatrixFree objects " + "use the same underlying triangulation!")); + AssertDimension(matrix_free_fine.get_mg_level(), + 
matrix_free_coarse.get_mg_level()); + + AssertDimension(matrix_free_fine.n_physical_cells(), + matrix_free_coarse.n_physical_cells()); + + Assert(dof_fine.get_fe_collection().size() == 1, + ExcMessage("hp finite element objects are currently not supported " + "by this class.")); + Assert(dof_coarse.get_fe_collection().size() == 1, + ExcMessage("hp finite element objects are currently not supported " + "by this class.")); + + this->dof_handler_fine = &dof_fine; + this->mg_level_fine = matrix_free_fine.get_mg_level(); + + const FiniteElement &fe_fine = dof_fine.get_fe(); + + Assert(fe_fine.n_components() == 1, ExcNotImplemented()); + + // Compute the link between cells on matrix_free_coarse and + // matrix_free_fine, with the latter controlling the loop + const Triangulation &tria = dof_fine.get_triangulation(); + std::vector coarse_cell_indices(tria.n_active_cells()); + for (unsigned int cell = 0; cell < matrix_free_coarse.n_cell_batches(); + ++cell) + for (unsigned int v = 0; + v < matrix_free_coarse.n_active_entries_per_cell_batch(cell); + ++v) + { + coarse_cell_indices[matrix_free_coarse.get_cell_iterator(cell, v) + ->active_cell_index()] = + cell * VectorizedArrayType::size() + v; + } + + std::array default_entry; + default_entry.fill(numbers::invalid_unsigned_int); + data.cell_list_fine_to_coarse.resize(matrix_free_fine.n_cell_batches(), + default_entry); + for (unsigned int cell = 0; cell < matrix_free_fine.n_cell_batches(); ++cell) + for (unsigned int v = 0; + v < matrix_free_fine.n_active_entries_per_cell_batch(cell); + ++v) + { + data.cell_list_fine_to_coarse[cell][v] = + coarse_cell_indices[matrix_free_fine.get_cell_iterator(cell, v) + ->active_cell_index()]; + } + + // Compute weights + if (fe_fine.dofs_per_vertex > 0) + { + VectorType weight_vector; + matrix_free_fine.initialize_dof_vector(weight_vector, dof_no_fine); + + internal::FEEvaluationNoConstraints evaluator( + matrix_free_fine, dof_no_fine); + for (unsigned int cell = 0; cell < matrix_free_fine.n_cell_batches(); + ++cell) + { + evaluator.reinit(cell); + for (unsigned int i = 0; i < evaluator.dofs_per_cell; ++i) + evaluator.begin_dof_values()[i] = 1.; + evaluator.distribute_local_to_global_unconstrained(weight_vector); + } + weight_vector.compress(VectorOperation::add); + + for (unsigned int i = 0; i < weight_vector.locally_owned_size(); ++i) + if (weight_vector.local_element(i) > 0.) + weight_vector.local_element(i) = + Number(1.) 
/ weight_vector.local_element(i); + for (const unsigned int index : + matrix_free_fine.get_constrained_dofs(dof_no_fine)) + weight_vector.local_element(index) = 0; + weight_vector.update_ghost_values(); + + weights.clear(); + weights.reserve(matrix_free_fine.n_cell_batches() * + Utilities::pow(3, dim)); + weights_start.clear(); + weights_start.resize(matrix_free_fine.n_cell_batches()); + weights_are_compressed.clear(); + weights_are_compressed.resize(weights_start.size()); + std::array + weights_compressed; + for (unsigned int cell = 0; cell < matrix_free_fine.n_cell_batches(); + ++cell) + { + weights_start[cell] = weights.size(); + + weights_compressed.fill(VectorizedArray()); + evaluator.reinit(cell); + evaluator.read_dof_values_unconstrained(weight_vector); + + const bool can_compress_weights = + internal::compute_weights_fe_q_dofs_by_entity( + evaluator.begin_dof_values(), + 1, + fe_fine.degree + 1, + weights_compressed.data()); + if (can_compress_weights && fe_fine.degree > 1) + { + for (const VectorizedArrayType weight : weights_compressed) + weights.push_back(weight); + weights_are_compressed[cell] = 1; + } + else + for (unsigned int i = 0; i < evaluator.dofs_per_cell; ++i) + weights.push_back(evaluator.begin_dof_values()[i]); + } + } + + // Compute interpolation matrices, possibly in the 1D version + schemes.resize(1); + internal::MGTwoLevelTransferImplementation:: + compute_prolongation_and_restriction_matrices(fe_fine, + dof_coarse.get_fe(), + schemes[0]); + + // We internally call the ghost updates through the matrix-free framework + this->vec_fine_needs_ghost_update = false; + + this->partitioner_fine = matrix_free_fine.get_vector_partitioner(dof_no_fine); + this->partitioner_coarse = + matrix_free_coarse.get_vector_partitioner(dof_no_coarse); +} + + + template bool MGTwoLevelTransfer::fast_polynomial_transfer_supported( @@ -3813,16 +4155,20 @@ MGTwoLevelTransfer::memory_consumption() const for (const auto &scheme : schemes) { size += scheme.prolongation_matrix.memory_consumption(); - size += scheme.prolongation_matrix_1d.memory_consumption(); size += scheme.restriction_matrix.memory_consumption(); - size += scheme.restriction_matrix_1d.memory_consumption(); } + if (matrix_free_data.get() != nullptr) + size += MemoryConsumption::memory_consumption( + matrix_free_data->cell_list_fine_to_coarse); + size += this->partitioner_fine->memory_consumption(); size += this->partitioner_coarse->memory_consumption(); size += this->vec_fine.memory_consumption(); size += this->vec_coarse.memory_consumption(); - size += MemoryConsumption::memory_consumption(weights); + size += weights.memory_consumption(); + size += MemoryConsumption::memory_consumption(weights_start); + size += MemoryConsumption::memory_consumption(weights_are_compressed); size += constraint_info_coarse.memory_consumption(); size += constraint_info_fine.memory_consumption();
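
Editorial note (not part of the commit): the class documentation added above describes two ways of setting up the transfer, and the second, MatrixFree-based entry point is the one this patch introduces. The following sketch shows how that entry point could be used for a single p-coarsening step. It assumes a serial hyper-cube mesh, scalar FE_Q elements of degree 2 (fine) and 1 (coarse), and MatrixFree objects of the kind that would normally already exist for the level operators and smoothers; the function name two_level_p_transfer_sketch and the concrete degrees and quadratures are illustrative choices, whereas reinit(), prolongate_and_add(), and restrict_and_add() are the interfaces appearing in the diff.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/mapping_q1.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/multigrid/mg_transfer_global_coarsening.h>

using namespace dealii;

template <int dim>
void
two_level_p_transfer_sketch()
{
  using Number     = double;
  using VectorType = LinearAlgebra::distributed::Vector<Number>;

  // Both levels live on the same mesh; only the polynomial degree differs.
  Triangulation<dim> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  DoFHandler<dim> dof_fine(tria), dof_coarse(tria);
  dof_fine.distribute_dofs(FE_Q<dim>(2));
  dof_coarse.distribute_dofs(FE_Q<dim>(1));

  AffineConstraints<Number> constraints_fine, constraints_coarse;
  DoFTools::make_hanging_node_constraints(dof_fine, constraints_fine);
  DoFTools::make_hanging_node_constraints(dof_coarse, constraints_coarse);
  constraints_fine.close();
  constraints_coarse.close();

  // MatrixFree objects of the kind that would already be set up for the
  // level operators and smoothers of a p-multigrid cycle.
  const MappingQ1<dim>    mapping;
  MatrixFree<dim, Number> mf_fine, mf_coarse;
  mf_fine.reinit(mapping, dof_fine, constraints_fine, QGauss<dim>(3));
  mf_coarse.reinit(mapping, dof_coarse, constraints_coarse, QGauss<dim>(2));

  // New, more setup-efficient entry point added by this patch: build the
  // p-coarsening transfer directly from the two MatrixFree objects
  // (DoFHandler index 0 within each of them), instead of collecting the
  // DoF indices again through the DoFHandler-based reinit() variants.
  MGTwoLevelTransfer<dim, VectorType> transfer;
  transfer.reinit(mf_fine, 0, mf_coarse, 0);

  VectorType fine, coarse;
  mf_fine.initialize_dof_vector(fine);
  mf_coarse.initialize_dof_vector(coarse);

  coarse.add(1.0);                           // a constant coarse-level function
  transfer.prolongate_and_add(fine, coarse); // embed it into the fine space
  coarse = 0.;
  transfer.restrict_and_add(coarse, fine);   // transpose operation
}

A design note on the interpolation matrices used by this path: the patch merges the former prolongation_matrix_1d/restriction_matrix_1d fields into the single prolongation_matrix/restriction_matrix arrays filled by compute_prolongation_and_restriction_matrices(). Whether a stored matrix is the 1d embedding matrix (tensor-product elements, applied by sum factorization via CellTransferFactory::run()) or a full element matrix (applied by run_full()) is decided purely by comparing its size against the product of the fine and coarse scalar DoF counts, which is what the cell loops in the diff check.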
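
The new weights_start, weights, and weights_are_compressed members replace the previous pair of a scalar per-DoF weights array and a separate compressed weights_compressed array by a single storage scheme: per cell batch, either 3^dim compressed values or one value per DoF, located through a CRS-like start pointer. The following plain C++ toy model (no deal.II types; the batch sizes and weight values are purely illustrative) shows the layout and the lookup this enables.

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  // Two cell batches in 2d: the first admits the compressed representation
  // (3^2 = 9 values, one per vertex/edge/interior group of an FE_Q cell),
  // the second stores one weight per DoF (here 16 DoFs, as for Q3 in 2d).
  std::vector<double>        weights;
  std::vector<std::size_t>   weights_start;
  std::vector<unsigned char> weights_are_compressed;

  const auto append_batch = [&](const std::vector<double> &w,
                                const bool                 compressed) {
    weights_start.push_back(weights.size());
    weights.insert(weights.end(), w.begin(), w.end());
    weights_are_compressed.push_back(compressed ? 1 : 0);
  };

  append_batch(std::vector<double>(9, 0.25), true);  // compressed batch
  append_batch(std::vector<double>(16, 0.5), false); // full batch

  // Lookup: a CRS-like start pointer plus one flag per batch is all that is
  // needed to find the weights of batch b and decide how to interpret them.
  for (std::size_t b = 0; b < weights_start.size(); ++b)
    {
      const double *batch_weights = weights.data() + weights_start[b];
      std::cout << "batch " << b << ": "
                << (weights_are_compressed[b] ? "compressed" : "full")
                << ", first weight = " << batch_weights[0] << '\n';
    }
}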
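
The comment on MatrixFreeRelatedData::cell_list_fine_to_coarse notes that the two MatrixFree objects will in general traverse the cells in different orders, so the fine object drives the loop and the coarse evaluator is reinitialized with the matching coarse indices. A self-contained toy example of building such a fine-to-coarse index list (made-up cell orderings, plain C++ rather than deal.II code) follows; in the patch itself, the shared key is the active cell index of the common Triangulation and the resulting per-batch array is handed to FEEvaluation::reinit() on the coarse side.

#include <array>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  // Toy model: 6 cells, identified by a globally agreed active cell index,
  // traversed in different orders and packed into batches of 2 "lanes" by
  // the fine and coarse side, respectively.
  constexpr unsigned int          n_lanes      = 2;
  const std::vector<unsigned int> fine_order   = {0, 3, 1, 4, 2, 5};
  const std::vector<unsigned int> coarse_order = {5, 4, 3, 2, 1, 0};

  // Invert the coarse ordering: cell index -> batch * n_lanes + lane.
  std::vector<unsigned int> coarse_position(coarse_order.size());
  for (std::size_t i = 0; i < coarse_order.size(); ++i)
    coarse_position[coarse_order[i]] = i;

  // For each fine batch, record where its lanes live on the coarse side;
  // this mirrors the role of cell_list_fine_to_coarse in the patch.
  std::vector<std::array<unsigned int, n_lanes>> cell_list_fine_to_coarse(
    fine_order.size() / n_lanes);
  for (std::size_t i = 0; i < fine_order.size(); ++i)
    cell_list_fine_to_coarse[i / n_lanes][i % n_lanes] =
      coarse_position[fine_order[i]];

  for (std::size_t batch = 0; batch < cell_list_fine_to_coarse.size(); ++batch)
    std::cout << "fine batch " << batch << " -> coarse indices "
              << cell_list_fine_to_coarse[batch][0] << ", "
              << cell_list_fine_to_coarse[batch][1] << '\n';
}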