From cf7a14264e02d5239a3bce49eddcfdd4efc0b13a Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Sat, 23 May 2020 16:49:27 +0200 Subject: [PATCH] Split up MatrixFree::initialize_indices to speed up compilation --- include/deal.II/matrix_free/dof_info.h | 15 + .../deal.II/matrix_free/dof_info.templates.h | 305 ++- .../deal.II/matrix_free/face_setup_internal.h | 23 +- include/deal.II/matrix_free/matrix_free.h | 60 +- .../matrix_free/matrix_free.templates.h | 2003 +++++++---------- source/matrix_free/CMakeLists.txt | 2 - source/matrix_free/matrix_free.cc | 4 - source/matrix_free/matrix_free.inst.in | 30 - source/matrix_free/matrix_free_inst2.cc | 17 - source/matrix_free/matrix_free_inst3.cc | 17 - 10 files changed, 1115 insertions(+), 1361 deletions(-) delete mode 100644 source/matrix_free/matrix_free_inst2.cc delete mode 100644 source/matrix_free/matrix_free_inst3.cc diff --git a/include/deal.II/matrix_free/dof_info.h b/include/deal.II/matrix_free/dof_info.h index 01b287a0ef..cef97730b9 100644 --- a/include/deal.II/matrix_free/dof_info.h +++ b/include/deal.II/matrix_free/dof_info.h @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -226,6 +227,20 @@ namespace internal const std::vector &renumbering, DynamicSparsityPattern &connectivity) const; + /** + * In case face integrals are enabled, find out whether certain loops + * over the unknowns only access a subset of all the ghost dofs we keep + * in the main partitioner. + */ + void + compute_tight_partitioners( + const Table<2, ShapeInfo> & shape_info, + const unsigned int n_owned_cells, + const unsigned int n_lanes, + const std::vector> &inner_faces, + const std::vector> &ghosted_faces, + const bool fill_cell_centric); + /** * Compute a renumbering of the degrees of freedom to improve the data * access patterns for this class that can be utilized by the categories diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h index f596f2e8b2..75761d73db 100644 --- a/include/deal.II/matrix_free/dof_info.templates.h +++ b/include/deal.II/matrix_free/dof_info.templates.h @@ -1161,6 +1161,309 @@ namespace internal + void + DoFInfo::compute_tight_partitioners( + const Table<2, ShapeInfo> & shape_info, + const unsigned int n_owned_cells, + const unsigned int n_lanes, + const std::vector> &inner_faces, + const std::vector> &ghosted_faces, + const bool fill_cell_centric) + { + const Utilities::MPI::Partitioner &part = *vector_partitioner; + + // partitioner 0: no face integrals, simply use the indices present + // on the cells + std::vector ghost_indices; + { + const unsigned int n_components = start_components.back(); + for (unsigned int cell = 0; cell < n_owned_cells; ++cell) + { + for (unsigned int i = row_starts[cell * n_components].first; + i < row_starts[(cell + 1) * n_components].first; + ++i) + if (dof_indices[i] >= part.local_size()) + ghost_indices.push_back(part.local_to_global(dof_indices[i])); + + const unsigned int fe_index = + dofs_per_cell.size() == 1 ? 
0 : + cell_active_fe_index[cell / n_lanes]; + const unsigned int dofs_this_cell = dofs_per_cell[fe_index]; + + for (unsigned int i = row_starts_plain_indices[cell]; + i < row_starts_plain_indices[cell] + dofs_this_cell; + ++i) + if (plain_dof_indices[i] >= part.local_size()) + ghost_indices.push_back( + part.local_to_global(plain_dof_indices[i])); + } + std::sort(ghost_indices.begin(), ghost_indices.end()); + ghost_indices.erase(std::unique(ghost_indices.begin(), + ghost_indices.end()), + ghost_indices.end()); + IndexSet compressed_set(part.size()); + compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end()); + compressed_set.subtract_set(part.locally_owned_range()); + const bool all_ghosts_equal = + Utilities::MPI::min(compressed_set.n_elements() == + part.ghost_indices().n_elements(), + part.get_mpi_communicator()) != 0; + if (all_ghosts_equal) + vector_partitioner_face_variants[0] = vector_partitioner; + else + { + vector_partitioner_face_variants[0] = + std::make_shared( + part.locally_owned_range(), part.get_mpi_communicator()); + const_cast( + vector_partitioner_face_variants[0].get()) + ->set_ghost_indices(compressed_set, part.ghost_indices()); + } + } + + // construct a numbering of faces + std::vector> all_faces(inner_faces); + all_faces.insert(all_faces.end(), + ghosted_faces.begin(), + ghosted_faces.end()); + Table<2, unsigned int> cell_and_face_to_faces( + (row_starts.size() - 1) / start_components.back(), + 2 * shape_info(0, 0).n_dimensions); + cell_and_face_to_faces.fill(numbers::invalid_unsigned_int); + for (unsigned int f = 0; f < all_faces.size(); ++f) + { + cell_and_face_to_faces(all_faces[f].cells_interior[0], + all_faces[f].interior_face_no) = f; + Assert(all_faces[f].cells_exterior[0] != + numbers::invalid_unsigned_int, + ExcInternalError()); + cell_and_face_to_faces(all_faces[f].cells_exterior[0], + all_faces[f].exterior_face_no) = f; + } + + // lambda function to detect objects on face pairs + const auto loop_over_faces = + [&](const std::function< + void(const unsigned int, const unsigned int, const bool)> &fu) { + for (const auto &face : inner_faces) + { + AssertIndexRange(face.cells_interior[0], n_owned_cells); + fu(face.cells_exterior[0], face.exterior_face_no, false /*flag*/); + } + }; + + const auto loop_over_all_faces = + [&](const std::function< + void(const unsigned int, const unsigned int, const bool)> &fu) { + for (unsigned int c = 0; c < cell_and_face_to_faces.size(0); ++c) + for (unsigned int d = 0; d < cell_and_face_to_faces.size(1); ++d) + { + const unsigned int f = cell_and_face_to_faces(c, d); + if (f == numbers::invalid_unsigned_int) + continue; + + const unsigned int cell_m = all_faces[f].cells_interior[0]; + const unsigned int cell_p = all_faces[f].cells_exterior[0]; + + const bool ext = c == cell_m; + + if (ext && cell_p == numbers::invalid_unsigned_int) + continue; + + const unsigned int p = ext ? cell_p : cell_m; + const unsigned int face_no = ext ? 
+ all_faces[f].exterior_face_no : + all_faces[f].interior_face_no; + + fu(p, face_no, true); + } + }; + + const auto process_values = + [&]( + std::shared_ptr + &vector_partitioner_values, + const std::function &)> &loop) { + bool all_nodal = true; + for (unsigned int c = 0; c < n_base_elements; ++c) + if (!shape_info(global_base_element_offset + c, 0) + .data.front() + .nodal_at_cell_boundaries) + all_nodal = false; + if (all_nodal == false) + vector_partitioner_values = vector_partitioner; + else + { + bool has_noncontiguous_cell = false; + + loop([&](const unsigned int cell_no, + const unsigned int face_no, + const bool flag) { + const unsigned int index = + dof_indices_contiguous[dof_access_cell][cell_no]; + if (flag || (index != numbers::invalid_unsigned_int && + index >= part.local_size())) + { + const unsigned int stride = + dof_indices_interleave_strides[dof_access_cell][cell_no]; + unsigned int i = 0; + for (unsigned int e = 0; e < n_base_elements; ++e) + for (unsigned int c = 0; c < n_components[e]; ++c) + { + const ShapeInfo &shape = + shape_info(global_base_element_offset + e, 0); + for (unsigned int j = 0; + j < shape.dofs_per_component_on_face; + ++j) + ghost_indices.push_back(part.local_to_global( + index + i + + shape.face_to_cell_index_nodal(face_no, j) * + stride)); + i += shape.dofs_per_component_on_cell * stride; + } + AssertDimension(i, dofs_per_cell[0] * stride); + } + else if (index == numbers::invalid_unsigned_int) + has_noncontiguous_cell = true; + }); + has_noncontiguous_cell = + Utilities::MPI::min(has_noncontiguous_cell, + part.get_mpi_communicator()) != 0; + + std::sort(ghost_indices.begin(), ghost_indices.end()); + ghost_indices.erase(std::unique(ghost_indices.begin(), + ghost_indices.end()), + ghost_indices.end()); + IndexSet compressed_set(part.size()); + compressed_set.add_indices(ghost_indices.begin(), + ghost_indices.end()); + compressed_set.subtract_set(part.locally_owned_range()); + const bool all_ghosts_equal = + Utilities::MPI::min(compressed_set.n_elements() == + part.ghost_indices().n_elements(), + part.get_mpi_communicator()) != 0; + if (all_ghosts_equal || has_noncontiguous_cell) + vector_partitioner_values = vector_partitioner; + else + { + vector_partitioner_values = + std::make_shared( + part.locally_owned_range(), part.get_mpi_communicator()); + const_cast( + vector_partitioner_values.get()) + ->set_ghost_indices(compressed_set, part.ghost_indices()); + } + } + }; + + + const auto process_gradients = + [&]( + const std::shared_ptr + &vector_partitoner_values, + std::shared_ptr + &vector_partitioner_gradients, + const std::function &)> &loop) { + bool all_hermite = true; + for (unsigned int c = 0; c < n_base_elements; ++c) + if (shape_info(global_base_element_offset + c, 0).element_type != + internal::MatrixFreeFunctions::tensor_symmetric_hermite) + all_hermite = false; + if (all_hermite == false || + vector_partitoner_values.get() == vector_partitioner.get()) + vector_partitioner_gradients = vector_partitioner; + else + { + loop([&](const unsigned int cell_no, + const unsigned int face_no, + const bool flag) { + const unsigned int index = + dof_indices_contiguous[dof_access_cell][cell_no]; + if (flag || (index != numbers::invalid_unsigned_int && + index >= part.local_size())) + { + const unsigned int stride = + dof_indices_interleave_strides[dof_access_cell][cell_no]; + unsigned int i = 0; + for (unsigned int e = 0; e < n_base_elements; ++e) + for (unsigned int c = 0; c < n_components[e]; ++c) + { + const ShapeInfo &shape = + 
shape_info(global_base_element_offset + e, 0); + for (unsigned int j = 0; + j < 2 * shape.dofs_per_component_on_face; + ++j) + ghost_indices.push_back(part.local_to_global( + index + i + + shape.face_to_cell_index_hermite(face_no, j) * + stride)); + i += shape.dofs_per_component_on_cell * stride; + } + AssertDimension(i, dofs_per_cell[0] * stride); + } + }); + std::sort(ghost_indices.begin(), ghost_indices.end()); + ghost_indices.erase(std::unique(ghost_indices.begin(), + ghost_indices.end()), + ghost_indices.end()); + IndexSet compressed_set(part.size()); + compressed_set.add_indices(ghost_indices.begin(), + ghost_indices.end()); + compressed_set.subtract_set(part.locally_owned_range()); + const bool all_ghosts_equal = + Utilities::MPI::min(compressed_set.n_elements() == + part.ghost_indices().n_elements(), + part.get_mpi_communicator()) != 0; + if (all_ghosts_equal) + vector_partitioner_gradients = vector_partitioner; + else + { + vector_partitioner_gradients = + std::make_shared( + part.locally_owned_range(), part.get_mpi_communicator()); + const_cast( + vector_partitioner_gradients.get()) + ->set_ghost_indices(compressed_set, part.ghost_indices()); + } + } + }; + + // partitioner 1: values on faces + process_values(vector_partitioner_face_variants[1], loop_over_faces); + + // partitioner 2: values and gradients on faces + process_gradients(vector_partitioner_face_variants[1], + vector_partitioner_face_variants[2], + loop_over_faces); + + if (fill_cell_centric) + { + ghost_indices.clear(); + // partitioner 3: values on all faces + process_values(vector_partitioner_face_variants[3], + loop_over_all_faces); + // partitioner 4: values and gradients on faces + process_gradients(vector_partitioner_face_variants[3], + vector_partitioner_face_variants[4], + loop_over_all_faces); + } + else + { + vector_partitioner_face_variants[3] = + std::make_shared( + part.locally_owned_range(), part.get_mpi_communicator()); + vector_partitioner_face_variants[4] = + std::make_shared( + part.locally_owned_range(), part.get_mpi_communicator()); + } + } + + + template void DoFInfo::compute_vector_zero_access_pattern( @@ -1733,7 +2036,7 @@ namespace internal } - } // end of namespace MatrixFreeFunctions + } // namespace MatrixFreeFunctions } // end of namespace internal DEAL_II_NAMESPACE_CLOSE diff --git a/include/deal.II/matrix_free/face_setup_internal.h b/include/deal.II/matrix_free/face_setup_internal.h index 4484a994d2..ae3b6b2d04 100644 --- a/include/deal.II/matrix_free/face_setup_internal.h +++ b/include/deal.II/matrix_free/face_setup_internal.h @@ -77,11 +77,11 @@ namespace internal * whether some of the faces should be considered for processing * locally. 
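     *
     * A minimal usage sketch (not part of the patch; `tria` and
     * `cell_levels` are hypothetical caller-side objects), assuming the
     * new non-template signature that replaces the former MFAddData
     * argument:
     * @code
     * internal::MatrixFreeFunctions::FaceSetup<dim> face_setup;
     * std::vector<std::pair<unsigned int, unsigned int>> cell_levels;
     * face_setup.initialize(tria,                          // Triangulation<dim>
     *                       numbers::invalid_unsigned_int, // mg_level: use active cells
     *                       false, // hold_all_faces_to_owned_cells
     *                       cell_levels);
     * @endcode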
*/ - template void initialize( - const dealii::Triangulation & triangulation, - const MFAddData & additional_data, + const dealii::Triangulation &triangulation, + const unsigned int mg_level, + const bool hold_all_faces_to_owned_cells, std::vector> &cell_levels); /** @@ -160,15 +160,14 @@ namespace internal template - template void FaceSetup::initialize( - const dealii::Triangulation & triangulation, - const MFAddData & additional_data, + const dealii::Triangulation &triangulation, + const unsigned int mg_level, + const bool hold_all_faces_to_owned_cells, std::vector> &cell_levels) { - use_active_cells = - additional_data.mg_level == numbers::invalid_unsigned_int; + use_active_cells = mg_level == numbers::invalid_unsigned_int; # ifdef DEBUG // safety check @@ -582,8 +581,7 @@ namespace internal // inside the domain in case of multigrid separately else if ((dcell->at_boundary(f) == false || dcell->has_periodic_neighbor(f)) && - additional_data.mg_level != - numbers::invalid_unsigned_int && + mg_level != numbers::invalid_unsigned_int && dcell->neighbor_or_periodic_neighbor(f)->level() < dcell->level()) { @@ -597,7 +595,7 @@ namespace internal // neighbor is refined -> face will be treated by neighbor if (use_active_cells && neighbor->has_children() && - additional_data.hold_all_faces_to_owned_cells == false) + hold_all_faces_to_owned_cells == false) continue; bool add_to_ghost = false; @@ -648,8 +646,7 @@ namespace internal add_to_ghost = (dcell->level_subdomain_id() != neighbor->level_subdomain_id()); } - else if (additional_data.hold_all_faces_to_owned_cells == - true) + else if (hold_all_faces_to_owned_cells == true) { // add all cells to ghost layer... face_is_owned[dcell->face(f)->index()] = diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h index 6b0c854d26..28eea44a7b 100644 --- a/include/deal.II/matrix_free/matrix_free.h +++ b/include/deal.II/matrix_free/matrix_free.h @@ -1983,11 +1983,11 @@ private: * This is the actual reinit function that sets up the indices for the * DoFHandler case. */ - template class DoFHandlerType> + template void internal_reinit( const Mapping & mapping, - const std::vector *> & dof_handler, + const std::vector *> & dof_handlers, const std::vector *> &constraint, const std::vector & locally_owned_set, const std::vector> & quad, @@ -2009,51 +2009,15 @@ private: /** * Initializes the DoFHandlers based on a DoFHandler argument. */ - template void initialize_dof_handlers( - const std::vector &dof_handlers, - const AdditionalData & additional_data); - - /** - * Setup connectivity graph with information on the dependencies between - * block due to shared faces. - */ - void - make_connectivity_graph_faces(DynamicSparsityPattern &connectivity); - - /** - * This struct defines which DoFHandler has actually been given at - * construction, in order to define the correct behavior when querying the - * underlying DoFHandler. - */ - struct DoFHandlers - { - DoFHandlers() - : active_dof_handler(usual) - , n_dof_handlers(0) - {} - - std::vector>> dof_handler; - std::vector>> hp_dof_handler; - enum ActiveDoFHandler - { - /** - * Use DoFHandler. - */ - usual, - /** - * Use hp::DoFHandler. - */ - hp - } active_dof_handler; - unsigned int n_dof_handlers; - }; + const std::vector *> &dof_handlers, + const AdditionalData & additional_data); /** * Pointers to the DoFHandlers underlying the current problem. 
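   *
   * With this patch the former DoFHandlers struct is gone, so access is a
   * plain vector lookup. A hedged sketch (assuming a member-function
   * context and a hypothetical index `dof_handler_index`):
   * @code
   * const DoFHandler<dim> &dh = *dof_handlers[dof_handler_index];
   * // hp and non-hp cases are both handled through the FECollection
   * const unsigned int n_fe_indices = dh.get_fe_collection().size();
   * @endcode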
*/ - DoFHandlers dof_handlers; + std::vector>> dof_handlers; /** * Contains the information about degrees of freedom on the individual cells @@ -2213,8 +2177,8 @@ template inline unsigned int MatrixFree::n_components() const { - AssertDimension(dof_handlers.n_dof_handlers, dof_info.size()); - return dof_handlers.n_dof_handlers; + AssertDimension(dof_handlers.size(), dof_info.size()); + return dof_handlers.size(); } @@ -2224,9 +2188,9 @@ inline unsigned int MatrixFree::n_base_elements( const unsigned int dof_no) const { - AssertDimension(dof_handlers.n_dof_handlers, dof_info.size()); - AssertIndexRange(dof_no, dof_handlers.n_dof_handlers); - return dof_handlers.dof_handler[dof_no]->get_fe().n_base_elements(); + AssertDimension(dof_handlers.size(), dof_info.size()); + AssertIndexRange(dof_no, dof_handlers.size()); + return dof_handlers[dof_no]->get_fe().n_base_elements(); } @@ -2841,7 +2805,7 @@ MatrixFree::reinit( const typename MatrixFree::AdditionalData &additional_data) { - std::vector dof_handlers; + std::vector *> dof_handlers; std::vector *> constraints; std::vector quads; @@ -2877,7 +2841,7 @@ MatrixFree::reinit( const typename MatrixFree::AdditionalData &additional_data) { - std::vector dof_handlers; + std::vector *> dof_handlers; std::vector *> constraints; dof_handlers.push_back(&dof_handler); diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h index 15f7247d9b..e9a0888b2c 100644 --- a/include/deal.II/matrix_free/matrix_free.templates.h +++ b/include/deal.II/matrix_free/matrix_free.templates.h @@ -141,10 +141,9 @@ MatrixFree::get_dof_handler( { AssertIndexRange(dof_handler_index, n_components()); - auto dh = dynamic_cast( - &*dof_handlers.dof_handler[dof_handler_index]); - - Assert(dh != nullptr, ExcNotImplemented()); + auto dh = + dynamic_cast(&*dof_handlers[dof_handler_index]); + Assert(dh != nullptr, ExcNotInitialized()); return *dh; } @@ -158,31 +157,18 @@ MatrixFree::get_cell_iterator( const unsigned int vector_number, const unsigned int dof_handler_index) const { - AssertIndexRange(dof_handler_index, dof_handlers.n_dof_handlers); + AssertIndexRange(dof_handler_index, dof_handlers.size()); AssertIndexRange(macro_cell_number, task_info.cell_partition_data.back()); AssertIndexRange(vector_number, n_components_filled(macro_cell_number)); - const DoFHandler *dofh = nullptr; - if (dof_handlers.active_dof_handler == DoFHandlers::usual) - { - AssertDimension(dof_handlers.dof_handler.size(), - dof_handlers.n_dof_handlers); - dofh = dof_handlers.dof_handler[dof_handler_index]; - } - else - { - Assert(false, - ExcMessage("Cannot return DoFHandler::cell_iterator " - "for underlying DoFHandler!")); - } - std::pair index = cell_level_index[macro_cell_number * VectorizedArrayType::size() + vector_number]; - return typename DoFHandler::cell_iterator(&dofh->get_triangulation(), - index.first, - index.second, - dofh); + return typename DoFHandler::cell_iterator( + &dof_handlers[dof_handler_index]->get_triangulation(), + index.first, + index.second, + &*dof_handlers[dof_handler_index]); } @@ -212,34 +198,14 @@ MatrixFree::get_face_iterator( const bool interior, const unsigned int fe_component) const { - AssertIndexRange(fe_component, dof_handlers.n_dof_handlers); - if (interior) - { - AssertIndexRange(face_batch_number, - n_inner_face_batches() + n_boundary_face_batches()); - } - else - { - AssertIndexRange(face_batch_number, n_inner_face_batches()); - } + AssertIndexRange(fe_component, dof_handlers.size()); + 
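+  // Boundary faces have no cell on the exterior side, so they may only be
+  // requested with interior == true; the combined range below checks this.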
AssertIndexRange(face_batch_number, + n_inner_face_batches() + + (interior ? n_boundary_face_batches() : 0)); AssertIndexRange(vector_number, n_active_entries_per_face_batch(face_batch_number)); - const DoFHandler *dofh = nullptr; - if (dof_handlers.active_dof_handler == DoFHandlers::usual) - { - AssertDimension(dof_handlers.dof_handler.size(), - dof_handlers.n_dof_handlers); - dofh = dof_handlers.dof_handler[fe_component]; - } - else - { - Assert(false, - ExcMessage("Cannot return DoFHandler::cell_iterator " - "for underlying DoFHandler!")); - } - const internal::MatrixFreeFunctions::FaceToCellTopology< VectorizedArrayType::size()> face2cell_info = get_face_info(face_batch_number); @@ -251,7 +217,10 @@ MatrixFree::get_face_iterator( std::pair index = cell_level_index[cell_index]; return {typename DoFHandler::cell_iterator( - &dofh->get_triangulation(), index.first, index.second, dofh), + &dof_handlers[fe_component]->get_triangulation(), + index.first, + index.second, + &*dof_handlers[fe_component]), interior ? face2cell_info.interior_face_no : face2cell_info.exterior_face_no}; } @@ -265,20 +234,18 @@ MatrixFree::get_hp_cell_iterator( const unsigned int vector_number, const unsigned int dof_handler_index) const { - AssertIndexRange(dof_handler_index, dof_handlers.n_dof_handlers); + AssertIndexRange(dof_handler_index, dof_handlers.size()); AssertIndexRange(macro_cell_number, task_info.cell_partition_data.back()); AssertIndexRange(vector_number, n_components_filled(macro_cell_number)); - Assert(dof_handlers.active_dof_handler == DoFHandlers::hp, - ExcNotImplemented()); - const DoFHandler *dofh = dof_handlers.hp_dof_handler[dof_handler_index]; std::pair index = cell_level_index[macro_cell_number * VectorizedArrayType::size() + vector_number]; - return typename DoFHandler::cell_iterator(&dofh->get_triangulation(), - index.first, - index.second, - dofh); + return typename DoFHandler::cell_iterator( + &dof_handlers[dof_handler_index]->get_triangulation(), + index.first, + index.second, + &*dof_handlers[dof_handler_index]); } @@ -307,13 +274,13 @@ MatrixFree::copy_from( template -template class DoFHandlerType> +template void MatrixFree::internal_reinit( const Mapping & mapping, - const std::vector *> & dof_handler, + const std::vector *> & dof_handler, const std::vector *> &constraint, - const std::vector & locally_owned_set, + const std::vector & locally_owned_dofs, const std::vector> & quad, const typename MatrixFree::AdditionalData &additional_data) @@ -355,7 +322,7 @@ MatrixFree::internal_reinit( clear(); Assert(dof_handler.size() > 0, ExcMessage("No DoFHandler is given.")); AssertDimension(dof_handler.size(), constraint.size()); - AssertDimension(dof_handler.size(), locally_owned_set.size()); + AssertDimension(dof_handler.size(), locally_owned_dofs.size()); // set variables that are independent of FE if (Utilities::MPI::job_supports_mpi() == true) @@ -408,7 +375,7 @@ MatrixFree::internal_reinit( // constraint_pool_data. It also reorders the way cells are gone through // (to separate cells with overlap to other processors from others // without). - initialize_indices(constraint, locally_owned_set, additional_data); + initialize_indices(constraint, locally_owned_dofs, additional_data); } // initialize bare structures @@ -482,14 +449,11 @@ MatrixFree::update_mapping( const Mapping &mapping) { AssertDimension(shape_info.size(1), mapping_info.cell_data.size()); - mapping_info.update_mapping( - dof_handlers.active_dof_handler == DoFHandlers::hp ? 
- dof_handlers.hp_dof_handler[0]->get_triangulation() : - dof_handlers.dof_handler[0]->get_triangulation(), - cell_level_index, - face_info, - dof_info[0].cell_active_fe_index, - mapping); + mapping_info.update_mapping(dof_handlers[0]->get_triangulation(), + cell_level_index, + face_info, + dof_info[0].cell_active_fe_index, + mapping); } @@ -563,101 +527,33 @@ namespace internal template -template void MatrixFree::initialize_dof_handlers( - const std::vector &dof_handler, - const AdditionalData & additional_data) + const std::vector *> &dof_handler_in, + const AdditionalData & additional_data) { - if (dof_handler.front()->hp_capability_enabled == false) - { - cell_level_index.clear(); - dof_handlers.active_dof_handler = DoFHandlers::usual; - dof_handlers.n_dof_handlers = dof_handler.size(); - dof_handlers.dof_handler.resize(dof_handlers.n_dof_handlers); - for (unsigned int no = 0; no < dof_handlers.n_dof_handlers; ++no) - dof_handlers.dof_handler[no] = dof_handler[no]; - - dof_info.resize(dof_handlers.n_dof_handlers); - for (unsigned int no = 0; no < dof_handlers.n_dof_handlers; ++no) - dof_info[no].vectorization_length = VectorizedArrayType::size(); - - const unsigned int n_mpi_procs = task_info.n_procs; - const unsigned int my_pid = task_info.my_pid; - - const Triangulation &tria = - dof_handlers.dof_handler[0]->get_triangulation(); - const unsigned int level = additional_data.mg_level; - if (level == numbers::invalid_unsigned_int) - { - if (n_mpi_procs == 1) - cell_level_index.reserve(tria.n_active_cells()); - // For serial Triangulations always take all cells - const unsigned int subdomain_id = - (dynamic_cast *>( - &dof_handler[0]->get_triangulation()) != nullptr) ? - my_pid : - numbers::invalid_subdomain_id; - - // Go through cells on zeroth level and then successively step down - // into children. This gives a z-ordering of the cells, which is - // beneficial when setting up neighboring relations between cells for - // thread parallelization - for (const auto &cell : tria.cell_iterators_on_level(0)) - internal::MatrixFreeFunctions::resolve_cell(cell, - cell_level_index, - subdomain_id); - - Assert(n_mpi_procs > 1 || - cell_level_index.size() == tria.n_active_cells(), - ExcInternalError()); - } - else - { - AssertIndexRange(level, tria.n_global_levels()); - if (level < tria.n_levels()) - { - cell_level_index.reserve(tria.n_cells(level)); - for (const auto &cell : tria.cell_iterators_on_level(level)) - if (cell->level_subdomain_id() == my_pid) - cell_level_index.emplace_back(cell->level(), cell->index()); - } - } - - // All these are cells local to this processor. Therefore, set - // cell_level_index_end_local to the size of cell_level_index. 
- cell_level_index_end_local = cell_level_index.size(); - } - else - { - (void)additional_data; - - cell_level_index.clear(); - dof_handlers.active_dof_handler = DoFHandlers::hp; - Assert(additional_data.mg_level == numbers::invalid_unsigned_int, - ExcNotImplemented()); - dof_handlers.n_dof_handlers = dof_handler.size(); - dof_handlers.hp_dof_handler.resize(dof_handlers.n_dof_handlers); - for (unsigned int no = 0; no < dof_handlers.n_dof_handlers; ++no) - dof_handlers.hp_dof_handler[no] = dof_handler[no]; - - dof_info.resize(dof_handlers.n_dof_handlers); - for (unsigned int no = 0; no < dof_handlers.n_dof_handlers; ++no) - dof_info[no].vectorization_length = VectorizedArrayType::size(); + cell_level_index.clear(); + dof_handlers.resize(dof_handler_in.size()); + for (unsigned int no = 0; no < dof_handler_in.size(); ++no) + dof_handlers[no] = dof_handler_in[no]; - const unsigned int n_mpi_procs = task_info.n_procs; - const unsigned int my_pid = task_info.my_pid; + dof_info.resize(dof_handlers.size()); + for (unsigned int no = 0; no < dof_handlers.size(); ++no) + dof_info[no].vectorization_length = VectorizedArrayType::size(); - // if we have no level given, use the same as for the standard DoFHandler, - // otherwise we must loop through the respective level - const Triangulation &tria = dof_handler[0]->get_triangulation(); + const unsigned int n_mpi_procs = task_info.n_procs; + const unsigned int my_pid = task_info.my_pid; + const Triangulation &tria = dof_handlers[0]->get_triangulation(); + const unsigned int level = additional_data.mg_level; + if (level == numbers::invalid_unsigned_int) + { if (n_mpi_procs == 1) cell_level_index.reserve(tria.n_active_cells()); // For serial Triangulations always take all cells const unsigned int subdomain_id = (dynamic_cast *>( - &dof_handler[0]->get_triangulation()) != nullptr) ? + &dof_handlers[0]->get_triangulation()) != nullptr) ? my_pid : numbers::invalid_subdomain_id; @@ -673,574 +569,765 @@ MatrixFree::initialize_dof_handlers( Assert(n_mpi_procs > 1 || cell_level_index.size() == tria.n_active_cells(), ExcInternalError()); - - // All these are cells local to this processor. Therefore, set - // cell_level_index_end_local to the size of cell_level_index. - cell_level_index_end_local = cell_level_index.size(); } + else + { + AssertIndexRange(level, tria.n_global_levels()); + if (level < tria.n_levels()) + { + cell_level_index.reserve(tria.n_cells(level)); + for (const auto &cell : tria.cell_iterators_on_level(level)) + if (cell->level_subdomain_id() == my_pid) + cell_level_index.emplace_back(cell->level(), cell->index()); + } + } + + // All these are cells local to this processor. Therefore, set + // cell_level_index_end_local to the size of cell_level_index. + cell_level_index_end_local = cell_level_index.size(); } -template -template -void -MatrixFree::initialize_indices( - const std::vector *> &constraint, - const std::vector & locally_owned_set, - const AdditionalData & additional_data) +namespace internal { - // insert possible ghost cells and construct face topology - const bool do_face_integrals = - (additional_data.mapping_update_flags_inner_faces | - additional_data.mapping_update_flags_boundary_faces) != update_default; - internal::MatrixFreeFunctions::FaceSetup face_setup; - if (do_face_integrals) - face_setup.initialize(dof_handlers.active_dof_handler == - DoFHandlers::usual ? 
- dof_handlers.dof_handler[0]->get_triangulation() : - dof_handlers.hp_dof_handler[0]->get_triangulation(), - additional_data, - cell_level_index); - - const unsigned int n_fe = dof_handlers.n_dof_handlers; - const unsigned int n_active_cells = cell_level_index.size(); +#ifdef DEAL_II_WITH_TBB - std::vector is_fe_dg(n_fe, false); + inline void + fill_index_subrange( + const unsigned int begin, + const unsigned int end, + const std::vector> &cell_level_index, + tbb::concurrent_unordered_map, + unsigned int> & map) + { + if (cell_level_index.empty()) + return; + unsigned int cell = begin; + if (cell == 0) + map.insert(std::make_pair(cell_level_index[cell++], 0U)); + for (; cell < end; ++cell) + if (cell_level_index[cell] != cell_level_index[cell - 1]) + map.insert(std::make_pair(cell_level_index[cell], cell)); + } - AssertDimension(n_active_cells, cell_level_index.size()); - AssertDimension(n_fe, locally_owned_set.size()); - AssertDimension(n_fe, constraint.size()); + template + inline void + fill_connectivity_subrange( + const unsigned int begin, + const unsigned int end, + const dealii::Triangulation & tria, + const std::vector> &cell_level_index, + const tbb::concurrent_unordered_map, + unsigned int> & map, + DynamicSparsityPattern &connectivity_direct) + { + std::vector new_indices; + for (unsigned int cell = begin; cell < end; ++cell) + { + new_indices.clear(); + typename dealii::Triangulation::cell_iterator dcell( + &tria, cell_level_index[cell].first, cell_level_index[cell].second); + for (auto f : GeometryInfo::face_indices()) + { + // Only inner faces couple different cells + if (dcell->at_boundary(f) == false && + dcell->neighbor_or_periodic_neighbor(f)->level_subdomain_id() == + dcell->level_subdomain_id()) + { + std::pair level_index( + dcell->neighbor_or_periodic_neighbor(f)->level(), + dcell->neighbor_or_periodic_neighbor(f)->index()); + auto it = map.find(level_index); + if (it != map.end()) + { + const unsigned int neighbor_cell = it->second; + if (neighbor_cell != cell) + new_indices.push_back(neighbor_cell); + } + } + } + std::sort(new_indices.begin(), new_indices.end()); + connectivity_direct.add_entries(cell, + new_indices.begin(), + std::unique(new_indices.begin(), + new_indices.end())); + } + } - std::vector local_dof_indices; - std::vector>> lexicographic(n_fe); + inline void + fill_connectivity_indirect_subrange( + const unsigned int begin, + const unsigned int end, + const DynamicSparsityPattern &connectivity_direct, + DynamicSparsityPattern & connectivity) + { + std::vector new_indices; + for (unsigned int block = begin; block < end; ++block) + { + new_indices.clear(); + for (DynamicSparsityPattern::iterator it = + connectivity_direct.begin(block); + it != connectivity_direct.end(block); + ++it) + { + new_indices.push_back(it->column()); + for (DynamicSparsityPattern::iterator it_neigh = + connectivity_direct.begin(it->column()); + it_neigh != connectivity_direct.end(it->column()); + ++it_neigh) + if (it_neigh->column() != block) + new_indices.push_back(it_neigh->column()); + } + std::sort(new_indices.begin(), new_indices.end()); + connectivity.add_entries(block, + new_indices.begin(), + std::unique(new_indices.begin(), + new_indices.end())); + } + } - internal::MatrixFreeFunctions::ConstraintValues constraint_values; +#endif - bool cell_categorization_enabled = - !additional_data.cell_vectorization_category.empty(); + template + std::vector + compute_dof_info( + const std::vector *> &constraint, + const std::vector &locally_owned_dofs, + const std::vector>> 
&dof_handler, + const Table<2, MatrixFreeFunctions::ShapeInfo> & shape_infos, + const unsigned int cell_level_index_end_local, + const unsigned int mg_level, + const bool hold_all_faces_to_owned_cells, + const std::vector &cell_vectorization_category, + const bool cell_vectorization_categories_strict, + const bool do_face_integrals, + const bool overlap_communication_computation, + MatrixFreeFunctions::TaskInfo & task_info, + std::vector> &cell_level_index, + std::vector & dof_info, + MatrixFreeFunctions::FaceSetup & face_setup, + MatrixFreeFunctions::ConstraintValues & constraint_values) + { + if (do_face_integrals) + face_setup.initialize(dof_handler[0]->get_triangulation(), + mg_level, + hold_all_faces_to_owned_cells, + cell_level_index); - for (unsigned int no = 0; no < n_fe; ++no) - { - std::vector *> fes; - if (dof_handlers.active_dof_handler == DoFHandlers::hp) - { - const DoFHandler * hpdof = dof_handlers.hp_dof_handler[no]; - const hp::FECollection &fe = hpdof->get_fe_collection(); - for (unsigned int f = 0; f < fe.size(); ++f) - fes.push_back(&fe[f]); + const unsigned int n_dof_handlers = dof_handler.size(); + const unsigned int n_active_cells = cell_level_index.size(); - if (fe.size() > 1) - { - dof_info[no].cell_active_fe_index.resize( - n_active_cells, numbers::invalid_unsigned_int); - is_fe_dg[no] = fe[0].dofs_per_vertex == 0; - } + const Triangulation &tria = dof_handler[0]->get_triangulation(); - Assert(additional_data.cell_vectorization_category.empty(), - ExcNotImplemented()); - } - else - { - const DoFHandler *dofh = &*dof_handlers.dof_handler[no]; - fes.push_back(&dofh->get_fe()); - if (cell_categorization_enabled == true) - dof_info[no].cell_active_fe_index.resize( - n_active_cells, numbers::invalid_unsigned_int); - is_fe_dg[no] = dofh->get_fe().dofs_per_vertex == 0; - } - lexicographic[no].resize(fes.size()); + AssertDimension(n_dof_handlers, locally_owned_dofs.size()); + AssertDimension(n_dof_handlers, constraint.size()); - dof_info[no].fe_index_conversion.resize(fes.size()); - dof_info[no].max_fe_index = fes.size(); + std::vector local_dof_indices; + std::vector>> lexicographic( + n_dof_handlers); - dof_info[no].component_dof_indices_offset.clear(); - dof_info[no].component_dof_indices_offset.resize(fes.size()); - for (unsigned int fe_index = 0; fe_index < fes.size(); ++fe_index) - { - const FiniteElement &fe = *fes[fe_index]; - // cache number of finite elements and dofs_per_cell - dof_info[no].dofs_per_cell.push_back(fe.dofs_per_cell); - dof_info[no].dofs_per_face.push_back(fe.dofs_per_face); - dof_info[no].dimension = dim; - dof_info[no].n_base_elements = fe.n_base_elements(); - dof_info[no].n_components.resize(dof_info[no].n_base_elements); - dof_info[no].start_components.resize(dof_info[no].n_base_elements + - 1); - dof_info[no].component_to_base_index.clear(); - dof_info[no].component_dof_indices_offset[fe_index].push_back(0); - dof_info[no].fe_index_conversion[fe_index].clear(); - for (unsigned int c = 0; c < dof_info[no].n_base_elements; ++c) - { - dof_info[no].n_components[c] = fe.element_multiplicity(c); - for (unsigned int l = 0; l < dof_info[no].n_components[c]; ++l) - { - dof_info[no].component_to_base_index.push_back(c); - dof_info[no].component_dof_indices_offset[fe_index].push_back( - dof_info[no].component_dof_indices_offset[fe_index].back() + - fe.base_element(c).dofs_per_cell); - dof_info[no].fe_index_conversion[fe_index].push_back( - fe.base_element(c).degree); - } - dof_info[no].start_components[c + 1] = - dof_info[no].start_components[c] + 
dof_info[no].n_components[c]; - lexicographic[no][fe_index].insert( - lexicographic[no][fe_index].end(), - shape_info( - dof_info[no].global_base_element_offset + c, 0, fe_index, 0) - .lexicographic_numbering.begin(), - shape_info( - dof_info[no].global_base_element_offset + c, 0, fe_index, 0) - .lexicographic_numbering.end()); - } + std::vector is_fe_dg(n_dof_handlers, false); - AssertDimension(lexicographic[no][fe_index].size(), - dof_info[no].dofs_per_cell[fe_index]); - AssertDimension( - dof_info[no].component_dof_indices_offset[fe_index].size() - 1, - dof_info[no].start_components.back()); - AssertDimension( - dof_info[no].component_dof_indices_offset[fe_index].back(), - dof_info[no].dofs_per_cell[fe_index]); - } + bool cell_categorization_enabled = !cell_vectorization_category.empty(); - // set locally owned range for each component - Assert(locally_owned_set[no].is_contiguous(), ExcNotImplemented()); - dof_info[no].vector_partitioner = - std::make_shared(locally_owned_set[no], - task_info.communicator); - - // initialize the arrays for indices - const unsigned int n_components_total = - dof_info[no].start_components.back(); - dof_info[no].row_starts.resize(n_active_cells * n_components_total + 1); - dof_info[no].row_starts[0].first = 0; - dof_info[no].row_starts[0].second = 0; - dof_info[no].dof_indices.reserve( - (n_active_cells * dof_info[no].dofs_per_cell[0] * 3) / 2); - - // cache the constrained indices for use in matrix-vector products and - // the like - const types::global_dof_index - start_index = dof_info[no].vector_partitioner->local_range().first, - end_index = dof_info[no].vector_partitioner->local_range().second; - for (types::global_dof_index i = start_index; i < end_index; ++i) - if (constraint[no]->is_constrained(i) == true) - dof_info[no].constrained_dofs.push_back( - static_cast(i - start_index)); - } + for (unsigned int no = 0; no < n_dof_handlers; ++no) + { + const dealii::hp::FECollection &fes = + dof_handler[no]->get_fe_collection(); - // extract all the global indices associated with the computation, and form - // the ghost indices - std::vector subdomain_boundary_cells; - for (unsigned int counter = 0; counter < n_active_cells; ++counter) - { - bool cell_at_subdomain_boundary = - (face_setup.at_processor_boundary.size() > counter && - face_setup.at_processor_boundary[counter]) || - (additional_data.overlap_communication_computation == false && - task_info.n_procs > 1); + if (fes.size() > 1) + { + Assert(cell_vectorization_category.empty(), ExcNotImplemented()); + dof_info[no].cell_active_fe_index.resize( + n_active_cells, numbers::invalid_unsigned_int); + } + else if (cell_categorization_enabled == true) + dof_info[no].cell_active_fe_index.resize( + n_active_cells, numbers::invalid_unsigned_int); - for (unsigned int no = 0; no < n_fe; ++no) - { - // read indices from standard DoFHandler in the usual way - if (dof_handlers.active_dof_handler == DoFHandlers::usual && - additional_data.mg_level == numbers::invalid_unsigned_int) - { - const DoFHandler *dofh = &*dof_handlers.dof_handler[no]; - typename DoFHandler::active_cell_iterator cell_it( - &dofh->get_triangulation(), - cell_level_index[counter].first, - cell_level_index[counter].second, - dofh); - local_dof_indices.resize(dof_info[no].dofs_per_cell[0]); - cell_it->get_dof_indices(local_dof_indices); - dof_info[no].read_dof_indices(local_dof_indices, - lexicographic[no][0], - *constraint[no], - counter, - constraint_values, - cell_at_subdomain_boundary); - if (cell_categorization_enabled) - { - 
AssertIndexRange( - cell_it->active_cell_index(), - additional_data.cell_vectorization_category.size()); - dof_info[no].cell_active_fe_index[counter] = - additional_data.cell_vectorization_category - [cell_it->active_cell_index()]; - } - } - // we are requested to use a multigrid level - else if (dof_handlers.active_dof_handler == DoFHandlers::usual && - additional_data.mg_level != numbers::invalid_unsigned_int) - { - const DoFHandler *dofh = dof_handlers.dof_handler[no]; - AssertIndexRange(additional_data.mg_level, - dofh->get_triangulation().n_levels()); - typename DoFHandler::cell_iterator cell_it( - &dofh->get_triangulation(), - cell_level_index[counter].first, - cell_level_index[counter].second, - dofh); - local_dof_indices.resize(dof_info[no].dofs_per_cell[0]); - cell_it->get_mg_dof_indices(local_dof_indices); - dof_info[no].read_dof_indices(local_dof_indices, - lexicographic[no][0], - *constraint[no], - counter, - constraint_values, - cell_at_subdomain_boundary); - if (cell_categorization_enabled) - { - AssertIndexRange( - cell_it->index(), - additional_data.cell_vectorization_category.size()); - dof_info[no].cell_active_fe_index[counter] = - additional_data.cell_vectorization_category - [cell_level_index[counter].second]; - } - } - // hp case where we need to decode the FE index and similar - else if (dof_handlers.active_dof_handler == DoFHandlers::hp) - { - const DoFHandler *dofh = dof_handlers.hp_dof_handler[no]; - typename DoFHandler::active_cell_iterator cell_it( - &dofh->get_triangulation(), - cell_level_index[counter].first, - cell_level_index[counter].second, - dofh); - if (dofh->get_fe_collection().size() > 1) - dof_info[no].cell_active_fe_index[counter] = - cell_it->active_fe_index(); - local_dof_indices.resize(cell_it->get_fe().dofs_per_cell); - cell_it->get_dof_indices(local_dof_indices); - dof_info[no].read_dof_indices( - local_dof_indices, - lexicographic[no][cell_it->active_fe_index()], - *constraint[no], - counter, - constraint_values, - cell_at_subdomain_boundary); - } - else - { - Assert(false, ExcNotImplemented()); - } - } + is_fe_dg[no] = fes[0].dofs_per_vertex == 0; - // if we found dofs on some FE component that belong to other - // processors, the cell is added to the boundary cells. 
- if (cell_at_subdomain_boundary == true && - counter < cell_level_index_end_local) - subdomain_boundary_cells.push_back(counter); - } + lexicographic[no].resize(fes.size()); - const unsigned int n_lanes = VectorizedArrayType::size(); - task_info.n_active_cells = cell_level_index_end_local; - task_info.n_ghost_cells = n_active_cells - cell_level_index_end_local; - task_info.vectorization_length = n_lanes; + dof_info[no].fe_index_conversion.resize(fes.size()); + dof_info[no].max_fe_index = fes.size(); - // Finalize the creation of the ghost indices - { - std::vector cells_with_ghosts(subdomain_boundary_cells); - for (unsigned int c = cell_level_index_end_local; c < n_active_cells; ++c) - cells_with_ghosts.push_back(c); - for (unsigned int no = 0; no < n_fe; ++no) - { - if (do_face_integrals && - additional_data.mg_level != numbers::invalid_unsigned_int) + dof_info[no].component_dof_indices_offset.clear(); + dof_info[no].component_dof_indices_offset.resize(fes.size()); + for (unsigned int fe_index = 0; fe_index < fes.size(); ++fe_index) { - // in case of adaptivity, go through the cells on the next finer - // level and check whether we need to get read access to some of - // those entries for the mg flux matrices - const DoFHandler &dof_handler = *dof_handlers.dof_handler[no]; - std::vector dof_indices; - if (additional_data.mg_level + 1 < - dof_handler.get_triangulation().n_global_levels()) - for (const auto &cell : dof_handler.cell_iterators_on_level( - additional_data.mg_level + 1)) - if (cell->level_subdomain_id() == task_info.my_pid) - for (const unsigned int f : GeometryInfo::face_indices()) - if ((cell->at_boundary(f) == false || - cell->has_periodic_neighbor(f) == true) && - cell->level() > - cell->neighbor_or_periodic_neighbor(f)->level() && - cell->neighbor_or_periodic_neighbor(f) - ->level_subdomain_id() != task_info.my_pid) - { - dof_indices.resize( - cell->neighbor_or_periodic_neighbor(f) - ->get_fe() - .dofs_per_cell); - cell->neighbor_or_periodic_neighbor(f) - ->get_mg_dof_indices(dof_indices); - for (const auto dof_index : dof_indices) - dof_info[no].ghost_dofs.push_back(dof_index); - } + const FiniteElement &fe = fes[fe_index]; + // cache number of finite elements and dofs_per_cell + dof_info[no].dofs_per_cell.push_back(fe.dofs_per_cell); + dof_info[no].dofs_per_face.push_back(fe.dofs_per_face); + dof_info[no].dimension = dim; + dof_info[no].n_base_elements = fe.n_base_elements(); + dof_info[no].n_components.resize(dof_info[no].n_base_elements); + dof_info[no].start_components.resize(dof_info[no].n_base_elements + + 1); + dof_info[no].component_to_base_index.clear(); + dof_info[no].component_dof_indices_offset[fe_index].push_back(0); + dof_info[no].fe_index_conversion[fe_index].clear(); + for (unsigned int c = 0; c < dof_info[no].n_base_elements; ++c) + { + dof_info[no].n_components[c] = fe.element_multiplicity(c); + for (unsigned int l = 0; l < dof_info[no].n_components[c]; ++l) + { + dof_info[no].component_to_base_index.push_back(c); + dof_info[no] + .component_dof_indices_offset[fe_index] + .push_back(dof_info[no] + .component_dof_indices_offset[fe_index] + .back() + + fe.base_element(c).dofs_per_cell); + dof_info[no].fe_index_conversion[fe_index].push_back( + fe.base_element(c).degree); + } + dof_info[no].start_components[c + 1] = + dof_info[no].start_components[c] + + dof_info[no].n_components[c]; + const auto &lex = + shape_infos(dof_info[no].global_base_element_offset + c, + fe_index) + .lexicographic_numbering; + lexicographic[no][fe_index].insert( + 
lexicographic[no][fe_index].end(), lex.begin(), lex.end()); + } + + AssertDimension(lexicographic[no][fe_index].size(), + dof_info[no].dofs_per_cell[fe_index]); + AssertDimension( + dof_info[no].component_dof_indices_offset[fe_index].size() - 1, + dof_info[no].start_components.back()); + AssertDimension( + dof_info[no].component_dof_indices_offset[fe_index].back(), + dof_info[no].dofs_per_cell[fe_index]); } - dof_info[no].assign_ghosts(cells_with_ghosts); + + // set locally owned range for each component + Assert(locally_owned_dofs[no].is_contiguous(), ExcNotImplemented()); + dof_info[no].vector_partitioner = + std::make_shared(locally_owned_dofs[no], + task_info.communicator); + + // initialize the arrays for indices + const unsigned int n_components_total = + dof_info[no].start_components.back(); + dof_info[no].row_starts.resize(n_active_cells * n_components_total + 1); + dof_info[no].row_starts[0].first = 0; + dof_info[no].row_starts[0].second = 0; + dof_info[no].dof_indices.reserve( + (n_active_cells * dof_info[no].dofs_per_cell[0] * 3) / 2); + + // cache the constrained indices for use in matrix-vector products and + // the like + const types::global_dof_index + start_index = dof_info[no].vector_partitioner->local_range().first, + end_index = dof_info[no].vector_partitioner->local_range().second; + for (types::global_dof_index i = start_index; i < end_index; ++i) + if (constraint[no]->is_constrained(i) == true) + dof_info[no].constrained_dofs.push_back( + static_cast(i - start_index)); } - } - std::vector renumbering; - std::vector irregular_cells; - if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::none) - { - bool strict_categories = - additional_data.cell_vectorization_categories_strict || - dof_handlers.active_dof_handler == DoFHandlers::hp; - unsigned int dofs_per_cell = 0; - for (const auto &info : dof_info) - dofs_per_cell = std::max(dofs_per_cell, info.dofs_per_cell[0]); - - // Detect cells with the same parent to make sure they get scheduled - // together in the loop, which increases data locality. - std::vector parent_relation(task_info.n_active_cells + - task_info.n_ghost_cells, - numbers::invalid_unsigned_int); - std::map, std::vector> cell_parents; - for (unsigned int c = 0; c < cell_level_index_end_local; ++c) - if (cell_level_index[c].first > 0) - { - typename Triangulation::cell_iterator cell( - dof_handlers.active_dof_handler == DoFHandlers::usual ? 
- &dof_handlers.dof_handler[0]->get_triangulation() : - &dof_handlers.hp_dof_handler[0]->get_triangulation(), - cell_level_index[c].first, - cell_level_index[c].second); - Assert(cell->level() > 0, ExcInternalError()); - cell_parents[std::make_pair(cell->parent()->level(), - cell->parent()->index())] - .push_back(c); - } - unsigned int position = 0; - for (const auto &it : cell_parents) - if (it.second.size() == GeometryInfo::max_children_per_cell) + // extract all the global indices associated with the computation, and form + // the ghost indices + std::vector subdomain_boundary_cells; + for (unsigned int counter = 0; counter < n_active_cells; ++counter) + { + bool cell_at_subdomain_boundary = + (face_setup.at_processor_boundary.size() > counter && + face_setup.at_processor_boundary[counter]) || + (overlap_communication_computation == false && task_info.n_procs > 1); + + for (unsigned int no = 0; no < n_dof_handlers; ++no) { - for (auto i : it.second) - parent_relation[i] = position; - ++position; + // read indices from active cells + if (mg_level == numbers::invalid_unsigned_int) + { + const DoFHandler *dofh = &*dof_handler[no]; + typename DoFHandler::active_cell_iterator cell_it( + &tria, + cell_level_index[counter].first, + cell_level_index[counter].second, + dofh); + const unsigned int fe_index = + dofh->get_fe_collection().size() > 1 ? + cell_it->active_fe_index() : + 0; + if (dofh->get_fe_collection().size() > 1) + dof_info[no].cell_active_fe_index[counter] = fe_index; + local_dof_indices.resize(dof_info[no].dofs_per_cell[fe_index]); + cell_it->get_dof_indices(local_dof_indices); + dof_info[no].read_dof_indices(local_dof_indices, + lexicographic[no][fe_index], + *constraint[no], + counter, + constraint_values, + cell_at_subdomain_boundary); + if (dofh->get_fe_collection().size() == 1 && + cell_categorization_enabled) + { + AssertIndexRange(cell_it->active_cell_index(), + cell_vectorization_category.size()); + dof_info[no].cell_active_fe_index[counter] = + cell_vectorization_category[cell_it->active_cell_index()]; + } + } + // we are requested to use a multigrid level + else + { + const DoFHandler *dofh = dof_handler[no]; + AssertIndexRange(mg_level, tria.n_levels()); + typename DoFHandler::cell_iterator cell_it( + &tria, + cell_level_index[counter].first, + cell_level_index[counter].second, + dofh); + local_dof_indices.resize(dof_info[no].dofs_per_cell[0]); + cell_it->get_mg_dof_indices(local_dof_indices); + dof_info[no].read_dof_indices(local_dof_indices, + lexicographic[no][0], + *constraint[no], + counter, + constraint_values, + cell_at_subdomain_boundary); + if (cell_categorization_enabled) + { + AssertIndexRange(cell_it->index(), + cell_vectorization_category.size()); + dof_info[no].cell_active_fe_index[counter] = + cell_vectorization_category[cell_level_index[counter] + .second]; + } + } } - task_info.create_blocks_serial(subdomain_boundary_cells, - dofs_per_cell, - dof_handlers.active_dof_handler == - DoFHandlers::hp, - dof_info[0].cell_active_fe_index, - strict_categories, - parent_relation, - renumbering, - irregular_cells); - } - else + + // if we found dofs on some FE component that belong to other + // processors, the cell is added to the boundary cells. 
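+          // (ghost cells, i.e. counter >= cell_level_index_end_local, are
+          // handled via the ghost-index setup below and are therefore not
+          // added to the boundary-cell list)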
+ if (cell_at_subdomain_boundary == true && + counter < cell_level_index_end_local) + subdomain_boundary_cells.push_back(counter); + } + + task_info.n_active_cells = cell_level_index_end_local; + task_info.n_ghost_cells = n_active_cells - cell_level_index_end_local; + + // Finalize the creation of the ghost indices { - task_info.make_boundary_cells_divisible(subdomain_boundary_cells); - - // For strategy with blocking before partitioning: reorganize the indices - // in order to overlap communication in MPI with computations: Place all - // cells with ghost indices into one chunk. Also reorder cells so that we - // can parallelize by threads - Assert(additional_data.cell_vectorization_category.empty(), - ExcNotImplemented()); - task_info.initial_setup_blocks_tasks(subdomain_boundary_cells, - renumbering, - irregular_cells); - task_info.guess_block_size(dof_info[0].dofs_per_cell[0]); - - unsigned int n_macro_cells_before = - *(task_info.cell_partition_data.end() - 2); - unsigned int n_ghost_slots = - *(task_info.cell_partition_data.end() - 1) - n_macro_cells_before; - - unsigned int start_nonboundary = numbers::invalid_unsigned_int; - if (task_info.scheme == - internal::MatrixFreeFunctions::TaskInfo::partition_color || - task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color) + std::vector cells_with_ghosts(subdomain_boundary_cells); + for (unsigned int c = cell_level_index_end_local; c < n_active_cells; ++c) + cells_with_ghosts.push_back(c); + for (unsigned int no = 0; no < n_dof_handlers; ++no) { - // set up partitions. if we just use coloring without partitions, do - // nothing here, assume all cells to belong to the zero partition - // (that we otherwise use for MPI boundary cells) - if (task_info.scheme == - internal::MatrixFreeFunctions::TaskInfo::color) + if (do_face_integrals && mg_level != numbers::invalid_unsigned_int) { - start_nonboundary = - task_info.n_procs > 1 ? 
- std::min(((task_info.cell_partition_data[2] - - task_info.cell_partition_data[1] + - task_info.block_size - 1) / - task_info.block_size) * - task_info.block_size, - task_info.cell_partition_data[3]) : - 0; + // in case of adaptivity, go through the cells on the next finer + // level and check whether we need to get read access to some of + // those entries for the mg flux matrices + std::vector dof_indices; + if (mg_level + 1 < tria.n_global_levels()) + for (const auto &cell : + dof_handler[no]->cell_iterators_on_level(mg_level + 1)) + if (cell->level_subdomain_id() == task_info.my_pid) + for (const unsigned int f : + GeometryInfo::face_indices()) + if ((cell->at_boundary(f) == false || + cell->has_periodic_neighbor(f) == true) && + cell->level() > + cell->neighbor_or_periodic_neighbor(f)->level() && + cell->neighbor_or_periodic_neighbor(f) + ->level_subdomain_id() != task_info.my_pid) + { + dof_indices.resize( + cell->neighbor_or_periodic_neighbor(f) + ->get_fe() + .dofs_per_cell); + cell->neighbor_or_periodic_neighbor(f) + ->get_mg_dof_indices(dof_indices); + for (const auto dof_index : dof_indices) + dof_info[no].ghost_dofs.push_back(dof_index); + } } - else + dof_info[no].assign_ghosts(cells_with_ghosts); + } + } + + bool hp_functionality_enabled = false; + for (const auto &dh : dof_handler) + if (dh->get_fe_collection().size() > 1) + hp_functionality_enabled = true; + const unsigned int n_lanes = task_info.vectorization_length; + std::vector renumbering; + std::vector irregular_cells; + if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::none) + { + const bool strict_categories = + cell_vectorization_categories_strict || hp_functionality_enabled; + unsigned int dofs_per_cell = 0; + for (const auto &info : dof_info) + dofs_per_cell = std::max(dofs_per_cell, info.dofs_per_cell[0]); + + // Detect cells with the same parent to make sure they get scheduled + // together in the loop, which increases data locality. 
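+        // (illustration: in 3d the eight children of one refined cell all
+        // obtain the same parent_relation entry below, which
+        // create_blocks_serial uses to keep them within the same chunk)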
+ std::vector parent_relation( + task_info.n_active_cells + task_info.n_ghost_cells, + numbers::invalid_unsigned_int); + std::map, std::vector> cell_parents; + for (unsigned int c = 0; c < cell_level_index_end_local; ++c) + if (cell_level_index[c].first > 0) { - if (task_info.n_procs > 1) - { - task_info.cell_partition_data[1] = 0; - task_info.cell_partition_data[2] = - task_info.cell_partition_data[3]; - } - start_nonboundary = task_info.cell_partition_data.back(); + typename Triangulation::cell_iterator cell( + &tria, cell_level_index[c].first, cell_level_index[c].second); + Assert(cell->level() > 0, ExcInternalError()); + cell_parents[std::make_pair(cell->parent()->level(), + cell->parent()->index())] + .push_back(c); } - - if (dof_handlers.active_dof_handler == DoFHandlers::hp) + unsigned int position = 0; + for (const auto &it : cell_parents) + if (it.second.size() == GeometryInfo::max_children_per_cell) { - irregular_cells.resize(0); - irregular_cells.resize(task_info.cell_partition_data.back() + - 2 * dof_info[0].max_fe_index); - std::vector> renumbering_fe_index; - renumbering_fe_index.resize(dof_info[0].max_fe_index); - unsigned int counter; - n_macro_cells_before = 0; - for (counter = 0; counter < std::min(start_nonboundary * n_lanes, - task_info.n_active_cells); - counter++) - { - AssertIndexRange(counter, renumbering.size()); - AssertIndexRange(renumbering[counter], - dof_info[0].cell_active_fe_index.size()); - renumbering_fe_index - [dof_info[0].cell_active_fe_index[renumbering[counter]]] - .push_back(renumbering[counter]); - } - counter = 0; - for (unsigned int j = 0; j < dof_info[0].max_fe_index; j++) - { - for (const auto jj : renumbering_fe_index[j]) - renumbering[counter++] = jj; - irregular_cells[renumbering_fe_index[j].size() / n_lanes + - n_macro_cells_before] = - renumbering_fe_index[j].size() % n_lanes; - n_macro_cells_before += - (renumbering_fe_index[j].size() + n_lanes - 1) / n_lanes; - renumbering_fe_index[j].resize(0); - } - - for (counter = start_nonboundary * n_lanes; - counter < task_info.n_active_cells; - counter++) - { - renumbering_fe_index - [dof_info[0].cell_active_fe_index.empty() ? 
- 0 : - dof_info[0].cell_active_fe_index[renumbering[counter]]] - .push_back(renumbering[counter]); - } - counter = start_nonboundary * n_lanes; - for (unsigned int j = 0; j < dof_info[0].max_fe_index; j++) - { - for (const auto jj : renumbering_fe_index[j]) - renumbering[counter++] = jj; - irregular_cells[renumbering_fe_index[j].size() / n_lanes + - n_macro_cells_before] = - renumbering_fe_index[j].size() % n_lanes; - n_macro_cells_before += - (renumbering_fe_index[j].size() + n_lanes - 1) / n_lanes; - } - AssertIndexRange(n_macro_cells_before, - task_info.cell_partition_data.back() + - 2 * dof_info[0].max_fe_index + 1); - irregular_cells.resize(n_macro_cells_before + n_ghost_slots); - *(task_info.cell_partition_data.end() - 2) = n_macro_cells_before; - *(task_info.cell_partition_data.end() - 1) = - n_macro_cells_before + n_ghost_slots; + for (auto i : it.second) + parent_relation[i] = position; + ++position; } - } + task_info.create_blocks_serial(subdomain_boundary_cells, + dofs_per_cell, + hp_functionality_enabled, + dof_info[0].cell_active_fe_index, + strict_categories, + parent_relation, + renumbering, + irregular_cells); + } + else + { + task_info.make_boundary_cells_divisible(subdomain_boundary_cells); + + // For strategy with blocking before partitioning: reorganize the + // indices in order to overlap communication in MPI with computations: + // Place all cells with ghost indices into one chunk. Also reorder cells + // so that we can parallelize by threads + Assert(cell_vectorization_category.empty(), ExcNotImplemented()); + task_info.initial_setup_blocks_tasks(subdomain_boundary_cells, + renumbering, + irregular_cells); + task_info.guess_block_size(dof_info[0].dofs_per_cell[0]); + + unsigned int n_macro_cells_before = + *(task_info.cell_partition_data.end() - 2); + unsigned int n_ghost_slots = + *(task_info.cell_partition_data.end() - 1) - n_macro_cells_before; + + unsigned int start_nonboundary = numbers::invalid_unsigned_int; + if (task_info.scheme == + internal::MatrixFreeFunctions::TaskInfo::partition_color || + task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color) + { + // set up partitions. if we just use coloring without partitions, do + // nothing here, assume all cells to belong to the zero partition + // (that we otherwise use for MPI boundary cells) + if (task_info.scheme == + internal::MatrixFreeFunctions::TaskInfo::color) + { + start_nonboundary = + task_info.n_procs > 1 ? 
+ std::min(((task_info.cell_partition_data[2] - + task_info.cell_partition_data[1] + + task_info.block_size - 1) / + task_info.block_size) * + task_info.block_size, + task_info.cell_partition_data[3]) : + 0; + } + else + { + if (task_info.n_procs > 1) + { + task_info.cell_partition_data[1] = 0; + task_info.cell_partition_data[2] = + task_info.cell_partition_data[3]; + } + start_nonboundary = task_info.cell_partition_data.back(); + } - task_info.n_blocks = - (n_macro_cells() + task_info.block_size - 1) / task_info.block_size; - - DynamicSparsityPattern connectivity; - connectivity.reinit(task_info.n_active_cells, task_info.n_active_cells); - if ((additional_data.mapping_update_flags_inner_faces | - additional_data.mapping_update_flags_boundary_faces) != - update_default) - make_connectivity_graph_faces(connectivity); - if (task_info.n_active_cells > 0) - dof_info[0].make_connectivity_graph(task_info, - renumbering, - connectivity); - - task_info.make_thread_graph(dof_info[0].cell_active_fe_index, - connectivity, - renumbering, - irregular_cells, - dof_handlers.active_dof_handler == - DoFHandlers::hp); - - Assert(irregular_cells.size() >= task_info.cell_partition_data.back(), - ExcInternalError()); + if (hp_functionality_enabled) + { + irregular_cells.resize(0); + irregular_cells.resize(task_info.cell_partition_data.back() + + 2 * dof_info[0].max_fe_index); + std::vector> renumbering_fe_index; + renumbering_fe_index.resize(dof_info[0].max_fe_index); + unsigned int counter; + n_macro_cells_before = 0; + for (counter = 0; + counter < std::min(start_nonboundary * n_lanes, + task_info.n_active_cells); + counter++) + { + AssertIndexRange(counter, renumbering.size()); + AssertIndexRange(renumbering[counter], + dof_info[0].cell_active_fe_index.size()); + renumbering_fe_index + [dof_info[0].cell_active_fe_index[renumbering[counter]]] + .push_back(renumbering[counter]); + } + counter = 0; + for (unsigned int j = 0; j < dof_info[0].max_fe_index; j++) + { + for (const auto jj : renumbering_fe_index[j]) + renumbering[counter++] = jj; + irregular_cells[renumbering_fe_index[j].size() / n_lanes + + n_macro_cells_before] = + renumbering_fe_index[j].size() % n_lanes; + n_macro_cells_before += + (renumbering_fe_index[j].size() + n_lanes - 1) / n_lanes; + renumbering_fe_index[j].resize(0); + } + + for (counter = start_nonboundary * n_lanes; + counter < task_info.n_active_cells; + counter++) + { + renumbering_fe_index + [dof_info[0].cell_active_fe_index.empty() ? 
+ 0 : + dof_info[0].cell_active_fe_index[renumbering[counter]]] + .push_back(renumbering[counter]); + } + counter = start_nonboundary * n_lanes; + for (unsigned int j = 0; j < dof_info[0].max_fe_index; j++) + { + for (const auto jj : renumbering_fe_index[j]) + renumbering[counter++] = jj; + irregular_cells[renumbering_fe_index[j].size() / n_lanes + + n_macro_cells_before] = + renumbering_fe_index[j].size() % n_lanes; + n_macro_cells_before += + (renumbering_fe_index[j].size() + n_lanes - 1) / n_lanes; + } + AssertIndexRange(n_macro_cells_before, + task_info.cell_partition_data.back() + + 2 * dof_info[0].max_fe_index + 1); + irregular_cells.resize(n_macro_cells_before + n_ghost_slots); + *(task_info.cell_partition_data.end() - 2) = + n_macro_cells_before; + *(task_info.cell_partition_data.end() - 1) = + n_macro_cells_before + n_ghost_slots; + } + } + + task_info.n_blocks = (*(task_info.cell_partition_data.end() - 2) + + task_info.block_size - 1) / + task_info.block_size; + + DynamicSparsityPattern connectivity; + connectivity.reinit(task_info.n_active_cells, task_info.n_active_cells); + if (do_face_integrals) + { +#ifdef DEAL_II_WITH_TBB + // step 1: build map between the index in the matrix-free context + // and the one in the triangulation + tbb::concurrent_unordered_map, + unsigned int> + map; + dealii::parallel::apply_to_subranges( + 0, + cell_level_index.size(), + [&cell_level_index, &map](const unsigned int begin, + const unsigned int end) { + fill_index_subrange(begin, end, cell_level_index, map); + }, + 50); + + // step 2: Make a list for all blocks with other blocks that write + // to the cell (due to the faces that are associated to it) + DynamicSparsityPattern connectivity_direct(connectivity.n_rows(), + connectivity.n_cols()); + dealii::parallel::apply_to_subranges( + 0, + task_info.n_active_cells, + [&cell_level_index, &tria, &map, &connectivity_direct]( + const unsigned int begin, const unsigned int end) { + fill_connectivity_subrange( + begin, end, tria, cell_level_index, map, connectivity_direct); + }, + 20); + connectivity_direct.symmetrize(); + + // step 3: Include also interaction between neighbors one layer away + // because faces might be assigned to cells differently + dealii::parallel::apply_to_subranges( + 0, + task_info.n_active_cells, + [&connectivity_direct, &connectivity](const unsigned int begin, + const unsigned int end) { + fill_connectivity_indirect_subrange(begin, + end, + connectivity_direct, + connectivity); + }, + 20); +#endif + } + if (task_info.n_active_cells > 0) + dof_info[0].make_connectivity_graph(task_info, + renumbering, + connectivity); + + task_info.make_thread_graph(dof_info[0].cell_active_fe_index, + connectivity, + renumbering, + irregular_cells, + hp_functionality_enabled); + + Assert(irregular_cells.size() >= task_info.cell_partition_data.back(), + ExcInternalError()); + + irregular_cells.resize(task_info.cell_partition_data.back() + + n_ghost_slots); + if (n_ghost_slots > 0) + { + for (unsigned int i = task_info.cell_partition_data.back(); + i < task_info.cell_partition_data.back() + n_ghost_slots - 1; + ++i) + irregular_cells[i] = 0; + irregular_cells.back() = task_info.n_ghost_cells % n_lanes; + } - irregular_cells.resize(task_info.cell_partition_data.back() + - n_ghost_slots); - if (n_ghost_slots > 0) { + unsigned int n_cells = 0; + for (unsigned int i = 0; i < task_info.cell_partition_data.back(); + ++i) + n_cells += irregular_cells[i] > 0 ? 
irregular_cells[i] : n_lanes; + AssertDimension(n_cells, task_info.n_active_cells); + n_cells = 0; for (unsigned int i = task_info.cell_partition_data.back(); - i < task_info.cell_partition_data.back() + n_ghost_slots - 1; + i < n_ghost_slots + task_info.cell_partition_data.back(); ++i) - irregular_cells[i] = 0; - irregular_cells.back() = task_info.n_ghost_cells % n_lanes; + n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes; + AssertDimension(n_cells, task_info.n_ghost_cells); } - { - unsigned int n_cells = 0; - for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i) - n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes; - AssertDimension(n_cells, task_info.n_active_cells); - n_cells = 0; - for (unsigned int i = task_info.cell_partition_data.back(); - i < n_ghost_slots + task_info.cell_partition_data.back(); - ++i) - n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes; - AssertDimension(n_cells, task_info.n_ghost_cells); + task_info.cell_partition_data.push_back( + task_info.cell_partition_data.back() + n_ghost_slots); } - task_info.cell_partition_data.push_back( - task_info.cell_partition_data.back() + n_ghost_slots); + // Finally perform the renumbering. We also want to group several cells + // together to a batch of cells for SIMD (vectorized) execution (where the + // arithmetic operations of several cells will then be done + // simultaneously). +#ifdef DEBUG + { + AssertDimension(renumbering.size(), + task_info.n_active_cells + task_info.n_ghost_cells); + std::vector sorted_renumbering(renumbering); + std::sort(sorted_renumbering.begin(), sorted_renumbering.end()); + for (unsigned int i = 0; i < sorted_renumbering.size(); ++i) + Assert(sorted_renumbering[i] == i, ExcInternalError()); + } +#endif + { + std::vector> cell_level_index_old; + cell_level_index.swap(cell_level_index_old); + cell_level_index.reserve(task_info.cell_partition_data.back() * n_lanes); + unsigned int position_cell = 0; + for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i) + { + unsigned int n_comp = + (irregular_cells[i] > 0) ? irregular_cells[i] : n_lanes; + for (unsigned int j = 0; j < n_comp; ++j) + cell_level_index.push_back( + cell_level_index_old[renumbering[position_cell + j]]); + + // generate a cell and level index also when we have not filled up + // vectorization_length cells. This is needed for MappingInfo when the + // transformation data is initialized. We just set the value to the + // last valid cell in that case. + for (unsigned int j = n_comp; j < n_lanes; ++j) + cell_level_index.push_back( + cell_level_index_old[renumbering[position_cell + n_comp - 1]]); + position_cell += n_comp; + } + AssertDimension(position_cell, + task_info.n_active_cells + task_info.n_ghost_cells); + AssertDimension(cell_level_index.size(), + task_info.cell_partition_data.back() * n_lanes); } - // Finally perform the renumbering. We also want to group several cells - // together to a batch of cells for SIMD (vectorized) execution (where the - // arithmetic operations of several cells will then be done simultaneously). 
-#ifdef DEBUG - { - AssertDimension(renumbering.size(), - task_info.n_active_cells + task_info.n_ghost_cells); - std::vector sorted_renumbering(renumbering); - std::sort(sorted_renumbering.begin(), sorted_renumbering.end()); - for (unsigned int i = 0; i < sorted_renumbering.size(); ++i) - Assert(sorted_renumbering[i] == i, ExcInternalError()); + std::vector constraint_pool_row_index; + constraint_pool_row_index.resize(1, 0); + for (const auto &it : constraint_values.constraints) + constraint_pool_row_index.push_back(constraint_pool_row_index.back() + + it.first.size()); + + for (unsigned int no = 0; no < n_dof_handlers; ++no) + dof_info[no].reorder_cells(task_info, + renumbering, + constraint_pool_row_index, + irregular_cells); + + return is_fe_dg; } -#endif +} // namespace internal + + + +template +template +void +MatrixFree::initialize_indices( + const std::vector *> &constraint, + const std::vector & locally_owned_dofs, + const AdditionalData & additional_data) +{ + // insert possible ghost cells and construct face topology + const bool do_face_integrals = + (additional_data.mapping_update_flags_inner_faces | + additional_data.mapping_update_flags_boundary_faces) != update_default; + internal::MatrixFreeFunctions::FaceSetup face_setup; + + // create a vector with the dummy information about dofs in ShapeInfo + // without the template of VectorizedArrayType + Table<2, internal::MatrixFreeFunctions::ShapeInfo> shape_info_dummy( + shape_info.size(0), shape_info.size(2)); { - std::vector> cell_level_index_old; - cell_level_index.swap(cell_level_index_old); - cell_level_index.reserve(task_info.cell_partition_data.back() * n_lanes); - unsigned int position_cell = 0; - for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i) - { - unsigned int n_comp = - (irregular_cells[i] > 0) ? irregular_cells[i] : n_lanes; - for (unsigned int j = 0; j < n_comp; ++j) - cell_level_index.push_back( - cell_level_index_old[renumbering[position_cell + j]]); - - // generate a cell and level index also when we have not filled up - // vectorization_length cells. This is needed for MappingInfo when the - // transformation data is initialized. We just set the value to the - // last valid cell in that case. 
- for (unsigned int j = n_comp; j < n_lanes; ++j) - cell_level_index.push_back( - cell_level_index_old[renumbering[position_cell + n_comp - 1]]); - position_cell += n_comp; - } - AssertDimension(position_cell, - task_info.n_active_cells + task_info.n_ghost_cells); - AssertDimension(cell_level_index.size(), - task_info.cell_partition_data.back() * n_lanes); + QGauss<1> quad(1); + for (unsigned int no = 0, c = 0; no < dof_handlers.size(); no++) + for (unsigned int b = 0; + b < dof_handlers[no]->get_fe(0).n_base_elements(); + ++b, ++c) + for (unsigned int fe_no = 0; + fe_no < dof_handlers[no]->get_fe_collection().size(); + ++fe_no) + shape_info_dummy(c, fe_no).reinit(quad, + dof_handlers[no]->get_fe(fe_no), + b); } + const unsigned int n_lanes = VectorizedArrayType::size(); + task_info.vectorization_length = n_lanes; + internal::MatrixFreeFunctions::ConstraintValues constraint_values; + const std::vector is_fe_dg = internal::compute_dof_info( + constraint, + locally_owned_dofs, + dof_handlers, + shape_info_dummy, + cell_level_index_end_local, + additional_data.mg_level, + additional_data.hold_all_faces_to_owned_cells, + additional_data.cell_vectorization_category, + additional_data.cell_vectorization_categories_strict, + do_face_integrals, + additional_data.overlap_communication_computation, + task_info, + cell_level_index, + dof_info, + face_setup, + constraint_values); + // set constraint pool from the std::map and reorder the indices std::vector *> constraints( constraint_values.constraints.size()); @@ -1265,22 +1352,14 @@ MatrixFree::initialize_indices( } AssertDimension(constraint_pool_data.size(), length); - for (unsigned int no = 0; no < n_fe; ++no) - dof_info[no].reorder_cells(task_info, - renumbering, - constraint_pool_row_index, - irregular_cells); // Finally resort the faces and collect several faces for vectorization if ((additional_data.mapping_update_flags_inner_faces | additional_data.mapping_update_flags_boundary_faces) != update_default) { - face_setup.generate_faces( - dof_handlers.active_dof_handler == DoFHandlers::usual ? 
- dof_handlers.dof_handler[0]->get_triangulation() : - dof_handlers.hp_dof_handler[0]->get_triangulation(), - cell_level_index, - task_info); + face_setup.generate_faces(dof_handlers[0]->get_triangulation(), + cell_level_index, + task_info); if (additional_data.mapping_update_flags_inner_faces != update_default) Assert(face_setup.refinement_edge_faces.empty(), ExcNotImplemented("Setting up data structures on MG levels with " @@ -1337,8 +1416,8 @@ MatrixFree::initialize_indices( (task_info.refinement_edge_face_partition_data[1] - task_info.refinement_edge_face_partition_data[0])); - for (unsigned int no = 0; no < n_fe; ++no) - dof_info[no].compute_face_index_compression(face_info.faces); + for (auto &di : dof_info) + di.compute_face_index_compression(face_info.faces); // build the inverse map back from the faces array to // cell_and_face_to_plain_faces @@ -1394,393 +1473,20 @@ MatrixFree::initialize_indices( } // compute tighter index sets for various sets of face integrals - for (unsigned int no = 0; no < n_fe; ++no) - { - internal::MatrixFreeFunctions::DoFInfo &di = dof_info[no]; - - const Utilities::MPI::Partitioner &part = *di.vector_partitioner; - - // partitioner 0: no face integrals, simply use the indices present - // on the cells - unsigned int n_macro_cells_before = - *(task_info.cell_partition_data.end() - 2); - std::vector ghost_indices; - { - for (unsigned int cell = 0; - cell < VectorizedArrayType::size() * n_macro_cells_before; - ++cell) - if (cell == 0 || - cell_level_index[cell] != cell_level_index[cell - 1]) - { - for (unsigned int i = - di.row_starts[cell * di.start_components.back()].first; - i < - di.row_starts[(cell + 1) * di.start_components.back()] - .first; - ++i) - if (di.dof_indices[i] >= part.local_size()) - ghost_indices.push_back( - part.local_to_global(di.dof_indices[i])); - - const unsigned int fe_index = di.dofs_per_cell.size() == 1 ? 
- 0 : - di.cell_active_fe_index[cell]; - const unsigned int dofs_this_cell = - di.dofs_per_cell[fe_index]; - - for (unsigned int i = di.row_starts_plain_indices[cell]; - i < di.row_starts_plain_indices[cell] + dofs_this_cell; - ++i) - if (di.plain_dof_indices[i] >= part.local_size()) - ghost_indices.push_back( - part.local_to_global(di.plain_dof_indices[i])); - } - std::sort(ghost_indices.begin(), ghost_indices.end()); - ghost_indices.erase(std::unique(ghost_indices.begin(), - ghost_indices.end()), - ghost_indices.end()); - IndexSet compressed_set(part.size()); - compressed_set.add_indices(ghost_indices.begin(), - ghost_indices.end()); - compressed_set.subtract_set( - di.vector_partitioner->locally_owned_range()); - const bool all_ghosts_equal = - Utilities::MPI::min( - compressed_set.n_elements() == - di.vector_partitioner->ghost_indices().n_elements(), - di.vector_partitioner->get_mpi_communicator()) != 0; - if (all_ghosts_equal) - di.vector_partitioner_face_variants[0] = di.vector_partitioner; - else - { - di.vector_partitioner_face_variants[0] = - std::make_shared( - part.locally_owned_range(), part.get_mpi_communicator()); - const_cast( - di.vector_partitioner_face_variants[0].get()) - ->set_ghost_indices(compressed_set, part.ghost_indices()); - } - } - - const auto loop_over_faces = - [&](const std::function &fu) { - for (unsigned int f = 0; f < n_inner_face_batches(); ++f) - for (unsigned int v = 0; v < VectorizedArrayType::size() && - face_info.faces[f].cells_interior[v] != - numbers::invalid_unsigned_int; - ++v) - { - AssertIndexRange(face_info.faces[f].cells_interior[v], - n_macro_cells_before * - VectorizedArrayType::size()); - const unsigned int p = face_info.faces[f].cells_exterior[v]; - const unsigned int face_no = - face_info.faces[f].exterior_face_no; - - fu(p, face_no, f, v, true /*exterior*/, false /*flag*/); - } - }; - - const auto loop_over_all_faces = - [&](const std::function &fu) { - const unsigned int n_lanes = VectorizedArrayType::size(); - - for (unsigned int c = 0; c < n_cell_batches(); ++c) - for (const unsigned int d : GeometryInfo::face_indices()) - for (unsigned int v = 0; v < n_lanes; ++v) - { - unsigned int f = - this->face_info.cell_and_face_to_plain_faces(c, d, v); - if (f == numbers::invalid_unsigned_int) - continue; - - const unsigned int cell_this = c * n_lanes + v; - - const unsigned int cell_m = - this->get_face_info(f / n_lanes) - .cells_interior[f % n_lanes]; - const unsigned int cell_p = - this->get_face_info(f / n_lanes) - .cells_exterior[f % n_lanes]; - - const bool ext = cell_this == cell_m; - - if (ext && cell_p == numbers::invalid_unsigned_int) - continue; - - const unsigned int p = ext ? cell_p : cell_m; - const unsigned int face_no = - ext ? 
face_info.faces[f / n_lanes].exterior_face_no : - face_info.faces[f / n_lanes].interior_face_no; - - fu(p, face_no, f / n_lanes, v, ext, true); - } - }; - - const auto process_values = - [&](std::shared_ptr - &vector_partitioner_values, - const std::function &)> &loop) { - bool all_nodal = true; - for (unsigned int c = 0; c < di.n_base_elements; ++c) - if (!shape_info(di.global_base_element_offset + c, 0, 0, 0) - .data.front() - .nodal_at_cell_boundaries) - all_nodal = false; - if (all_nodal == false) - vector_partitioner_values = di.vector_partitioner; - else - { - bool has_noncontiguous_cell = false; - - loop([&](const unsigned int p, - const unsigned int face_no, - const unsigned int f, - const unsigned int /*v*/, - const bool ext, - const bool flag) { - if (flag || - (di.index_storage_variants - [ext ? internal::MatrixFreeFunctions::DoFInfo:: - dof_access_face_exterior : - internal::MatrixFreeFunctions::DoFInfo:: - dof_access_face_interior][f] >= - internal::MatrixFreeFunctions::DoFInfo:: - IndexStorageVariants::contiguous && - di.dof_indices_contiguous - [internal::MatrixFreeFunctions::DoFInfo:: - dof_access_cell][p] >= part.local_size())) - { - const unsigned int stride = - di.dof_indices_interleave_strides[2][p]; - unsigned int i = 0; - for (unsigned int e = 0; e < di.n_base_elements; ++e) - for (unsigned int c = 0; c < di.n_components[e]; ++c) - { - const internal::MatrixFreeFunctions::ShapeInfo< - VectorizedArrayType> &shape = - shape_info(di.global_base_element_offset + e, - 0, - 0, - 0); - for (unsigned int j = 0; - j < shape.dofs_per_component_on_face; - ++j) - ghost_indices.push_back(part.local_to_global( - di.dof_indices_contiguous - [internal::MatrixFreeFunctions::DoFInfo:: - dof_access_cell][p] + - i + - shape.face_to_cell_index_nodal(face_no, j) * - stride)); - i += shape.dofs_per_component_on_cell * stride; - } - AssertDimension(i, di.dofs_per_cell[0] * stride); - } - else if (di.index_storage_variants - [ext ? 
internal::MatrixFreeFunctions::DoFInfo:: - dof_access_face_exterior : - internal::MatrixFreeFunctions::DoFInfo:: - dof_access_face_interior][f] < - internal::MatrixFreeFunctions::DoFInfo:: - IndexStorageVariants::contiguous) - has_noncontiguous_cell = true; - }); - has_noncontiguous_cell = - Utilities::MPI::min(has_noncontiguous_cell, - task_info.communicator) != 0; - - std::sort(ghost_indices.begin(), ghost_indices.end()); - ghost_indices.erase(std::unique(ghost_indices.begin(), - ghost_indices.end()), - ghost_indices.end()); - IndexSet compressed_set(part.size()); - compressed_set.add_indices(ghost_indices.begin(), - ghost_indices.end()); - compressed_set.subtract_set( - di.vector_partitioner->locally_owned_range()); - const bool all_ghosts_equal = - Utilities::MPI::min( - compressed_set.n_elements() == - di.vector_partitioner->ghost_indices().n_elements(), - di.vector_partitioner->get_mpi_communicator()) != 0; - if (all_ghosts_equal || has_noncontiguous_cell) - vector_partitioner_values = di.vector_partitioner; - else - { - vector_partitioner_values = - std::make_shared( - part.locally_owned_range(), - part.get_mpi_communicator()); - const_cast( - vector_partitioner_values.get()) - ->set_ghost_indices(compressed_set, - part.ghost_indices()); - } - } - }; - - - const auto process_gradients = - [&](const std::shared_ptr - &vector_partitoner_values, - std::shared_ptr - &vector_partitioner_gradients, - const std::function &)> &loop) { - bool all_hermite = true; - for (unsigned int c = 0; c < di.n_base_elements; ++c) - if (shape_info(di.global_base_element_offset + c, 0, 0, 0) - .element_type != - internal::MatrixFreeFunctions::tensor_symmetric_hermite) - all_hermite = false; - if (all_hermite == false || - vector_partitoner_values.get() == di.vector_partitioner.get()) - vector_partitioner_gradients = di.vector_partitioner; - else - { - loop([&](const unsigned int p, - const unsigned int face_no, - const unsigned int f, - const unsigned int v, - const bool ext, - const bool flag) { - (void)v; - // The following statement could be simplified - // using AssertIndexRange, but that leads to an - // internal compiler error with GCC 7.4. Do things - // by hand instead. - Assert(face_info.faces[f].cells_interior[v] < - n_macro_cells_before * VectorizedArrayType::size(), - ExcIndexRange(face_info.faces[f].cells_interior[v], - 0, - n_macro_cells_before * - VectorizedArrayType::size())); - if (flag || - (di.index_storage_variants - [ext ? 
internal::MatrixFreeFunctions::DoFInfo:: - dof_access_face_exterior : - internal::MatrixFreeFunctions::DoFInfo:: - dof_access_face_interior][f] >= - internal::MatrixFreeFunctions::DoFInfo:: - IndexStorageVariants::contiguous && - di.dof_indices_contiguous - [internal::MatrixFreeFunctions::DoFInfo:: - dof_access_cell][p] >= part.local_size())) - { - const unsigned int stride = - di.dof_indices_interleave_strides[2][p]; - unsigned int i = 0; - for (unsigned int e = 0; e < di.n_base_elements; ++e) - for (unsigned int c = 0; c < di.n_components[e]; ++c) - { - const internal::MatrixFreeFunctions::ShapeInfo< - VectorizedArrayType> &shape = - shape_info(di.global_base_element_offset + e, - 0, - 0, - 0); - for (unsigned int j = 0; - j < 2 * shape.dofs_per_component_on_face; - ++j) - ghost_indices.push_back(part.local_to_global( - di.dof_indices_contiguous - [internal::MatrixFreeFunctions::DoFInfo:: - dof_access_cell][p] + - i + - shape.face_to_cell_index_hermite(face_no, j) * - stride)); - i += shape.dofs_per_component_on_cell * stride; - } - AssertDimension(i, di.dofs_per_cell[0] * stride); - } - }); - std::sort(ghost_indices.begin(), ghost_indices.end()); - ghost_indices.erase(std::unique(ghost_indices.begin(), - ghost_indices.end()), - ghost_indices.end()); - IndexSet compressed_set(part.size()); - compressed_set.add_indices(ghost_indices.begin(), - ghost_indices.end()); - compressed_set.subtract_set( - di.vector_partitioner->locally_owned_range()); - const bool all_ghosts_equal = - Utilities::MPI::min( - compressed_set.n_elements() == - di.vector_partitioner->ghost_indices().n_elements(), - di.vector_partitioner->get_mpi_communicator()) != 0; - if (all_ghosts_equal) - vector_partitioner_gradients = di.vector_partitioner; - else - { - vector_partitioner_gradients = - std::make_shared( - part.locally_owned_range(), - part.get_mpi_communicator()); - const_cast( - vector_partitioner_gradients.get()) - ->set_ghost_indices(compressed_set, - part.ghost_indices()); - } - } - }; - - // partitioner 1: values on faces - process_values(di.vector_partitioner_face_variants[1], - loop_over_faces); - - // partitioner 2: values and gradients on faces - process_gradients(di.vector_partitioner_face_variants[1], - di.vector_partitioner_face_variants[2], - loop_over_faces); - - - if (additional_data.hold_all_faces_to_owned_cells && is_fe_dg[no]) - { - ghost_indices.clear(); - // partitioner 3: values on all faces - process_values(di.vector_partitioner_face_variants[3], - loop_over_all_faces); - // partitioner 4: values and gradients on faces - process_gradients(di.vector_partitioner_face_variants[3], - di.vector_partitioner_face_variants[4], - loop_over_all_faces); - } - else - { - di.vector_partitioner_face_variants[3] = - std::make_shared( - part.locally_owned_range(), part.get_mpi_communicator()); - di.vector_partitioner_face_variants[4] = - std::make_shared( - part.locally_owned_range(), part.get_mpi_communicator()); - } - } + unsigned int count = 0; + for (auto &di : dof_info) + di.compute_tight_partitioners( + shape_info_dummy, + *(task_info.cell_partition_data.end() - 2) * + VectorizedArrayType::size(), + VectorizedArrayType::size(), + face_setup.inner_faces, + face_setup.inner_ghost_faces, + is_fe_dg[count++] && additional_data.hold_all_faces_to_owned_cells); } - for (unsigned int no = 0; no < n_fe; ++no) - dof_info[no].compute_vector_zero_access_pattern(task_info, face_info.faces); + for (auto &di : dof_info) + di.compute_vector_zero_access_pattern(task_info, face_info.faces); indices_are_initialized = 
true; } @@ -1795,8 +1501,7 @@ MatrixFree::clear() mapping_info.clear(); cell_level_index.clear(); task_info.clear(); - dof_handlers.dof_handler.clear(); - dof_handlers.hp_dof_handler.clear(); + dof_handlers.clear(); face_info.clear(); indices_are_initialized = false; mapping_is_initialized = false; @@ -1804,166 +1509,6 @@ MatrixFree::clear() -#ifdef DEAL_II_WITH_TBB - -namespace internal -{ - inline void - fill_index_subrange( - const unsigned int begin, - const unsigned int end, - const std::vector> &cell_level_index, - tbb::concurrent_unordered_map, - unsigned int> & map) - { - if (cell_level_index.empty()) - return; - unsigned int cell = begin; - if (cell == 0) - map.insert(std::make_pair(cell_level_index[cell++], 0U)); - for (; cell < end; ++cell) - if (cell_level_index[cell] != cell_level_index[cell - 1]) - map.insert(std::make_pair(cell_level_index[cell], cell)); - } - - template - inline void - fill_connectivity_subrange( - const unsigned int begin, - const unsigned int end, - const dealii::Triangulation & tria, - const std::vector> &cell_level_index, - const tbb::concurrent_unordered_map, - unsigned int> & map, - DynamicSparsityPattern &connectivity_direct) - { - std::vector new_indices; - for (unsigned int cell = begin; cell < end; ++cell) - { - new_indices.clear(); - typename dealii::Triangulation::cell_iterator dcell( - &tria, cell_level_index[cell].first, cell_level_index[cell].second); - for (auto f : GeometryInfo::face_indices()) - { - // Only inner faces couple different cells - if (dcell->at_boundary(f) == false && - dcell->neighbor_or_periodic_neighbor(f)->level_subdomain_id() == - dcell->level_subdomain_id()) - { - std::pair level_index( - dcell->neighbor_or_periodic_neighbor(f)->level(), - dcell->neighbor_or_periodic_neighbor(f)->index()); - auto it = map.find(level_index); - if (it != map.end()) - { - const unsigned int neighbor_cell = it->second; - if (neighbor_cell != cell) - new_indices.push_back(neighbor_cell); - } - } - } - std::sort(new_indices.begin(), new_indices.end()); - connectivity_direct.add_entries(cell, - new_indices.begin(), - std::unique(new_indices.begin(), - new_indices.end())); - } - } - - inline void - fill_connectivity_indirect_subrange( - const unsigned int begin, - const unsigned int end, - const DynamicSparsityPattern &connectivity_direct, - DynamicSparsityPattern & connectivity) - { - std::vector new_indices; - for (unsigned int block = begin; block < end; ++block) - { - new_indices.clear(); - for (DynamicSparsityPattern::iterator it = - connectivity_direct.begin(block); - it != connectivity_direct.end(block); - ++it) - { - new_indices.push_back(it->column()); - for (DynamicSparsityPattern::iterator it_neigh = - connectivity_direct.begin(it->column()); - it_neigh != connectivity_direct.end(it->column()); - ++it_neigh) - if (it_neigh->column() != block) - new_indices.push_back(it_neigh->column()); - } - std::sort(new_indices.begin(), new_indices.end()); - connectivity.add_entries(block, - new_indices.begin(), - std::unique(new_indices.begin(), - new_indices.end())); - } - } -} // namespace internal - -#endif - - - -template -void -MatrixFree::make_connectivity_graph_faces( - DynamicSparsityPattern &connectivity) -{ - (void)connectivity; -#ifdef DEAL_II_WITH_TBB - // step 1: build map between the index in the matrix-free context and the - // one in the triangulation - tbb::concurrent_unordered_map, - unsigned int> - map; - parallel::apply_to_subranges( - 0, - cell_level_index.size(), - [this, &map](const unsigned int begin, const unsigned int 
end) { - internal::fill_index_subrange(begin, end, cell_level_index, map); - }, - 50); - - // step 2: Make a list for all blocks with other blocks that write to the - // cell (due to the faces that are associated to it) - DynamicSparsityPattern connectivity_direct(connectivity.n_rows(), - connectivity.n_cols()); - const Triangulation &tria = - dof_handlers.active_dof_handler == DoFHandlers::usual ? - dof_handlers.dof_handler[0]->get_triangulation() : - dof_handlers.hp_dof_handler[0]->get_triangulation(); - parallel::apply_to_subranges( - 0, - task_info.n_active_cells, - [this, &tria, &map, &connectivity_direct](const unsigned int begin, - const unsigned int end) { - internal::fill_connectivity_subrange( - begin, end, tria, cell_level_index, map, connectivity_direct); - }, - 20); - connectivity_direct.symmetrize(); - - // step 3: Include also interaction between neighbors one layer away because - // faces might be assigned to cells differently - parallel::apply_to_subranges( - 0, - task_info.n_active_cells, - [&connectivity_direct, &connectivity](const unsigned int begin, - const unsigned int end) { - internal::fill_connectivity_indirect_subrange(begin, - end, - connectivity_direct, - connectivity); - }, - 20); -#endif -} - - - template std::size_t MatrixFree::memory_consumption() const diff --git a/source/matrix_free/CMakeLists.txt b/source/matrix_free/CMakeLists.txt index e0d5976dcc..348e4cf6c7 100644 --- a/source/matrix_free/CMakeLists.txt +++ b/source/matrix_free/CMakeLists.txt @@ -22,8 +22,6 @@ SET(_src mapping_info_inst2.cc mapping_info_inst3.cc matrix_free.cc - matrix_free_inst2.cc - matrix_free_inst3.cc shape_info.cc task_info.cc ) diff --git a/source/matrix_free/matrix_free.cc b/source/matrix_free/matrix_free.cc index c0ffd272c0..10d9c84d97 100644 --- a/source/matrix_free/matrix_free.cc +++ b/source/matrix_free/matrix_free.cc @@ -24,10 +24,6 @@ DEAL_II_NAMESPACE_OPEN -#define SPLIT_INSTANTIATIONS_COUNT 3 -#ifndef SPLIT_INSTANTIATIONS_INDEX -# define SPLIT_INSTANTIATIONS_INDEX 0 -#endif #include "matrix_free.inst" DEAL_II_NAMESPACE_CLOSE diff --git a/source/matrix_free/matrix_free.inst.in b/source/matrix_free/matrix_free.inst.in index f143a61fc9..5fae952f73 100644 --- a/source/matrix_free/matrix_free.inst.in +++ b/source/matrix_free/matrix_free.inst.in @@ -43,30 +43,11 @@ for (deal_II_dimension : DIMENSIONS; const std::vector> &, const AdditionalData &); - template void MatrixFree:: - internal_reinit( - const Mapping &, - const std::vector *> &, - const std::vector< - const AffineConstraints *> &, - const std::vector &, - const std::vector> &, - const AdditionalData &); - template const DoFHandler & MatrixFree:: get_dof_handler>(const unsigned int) const; - - template const hp::DoFHandler - &MatrixFree:: - get_dof_handler>(const unsigned int) - const; } @@ -83,17 +64,6 @@ for (deal_II_dimension : DIMENSIONS; const std::vector &, const std::vector> &, const AdditionalData &); - - template void MatrixFree:: - internal_reinit( - const Mapping &, - const std::vector *> &, - const std::vector *> &, - const std::vector &, - const std::vector> &, - const AdditionalData &); } diff --git a/source/matrix_free/matrix_free_inst2.cc b/source/matrix_free/matrix_free_inst2.cc deleted file mode 100644 index 998346dd19..0000000000 --- a/source/matrix_free/matrix_free_inst2.cc +++ /dev/null @@ -1,17 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2020 by the deal.II authors -// -// This file is part of the deal.II library. 
-// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - -#define SPLIT_INSTANTIATIONS_INDEX 1 -#include "matrix_free.cc" diff --git a/source/matrix_free/matrix_free_inst3.cc b/source/matrix_free/matrix_free_inst3.cc deleted file mode 100644 index 8358b94d72..0000000000 --- a/source/matrix_free/matrix_free_inst3.cc +++ /dev/null @@ -1,17 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2020 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - -#define SPLIT_INSTANTIATIONS_INDEX 2 -#include "matrix_free.cc" -- 2.39.5
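The net effect of the hunks above is that the heavy index setup now lives in non-templated helpers (internal::compute_dof_info and DoFInfo::compute_tight_partitioners are called from the much thinner MatrixFree::initialize_indices), so that work is compiled once instead of once per (dim, Number, VectorizedArrayType) instantiation; consistently with that, the previous workaround of splitting the instantiations across matrix_free_inst2.cc and matrix_free_inst3.cc is removed. The stand-alone sketch below is only a minimal model of that compile-time pattern, not deal.II code: MatrixFreeLike, renumber_cells_untemplated and Simd4 are hypothetical names invented for illustration.

// compile_time_split_sketch.cc
//
// Minimal model of the pattern used by this patch: index setup that does
// not depend on the vectorization type is moved into a plain function in
// one translation unit, while the class template keeps only a thin wrapper.

#include <vector>

namespace internal
{
  // Compiled once, no matter how many template combinations the wrapper
  // class below is instantiated for.
  std::vector<unsigned int>
  renumber_cells_untemplated(const std::vector<unsigned int> &cells,
                             const unsigned int               n_lanes)
  {
    std::vector<unsigned int> renumbering;
    renumbering.reserve(cells.size());
    // Stand-in for the real work: group cells into batches of n_lanes.
    for (unsigned int batch = 0; batch * n_lanes < cells.size(); ++batch)
      for (unsigned int v = 0;
           v < n_lanes && batch * n_lanes + v < cells.size();
           ++v)
        renumbering.push_back(cells[batch * n_lanes + v]);
    return renumbering;
  }
} // namespace internal

// Only the SIMD width is taken from the template parameter, so the code
// that actually gets instantiated per vectorization type stays small.
template <typename VectorizedArrayType>
class MatrixFreeLike
{
public:
  void initialize_indices(const std::vector<unsigned int> &cells)
  {
    renumbering =
      internal::renumber_cells_untemplated(cells,
                                           VectorizedArrayType::size());
  }

private:
  std::vector<unsigned int> renumbering;
};

// Usage with a dummy 4-lane SIMD type:
struct Simd4
{
  static constexpr unsigned int size() { return 4; }
};

int main()
{
  MatrixFreeLike<Simd4> mf;
  mf.initialize_indices({0, 1, 2, 3, 4, 5});
}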