From: Martin Kronbichler
Date: Tue, 10 Sep 2019 08:07:09 +0000 (+0200)
Subject: Add pre- and post-loops to matrix free cell loop
X-Git-Tag: v9.2.0-rc1~1054^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6c0bf1c2cc4d713b115d96c164d5e5df4cf2ea20;p=dealii.git

Add pre- and post-loops to matrix free cell loop
---

diff --git a/include/deal.II/matrix_free/dof_info.h b/include/deal.II/matrix_free/dof_info.h
index 19f3368845..ce449f03da 100644
--- a/include/deal.II/matrix_free/dof_info.h
+++ b/include/deal.II/matrix_free/dof_info.h
@@ -75,10 +75,12 @@ namespace internal
       * caches, saving one global vector access for the case where this is
       * applied rather than `vector = 0.;`.
       *
-      * Size of the chunk is set to 64 kByte which generally fits to current
-      * caches.
+      * We set the granularity to 64, a number sufficiently large to
+      * minimize loop peel overhead within the work (and compatible with
+      * vectorization lengths of up to 16) and small enough to keep the
+      * individual chunks fine-grained.
       */
-     static const unsigned int chunk_size_zero_vector = 8192;
+     static const unsigned int chunk_size_zero_vector = 64;

      /**
       * Default empty constructor.
@@ -600,7 +602,33 @@ namespace internal
      /**
       * Stores the actual ranges in the vector to be cleared.
       */
-     std::vector<unsigned int> vector_zero_range_list;
+     std::vector<std::pair<unsigned int, unsigned int>> vector_zero_range_list;
+
+     /**
+      * Stores an integer for each partition in TaskInfo that indicates when
+      * to schedule operations that will be done before any access to vector
+      * entries.
+      */
+     std::vector<unsigned int> cell_loop_pre_list_index;
+
+     /**
+      * Stores the actual ranges of the operation before any access to vector
+      * entries.
+      */
+     std::vector<std::pair<unsigned int, unsigned int>> cell_loop_pre_list;
+
+     /**
+      * Stores an integer for each partition in TaskInfo that indicates when
+      * to schedule operations that will be done after all access to vector
+      * entries.
+      */
+     std::vector<unsigned int> cell_loop_post_list_index;
+
+     /**
+      * Stores the actual ranges of the operation after all access to vector
+      * entries.
+      */
+     std::vector<std::pair<unsigned int, unsigned int>> cell_loop_post_list;
    };
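Taken together, each `*_list_index` / `*_list` pair above forms a CSR-like lookup: for TaskInfo partition `p`, the entries `list_index[p]` to `list_index[p+1]` of the companion list hold the half-open `[first, last)` vector ranges scheduled at that point. A minimal standalone sketch of the traversal (hypothetical helper, not part of the patch):

    #include <functional>
    #include <utility>
    #include <vector>

    // Visit all ranges recorded for one partition in the CSR-like pair of
    // arrays (range_list_index, range_list) described above.
    void for_each_range(
      const std::vector<unsigned int> &range_list_index,
      const std::vector<std::pair<unsigned int, unsigned int>> &range_list,
      const unsigned int partition,
      const std::function<void(unsigned int, unsigned int)> &op)
    {
      for (unsigned int id = range_list_index[partition];
           id != range_list_index[partition + 1];
           ++id)
        op(range_list[id].first, range_list[id].second);
    }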
diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h
index 26f78fb88a..de4bbf4e3c 100644
--- a/include/deal.II/matrix_free/dof_info.templates.h
+++ b/include/deal.II/matrix_free/dof_info.templates.h
@@ -1214,7 +1214,10 @@ namespace internal
      const unsigned int n_components = start_components.back();
      const unsigned int n_dofs = vector_partitioner->local_size() +
                                  vector_partitioner->n_ghost_indices();
-     std::vector<unsigned int> touched_by(
+     std::vector<unsigned int> touched_first_by(
+       (n_dofs + chunk_size_zero_vector - 1) / chunk_size_zero_vector,
+       numbers::invalid_unsigned_int);
+     std::vector<unsigned int> touched_last_by(
        (n_dofs + chunk_size_zero_vector - 1) / chunk_size_zero_vector,
        numbers::invalid_unsigned_int);
      for (unsigned int part = 0;
@@ -1238,8 +1241,10 @@ namespace internal
                {
                  const unsigned int myindex =
                    dof_indices[it] / chunk_size_zero_vector;
-                 if (touched_by[myindex] == numbers::invalid_unsigned_int)
-                   touched_by[myindex] = chunk;
+                 if (touched_first_by[myindex] ==
+                     numbers::invalid_unsigned_int)
+                   touched_first_by[myindex] = chunk;
+                 touched_last_by[myindex] = chunk;
                }
            }
        if (faces.size() > 0)
@@ -1259,42 +1264,123 @@ namespace internal
                    {
                      const unsigned int myindex =
                        dof_indices[it] / chunk_size_zero_vector;
-                     if (touched_by[myindex] ==
-                         numbers::invalid_unsigned_int)
-                       touched_by[myindex] = chunk;
+                     if (touched_first_by[myindex] ==
+                         numbers::invalid_unsigned_int)
+                       touched_first_by[myindex] = chunk;
+                     touched_last_by[myindex] = chunk;
                    }
                }
            }
+
      // ensure that all indices are touched at least during the last round
-     for (auto &index : touched_by)
+     for (auto &index : touched_first_by)
        if (index == numbers::invalid_unsigned_int)
-         index = task_info.cell_partition_data.back() - 1;
+         index =
+           task_info
+             .partition_row_index[task_info.partition_row_index.size() - 2] -
+           1;
-
-     vector_zero_range_list_index.resize(
-       1 + task_info
-             .partition_row_index[task_info.partition_row_index.size() - 2],
-       numbers::invalid_unsigned_int);
-     std::map<unsigned int, std::vector<unsigned int>> chunk_must_zero_vector;
-     for (unsigned int i = 0; i < touched_by.size(); ++i)
-       chunk_must_zero_vector[touched_by[i]].push_back(i);
-     vector_zero_range_list.clear();
-     vector_zero_range_list_index[0] = 0;
-     for (unsigned int chunk = 0;
-          chunk < vector_zero_range_list_index.size() - 1;
-          ++chunk)
-       {
-         auto it = chunk_must_zero_vector.find(chunk);
-         if (it != chunk_must_zero_vector.end())
-           {
-             for (const auto i : it->second)
-               vector_zero_range_list.push_back(i);
-             vector_zero_range_list_index[chunk + 1] =
-               vector_zero_range_list.size();
-           }
-         else
-           vector_zero_range_list_index[chunk + 1] =
-             vector_zero_range_list_index[chunk];
-       }
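// A standalone toy run (not part of the patch) of the first/last-touch
// bookkeeping set up above: each chunk of 64 entries records the first
// and the last partition that accesses any of its indices.
#include <limits>
#include <vector>

int main()
{
  const unsigned int chunk_size = 64; // chunk_size_zero_vector
  const unsigned int invalid    = std::numeric_limits<unsigned int>::max();
  std::vector<unsigned int> touched_first_by(8, invalid);
  std::vector<unsigned int> touched_last_by(8, invalid);

  // pretend partition 0 reads DoFs [0, 200) and partition 1 reads [128, 512)
  const auto touch = [&](const unsigned int partition,
                         const unsigned int begin,
                         const unsigned int end) {
    for (unsigned int i = begin; i < end; ++i)
      {
        const unsigned int chunk = i / chunk_size;
        if (touched_first_by[chunk] == invalid)
          touched_first_by[chunk] = partition;
        touched_last_by[chunk] = partition;
      }
  };
  touch(0, 0, 200);   // chunks 0..3 first touched by partition 0
  touch(1, 128, 512); // chunks 2..3 last touched by partition 1, 4..7 by 1
}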
+
+     // lambda to convert a map - with keys equal to the partition index
+     // and values listing the chunks (buckets of length
+     // chunk_size_zero_vector by which we sliced the index space) touched
+     // by that partition - into a "vector of vectors" like data structure.
+     // Rather than using actual vectors, we set up a sparsity-pattern like
+     // structure where one index specifies the start index
+     // (range_list_index), and the other the actual ranges (range_list).
+     auto convert_map_to_range_list =
+       [=](const unsigned int n_partitions,
+           const std::map<unsigned int, std::vector<unsigned int>> &ranges_in,
+           std::vector<unsigned int> &range_list_index,
+           std::vector<std::pair<unsigned int, unsigned int>> &range_list,
+           const unsigned int max_size) {
+         range_list_index.resize(n_partitions + 1);
+         range_list_index[0] = 0;
+         range_list.clear();
+         for (unsigned int partition = 0; partition < n_partitions;
+              ++partition)
+           {
+             auto it = ranges_in.find(partition);
+             if (it != ranges_in.end())
+               {
+                 for (unsigned int i = 0; i < it->second.size(); ++i)
+                   {
+                     const unsigned int first_i = i;
+                     while (i + 1 < it->second.size() &&
+                            it->second[i + 1] == it->second[i] + 1)
+                       ++i;
+                     range_list.emplace_back(
+                       std::min(it->second[first_i] * chunk_size_zero_vector,
+                                max_size),
+                       std::min((it->second[i] + 1) * chunk_size_zero_vector,
+                                max_size));
+                   }
+                 range_list_index[partition + 1] = range_list.size();
+               }
+             else
+               range_list_index[partition + 1] = range_list_index[partition];
+           }
+       };
+
+     // first we determine the ranges to zero the vector
+     std::map<unsigned int, std::vector<unsigned int>> chunk_must_zero_vector;
+     for (unsigned int i = 0; i < touched_first_by.size(); ++i)
+       chunk_must_zero_vector[touched_first_by[i]].push_back(i);
+     const unsigned int n_partitions =
+       task_info.partition_row_index[task_info.partition_row_index.size() - 2];
+     convert_map_to_range_list(n_partitions,
+                               chunk_must_zero_vector,
+                               vector_zero_range_list_index,
+                               vector_zero_range_list,
+                               vector_partitioner->local_size());
+
+     // the other two operations only work on the local range (without
+     // ghosts), so we skip the latter part of the vector now
+     touched_first_by.resize(
+       (vector_partitioner->local_size() + chunk_size_zero_vector - 1) /
+       chunk_size_zero_vector);
+
+     // set the import indices in the vector partitioner to one index higher
+     // to indicate that we want to process them first. This additional index
+     // is reflected in the argument 'n_partitions+1' in the
+     // convert_map_to_range_list call below.
+     for (auto it : vector_partitioner->import_indices())
+       for (unsigned int i = it.first; i < it.second; ++i)
+         touched_first_by[i / chunk_size_zero_vector] = n_partitions;
+     std::map<unsigned int, std::vector<unsigned int>> chunk_must_do_pre;
+     for (unsigned int i = 0; i < touched_first_by.size(); ++i)
+       chunk_must_do_pre[touched_first_by[i]].push_back(i);
+     convert_map_to_range_list(n_partitions + 1,
+                               chunk_must_do_pre,
+                               cell_loop_pre_list_index,
+                               cell_loop_pre_list,
+                               vector_partitioner->local_size());
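// A toy run (hypothetical input, not part of the patch) of the chunk
// compression performed by convert_map_to_range_list above: consecutive
// chunk indices merge into a single half-open range of vector entries,
// and the last range is clipped to max_size.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main()
{
  const unsigned int chunk_size = 64, max_size = 350;
  const std::vector<unsigned int> chunks = {0, 1, 2, 5}; // touched chunks
  std::vector<std::pair<unsigned int, unsigned int>> ranges;
  for (unsigned int i = 0; i < chunks.size(); ++i)
    {
      const unsigned int first_i = i;
      while (i + 1 < chunks.size() && chunks[i + 1] == chunks[i] + 1)
        ++i;
      ranges.emplace_back(std::min(chunks[first_i] * chunk_size, max_size),
                          std::min((chunks[i] + 1) * chunk_size, max_size));
    }
  for (const auto &range : ranges)
    std::printf("[%u, %u)\n", range.first, range.second); // [0,192) [320,350)
}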
+
+     touched_last_by.resize(
+       (vector_partitioner->local_size() + chunk_size_zero_vector - 1) /
+       chunk_size_zero_vector);
+
+     // set the indices which were not touched by the cell loop (i.e.,
+     // constrained indices) to the last valid partition index. Since
+     // partition_row_index contains one extra slot for ghosted faces (which
+     // are not part of the cell/face loops), we use the second to last entry
+     // in the partition list.
+     for (auto &index : touched_last_by)
+       if (index == numbers::invalid_unsigned_int)
+         index =
+           task_info
+             .partition_row_index[task_info.partition_row_index.size() - 2] -
+           1;
+     for (auto it : vector_partitioner->import_indices())
+       for (unsigned int i = it.first; i < it.second; ++i)
+         touched_last_by[i / chunk_size_zero_vector] = n_partitions;
+     std::map<unsigned int, std::vector<unsigned int>> chunk_must_do_post;
+     for (unsigned int i = 0; i < touched_last_by.size(); ++i)
+       chunk_must_do_post[touched_last_by[i]].push_back(i);
+     convert_map_to_range_list(n_partitions + 1,
+                               chunk_must_do_post,
+                               cell_loop_post_list_index,
+                               cell_loop_post_list,
+                               vector_partitioner->local_size());
    }
diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h
index 5b3cf356d9..eec8213bea 100644
--- a/include/deal.II/matrix_free/matrix_free.h
+++ b/include/deal.II/matrix_free/matrix_free.h
@@ -684,7 +684,7 @@ public:
  };

  /**
-  * @name 2: Loop over cells
+  * @name 2: Matrix-free loops
   */
  //@{
  /**
@@ -704,7 +704,11 @@ public:
   * type LinearAlgebra::distributed::Vector (or composite objects thereof
   * such as LinearAlgebra::distributed::BlockVector), the loop calls
   * LinearAlgebra::distributed::Vector::compress() at the end of the call
-  * internally.
+  * internally. For other vectors, including parallel Trilinos or PETSc
+  * vectors, no such call is issued. Note that Trilinos/Epetra or PETSc
+  * vectors currently do not work in parallel because the present class
+  * uses MPI-local index addressing, as opposed to the global addressing
+  * implied by those external libraries.
   *
   * @param src Input vector. If the vector is of type
   * LinearAlgebra::distributed::Vector (or composite objects thereof such as
@@ -756,7 +760,11 @@ public:
   * type LinearAlgebra::distributed::Vector (or composite objects thereof
   * such as LinearAlgebra::distributed::BlockVector), the loop calls
   * LinearAlgebra::distributed::Vector::compress() at the end of the call
-  * internally.
+  * internally. For other vectors, including parallel Trilinos or PETSc
+  * vectors, no such call is issued. Note that Trilinos/Epetra or PETSc
+  * vectors currently do not work in parallel because the present class
+  * uses MPI-local index addressing, as opposed to the global addressing
+  * implied by those external libraries.
   *
   * @param src Input vector. If the vector is of type
   * LinearAlgebra::distributed::Vector (or composite objects thereof such as
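For orientation, the interface documented above is typically invoked like this (a minimal usage sketch; the operator class and its `local_apply` member are hypothetical):

    // Run the cell integrals through a member function of a user-defined
    // operator class; with zero_dst_vector = true, dst is zeroed chunk-wise
    // right before its entries are first written.
    matrix_free.cell_loop(&LaplaceOperator<dim, Number>::local_apply,
                          &laplace_operator,
                          dst,
                          src,
                          /*zero_dst_vector = */ true);

The overloads added below extend exactly this call with two functors that are interleaved with the cell work.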
@@ -802,6 +810,144 @@ public:
            const InVector &src,
            const bool      zero_dst_vector = false) const;

+ /**
+  * This function is similar to the cell_loop with an std::function object
+  * to specify the operation to be performed on cells, but adds two
+  * additional functors to execute some additional work before and after
+  * the cell integrals are computed.
+  *
+  * The two additional functors work on a range of degrees of freedom,
+  * expressed in terms of the degree-of-freedom numbering of the selected
+  * DoFHandler `dof_handler_index_pre_post` in MPI-local indices. The
+  * arguments to the functors represent a range of degrees of freedom at a
+  * granularity of
+  * internal::MatrixFreeFunctions::DoFInfo::chunk_size_zero_vector entries
+  * (except for the last chunk, which is clipped to the number of locally
+  * owned entries) in the form `[first, last)`. The idea of these functors
+  * is to bring operations on vectors closer to the point where they are
+  * accessed in a matrix-free loop, with the goal of increasing cache hits
+  * by temporal locality. This loop guarantees that the
+  * `operation_before_loop` hits all relevant unknowns before they are
+  * first touched by the `cell_operation` (including the MPI data
+  * exchange), allowing one to execute some vector update that the `src`
+  * vector depends upon. The `operation_after_loop` is similar - it starts
+  * to execute on a range of DoFs once all DoFs in that range have been
+  * touched for the last time by the `cell_operation` (including the MPI
+  * data exchange), allowing one, e.g., to compute some vector operations
+  * that depend on the result of the current cell loop in `dst` or to
+  * modify `src`. The efficiency of caching depends on the numbering of
+  * the degrees of freedom because of the granularity of the ranges.
+  *
+  * @param cell_operation Pointer to member function of `CLASS` with the
+  * signature `cell_operation (const MatrixFree &, OutVector &,
+  * const InVector &, const std::pair<unsigned int, unsigned int> &)`
+  * where the first argument passes the data of the calling class and the
+  * last argument defines the range of cells which should be worked on
+  * (typically more than one cell should be worked on in order to reduce
+  * overheads).
+  *
+  * @param owning_class The object which provides the `cell_operation`
+  * call. To be compatible with this interface, the class must allow
+  * calling `owning_class->cell_operation(...)`.
+  *
+  * @param dst Destination vector holding the result. If the vector is of
+  * type LinearAlgebra::distributed::Vector (or composite objects thereof
+  * such as LinearAlgebra::distributed::BlockVector), the loop calls
+  * LinearAlgebra::distributed::Vector::compress() at the end of the call
+  * internally. For other vectors, including parallel Trilinos or PETSc
+  * vectors, no such call is issued. Note that Trilinos/Epetra or PETSc
+  * vectors currently do not work in parallel because the present class
+  * uses MPI-local index addressing, as opposed to the global addressing
+  * implied by those external libraries.
+  *
+  * @param src Input vector. If the vector is of type
+  * LinearAlgebra::distributed::Vector (or composite objects thereof such
+  * as LinearAlgebra::distributed::BlockVector), the loop calls
+  * LinearAlgebra::distributed::Vector::update_ghost_values() at the start
+  * of the call internally to make sure all necessary data is locally
+  * available. Note, however, that the vector is reset to its original
+  * state at the end of the loop, i.e., if the vector was not ghosted upon
+  * entry of the loop, it will not be ghosted upon finishing the loop.
+  *
+  * @param operation_before_loop This functor can be used to perform an
+  * operation on entries of the `src` and `dst` vectors (or other vectors)
+  * before the operation on cells first touches a particular DoF according
+  * to the general description in the text above. This function is passed
+  * a range of the locally owned degrees of freedom on the selected
+  * `dof_handler_index_pre_post` (in MPI-local numbering).
+  *
+  * @param operation_after_loop This functor can be used to perform an
+  * operation on entries of the `src` and `dst` vectors (or other vectors)
+  * after the operation on cells last touches a particular DoF according
+  * to the general description in the text above. This function is passed
+  * a range of the locally owned degrees of freedom on the selected
+  * `dof_handler_index_pre_post` (in MPI-local numbering).
+  *
+  * @param dof_handler_index_pre_post Since MatrixFree can be initialized
+  * with a vector of DoFHandler objects, each of them will in general have
+  * different vector sizes and thus different ranges returned to
+  * `operation_before_loop` and `operation_after_loop`. Use this variable
+  * to specify which one of the DoFHandler objects the index range should
+  * be associated with. Defaults to the `dof_handler_index` 0.
+  *
+  * @note The close locality of the `operation_before_loop` and
+  * `operation_after_loop` to the cell work is currently only implemented
+  * for the MPI-only case. In case threading is enabled, the complete
+  * `operation_before_loop` is scheduled before the parallel loop, and
+  * `operation_after_loop` is scheduled strictly afterwards, due to the
+  * complicated dependencies.
+  */
+ template <typename CLASS, typename OutVector, typename InVector>
+ void
+ cell_loop(void (CLASS::*cell_operation)(
+             const MatrixFree &,
+             OutVector &,
+             const InVector &,
+             const std::pair<unsigned int, unsigned int> &) const,
+           const CLASS *   owning_class,
+           OutVector &     dst,
+           const InVector &src,
+           const std::function<void(const unsigned int, const unsigned int)>
+             &operation_before_loop,
+           const std::function<void(const unsigned int, const unsigned int)>
+             &operation_after_loop,
+           const unsigned int dof_handler_index_pre_post = 0) const;
+
+ /**
+  * Same as above, but for class member functions which are non-const.
+  */
+ template <typename CLASS, typename OutVector, typename InVector>
+ void
+ cell_loop(void (CLASS::*cell_operation)(
+             const MatrixFree &,
+             OutVector &,
+             const InVector &,
+             const std::pair<unsigned int, unsigned int> &),
+           CLASS *         owning_class,
+           OutVector &     dst,
+           const InVector &src,
+           const std::function<void(const unsigned int, const unsigned int)>
+             &operation_before_loop,
+           const std::function<void(const unsigned int, const unsigned int)>
+             &operation_after_loop,
+           const unsigned int dof_handler_index_pre_post = 0) const;
+
+ /**
+  * Same as above, but taking an `std::function` as the `cell_operation`
+  * rather than a class member function.
+  */
+ template <typename OutVector, typename InVector>
+ void
+ cell_loop(const std::function<void(
+             const MatrixFree &,
+             OutVector &,
+             const InVector &,
+             const std::pair<unsigned int, unsigned int> &)> &cell_operation,
+           OutVector &     dst,
+           const InVector &src,
+           const std::function<void(const unsigned int, const unsigned int)>
+             &operation_before_loop,
+           const std::function<void(const unsigned int, const unsigned int)>
+             &operation_after_loop,
+           const unsigned int dof_handler_index_pre_post = 0) const;
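// Hypothetical usage sketch of the overloads declared above (not part of
// the patch; the operator class and the vectors `direction`, `residual`
// and the scalars `alpha`, `omega` are made up): fuse the vector updates
// surrounding a matrix-vector product into the cell loop, so every entry
// is updated while its cache lines are still hot.
matrix_free.cell_loop(
  &LaplaceOperator<dim, Number>::local_apply,
  &laplace_operator,
  dst,
  src,
  // runs on each MPI-local range [first, last) before the cell integrals
  // first read those entries of `src`
  [&](const unsigned int first, const unsigned int last) {
    for (unsigned int i = first; i < last; ++i)
      src.local_element(i) += alpha * direction.local_element(i);
  },
  // runs once the cell integrals have written those entries of `dst` for
  // the last time
  [&](const unsigned int first, const unsigned int last) {
    for (unsigned int i = first; i < last; ++i)
      residual.local_element(i) -= omega * dst.local_element(i);
  });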
+
  /**
   * This method runs a loop over all cells (in parallel) and performs the MPI
   * data exchange on the source vector and destination vector. As opposed to
@@ -3334,20 +3480,11 @@ namespace internal
               dof_info.vector_zero_range_list_index[range_index];
             id != dof_info.vector_zero_range_list_index[range_index + 1];
             ++id)
-         {
-           const unsigned int start_pos =
-             dof_info.vector_zero_range_list[id] *
-             internal::MatrixFreeFunctions::DoFInfo::chunk_size_zero_vector;
-           const unsigned int end_pos =
-             std::min((dof_info.vector_zero_range_list[id] + 1) *
-                        internal::MatrixFreeFunctions::DoFInfo::
-                          chunk_size_zero_vector,
-                      dof_info.vector_partitioner->local_size() +
-                        dof_info.vector_partitioner->n_ghost_indices());
-           std::memset(vec.begin() + start_pos,
-                       0,
-                       (end_pos - start_pos) * sizeof(Number));
-         }
+         std::memset(vec.begin() + dof_info.vector_zero_range_list[id].first,
+                     0,
+                     (dof_info.vector_zero_range_list[id].second -
+                      dof_info.vector_zero_range_list[id].first) *
+                       sizeof(Number));
      }
  }
@@ -4066,7 +4203,12 @@ namespace internal
        const typename MF::DataAccessOnFaces src_vector_face_access =
          MF::DataAccessOnFaces::none,
        const typename MF::DataAccessOnFaces dst_vector_face_access =
-         MF::DataAccessOnFaces::none)
+         MF::DataAccessOnFaces::none,
+       const std::function<void(const unsigned int, const unsigned int)>
+         &operation_before_loop = {},
+       const std::function<void(const unsigned int, const unsigned int)>
+         &operation_after_loop = {},
+       const unsigned int dof_handler_index_pre_post = 0)
        : matrix_free(matrix_free)
        , container(const_cast<Container &>(container))
        , cell_function(cell_function)
@@ -4083,6 +4225,9 @@ namespace internal
        , src_and_dst_are_same(PointerComparison::equal(&src, &dst))
        , zero_dst_vector_setting(zero_dst_vector_setting &&
                                  !src_and_dst_are_same)
+       , operation_before_loop(operation_before_loop)
+       , operation_after_loop(operation_after_loop)
+       , dof_handler_index_pre_post(dof_handler_index_pre_post)
      {}

      // Runs the cell work. If no function is given, nothing is done
@@ -4159,6 +4304,41 @@ namespace internal
        internal::zero_vector_region(range_index, dst, dst_data_exchanger);
      }

+     virtual void
+     cell_loop_pre_range(const unsigned int range_index) override
+     {
+       if (operation_before_loop)
+         {
+           const internal::MatrixFreeFunctions::DoFInfo &dof_info =
+             matrix_free.get_dof_info(dof_handler_index_pre_post);
+           AssertIndexRange(range_index,
+                            dof_info.cell_loop_pre_list_index.size() - 1);
+           for (unsigned int id =
+                  dof_info.cell_loop_pre_list_index[range_index];
+                id != dof_info.cell_loop_pre_list_index[range_index + 1];
+                ++id)
+             operation_before_loop(dof_info.cell_loop_pre_list[id].first,
+                                   dof_info.cell_loop_pre_list[id].second);
+         }
+     }
+
+     virtual void
+     cell_loop_post_range(const unsigned int range_index) override
+     {
+       if (operation_after_loop)
+         {
+           const internal::MatrixFreeFunctions::DoFInfo &dof_info =
+             matrix_free.get_dof_info(dof_handler_index_pre_post);
+           AssertIndexRange(range_index,
+                            dof_info.cell_loop_post_list_index.size() - 1);
+           for (unsigned int id =
+                  dof_info.cell_loop_post_list_index[range_index];
+                id != dof_info.cell_loop_post_list_index[range_index + 1];
+                ++id)
+             operation_after_loop(dof_info.cell_loop_post_list[id].first,
+                                  dof_info.cell_loop_post_list[id].second);
+         }
+     }
+
    private:
      const MF &  matrix_free;
      Container & container;
@@ -4178,6 +4358,11 @@ namespace internal
        dst_data_exchanger;
      const bool src_and_dst_are_same;
      const bool zero_dst_vector_setting;
+     const std::function<void(const unsigned int, const unsigned int)>
+       operation_before_loop;
+     const std::function<void(const unsigned int, const unsigned int)>
+       operation_after_loop;
+     const unsigned int dof_handler_index_pre_post;
    };
@@ -4280,6 +4465,52 @@ MatrixFree<dim, Number, VectorizedArrayType>::cell_loop(



+template <int dim, typename Number, typename VectorizedArrayType>
+template <typename OutVector, typename InVector>
+inline void
+MatrixFree<dim, Number, VectorizedArrayType>::cell_loop(
+  const std::function<void(
+    const MatrixFree<dim, Number, VectorizedArrayType> &,
+    OutVector &,
+    const InVector &,
+    const std::pair<unsigned int, unsigned int> &)> &cell_operation,
+  OutVector &                                        dst,
+  const InVector &                                   src,
+  const std::function<void(const unsigned int, const unsigned int)>
+    &operation_before_loop,
+  const std::function<void(const unsigned int, const unsigned int)>
+    &operation_after_loop,
+  const unsigned int dof_handler_index_pre_post) const
+{
+  using Wrapper =
+    internal::MFClassWrapper<MatrixFree<dim, Number, VectorizedArrayType>,
+                             InVector,
+                             OutVector>;
+  Wrapper wrap(cell_operation, nullptr, nullptr);
+  internal::MFWorker<MatrixFree<dim, Number, VectorizedArrayType>,
+                     InVector,
+                     OutVector,
+                     Wrapper,
+                     true>
+    worker(*this,
+           src,
+           dst,
+           false,
+           wrap,
+           &Wrapper::cell_integrator,
+           &Wrapper::face_integrator,
+           &Wrapper::boundary_integrator,
+           DataAccessOnFaces::none,
+           DataAccessOnFaces::none,
+           operation_before_loop,
+           operation_after_loop,
+           dof_handler_index_pre_post);
+
+  task_info.loop(worker);
+}
+
+
+
 template <int dim, typename Number, typename VectorizedArrayType>
 template <typename CLASS, typename OutVector, typename InVector>
 inline void
@@ -4363,6 +4594,47 @@ MatrixFree<dim, Number, VectorizedArrayType>::cell_loop(



+template <int dim, typename Number, typename VectorizedArrayType>
+template <typename CLASS, typename OutVector, typename InVector>
+inline void
+MatrixFree<dim, Number, VectorizedArrayType>::cell_loop(
+  void (CLASS::*function_pointer)(
+    const MatrixFree<dim, Number, VectorizedArrayType> &,
+    OutVector &,
+    const InVector &,
+    const std::pair<unsigned int, unsigned int> &) const,
+  const CLASS *   owning_class,
+  OutVector &     dst,
+  const InVector &src,
+  const std::function<void(const unsigned int, const unsigned int)>
+    &operation_before_loop,
+  const std::function<void(const unsigned int, const unsigned int)>
+    &operation_after_loop,
+  const unsigned int dof_handler_index_pre_post) const
+{
+  internal::MFWorker<MatrixFree<dim, Number, VectorizedArrayType>,
+                     InVector,
+                     OutVector,
+                     CLASS,
+                     true>
+    worker(*this,
+           src,
+           dst,
+           false,
+           *owning_class,
+           function_pointer,
+           nullptr,
+           nullptr,
+           DataAccessOnFaces::none,
+           DataAccessOnFaces::none,
+           operation_before_loop,
+           operation_after_loop,
+           dof_handler_index_pre_post);
+  task_info.loop(worker);
+}
+
+
+
 template <int dim, typename Number, typename VectorizedArrayType>
 template <typename CLASS, typename OutVector, typename InVector>
 inline void
@@ -4441,6 +4713,47 @@ MatrixFree<dim, Number, VectorizedArrayType>::cell_loop(



+template <int dim, typename Number, typename VectorizedArrayType>
+template <typename CLASS, typename OutVector, typename InVector>
+inline void
+MatrixFree<dim, Number, VectorizedArrayType>::cell_loop(
+  void (CLASS::*function_pointer)(
+    const MatrixFree<dim, Number, VectorizedArrayType> &,
+    OutVector &,
+    const InVector &,
+    const std::pair<unsigned int, unsigned int> &),
+  CLASS *         owning_class,
+  OutVector &     dst,
+  const InVector &src,
+  const std::function<void(const unsigned int, const unsigned int)>
+    &operation_before_loop,
+  const std::function<void(const unsigned int, const unsigned int)>
+    &operation_after_loop,
+  const unsigned int dof_handler_index_pre_post) const
+{
+  internal::MFWorker<MatrixFree<dim, Number, VectorizedArrayType>,
+                     InVector,
+                     OutVector,
+                     CLASS,
+                     false>
+    worker(*this,
+           src,
+           dst,
+           false,
+           *owning_class,
+           function_pointer,
+           nullptr,
+           nullptr,
+           DataAccessOnFaces::none,
+           DataAccessOnFaces::none,
+           operation_before_loop,
+           operation_after_loop,
+           dof_handler_index_pre_post);
+  task_info.loop(worker);
+}
+
+
+
 template <int dim, typename Number, typename VectorizedArrayType>
 template <typename CLASS, typename OutVector, typename InVector>
 inline void
diff --git a/include/deal.II/matrix_free/task_info.h b/include/deal.II/matrix_free/task_info.h
index c8e59a777b..4f48a406ad 100644
--- a/include/deal.II/matrix_free/task_info.h
+++ b/include/deal.II/matrix_free/task_info.h
@@ -67,6 +67,12 @@ namespace internal
      virtual void
      zero_dst_vector_range(const unsigned int range_index) = 0;

+     virtual void
+     cell_loop_pre_range(const unsigned int range_index) = 0;
+
+     virtual void
+     cell_loop_post_range(const unsigned int range_index) = 0;
+
      /// Runs the cell work specified by MatrixFree::loop or
      /// MatrixFree::cell_loop
      virtual void
diff --git a/source/matrix_free/task_info.cc b/source/matrix_free/task_info.cc
index 7cf48c8b1e..9cf2d3f1a1 100644
--- a/source/matrix_free/task_info.cc
+++ b/source/matrix_free/task_info.cc
@@ -336,6 +336,10 @@ namespace internal
    void
    TaskInfo::loop(MFWorkerInterface &funct) const
    {
+     if (scheme == none)
+       funct.cell_loop_pre_range(
+         partition_row_index[partition_row_index.size() - 2]);
+
      funct.vector_update_ghosts_start();

#ifdef DEAL_II_WITH_THREADS
@@ -584,10 +588,11 @@ namespace internal
                 i < partition_row_index[part + 1];
                 ++i)
              {
+               funct.cell_loop_pre_range(i);
+               funct.zero_dst_vector_range(i);
                AssertIndexRange(i + 1, cell_partition_data.size());
                if (cell_partition_data[i + 1] > cell_partition_data[i])
                  {
-                   funct.zero_dst_vector_range(i);
                    funct.cell(std::make_pair(cell_partition_data[i],
                                              cell_partition_data[i + 1]));
                  }
@@ -603,6 +608,7 @@ namespace internal
                      std::make_pair(boundary_partition_data[i],
                                     boundary_partition_data[i + 1]));
                  }
+               funct.cell_loop_post_range(i);
              }

            if (part == 1)
@@ -610,6 +616,9 @@ namespace internal
          }
      }
    funct.vector_compress_finish();
+   if (scheme == none)
+     funct.cell_loop_post_range(
+       partition_row_index[partition_row_index.size() - 2]);
  }
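To summarize the schedule that the serial (`scheme == none`) branch above implements, here is a toy mock (not deal.II code; the worker and partition count are made up, and face work is omitted) that prints the order in which the hooks fire around the MPI communication:

    #include <cstdio>

    // Stand-in for MFWorkerInterface with the two new hooks.
    struct MockWorker
    {
      void cell_loop_pre_range(unsigned int p)   { std::printf("pre  %u\n", p); }
      void zero_dst_vector_range(unsigned int p) { std::printf("zero %u\n", p); }
      void cell(unsigned int p)                  { std::printf("cell %u\n", p); }
      void cell_loop_post_range(unsigned int p)  { std::printf("post %u\n", p); }
    };

    int main()
    {
      MockWorker         funct;
      const unsigned int n_partitions = 3;

      // the extra slot 'n_partitions' holds the ranges covering the import
      // indices, so its pre-operation runs before the ghost exchange starts
      funct.cell_loop_pre_range(n_partitions);
      // ... ghost exchange (vector_update_ghosts_start/finish) goes here ...
      for (unsigned int i = 0; i < n_partitions; ++i)
        {
          funct.cell_loop_pre_range(i);   // operation_before_loop on ranges
          funct.zero_dst_vector_range(i); // chunk-wise zeroing of dst
          funct.cell(i);                  // cell (and face) work
          funct.cell_loop_post_range(i);  // operation_after_loop on ranges
        }
      // ... compression (vector_compress_start/finish) goes here ...
      funct.cell_loop_post_range(n_partitions);
    }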