From: Martin Kronbichler
Date: Fri, 8 May 2020 09:39:15 +0000 (+0200)
Subject: Fix matrix-free pre/post loops with threads
X-Git-Tag: v9.2.0-rc1~67^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=dbe71e890d6351a1aeadd326058a161363dbb7d5;p=dealii.git

Fix matrix-free pre/post loops with threads
---

diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h
index 4427698ade..67c262b017 100644
--- a/include/deal.II/matrix_free/matrix_free.h
+++ b/include/deal.II/matrix_free/matrix_free.h
@@ -4568,13 +4568,26 @@ namespace internal
         {
           const internal::MatrixFreeFunctions::DoFInfo &dof_info =
             matrix_free.get_dof_info(dof_handler_index_pre_post);
-          AssertIndexRange(range_index,
-                           dof_info.cell_loop_pre_list_index.size() - 1);
-          for (unsigned int id = dof_info.cell_loop_pre_list_index[range_index];
-               id != dof_info.cell_loop_pre_list_index[range_index + 1];
-               ++id)
-            operation_before_loop(dof_info.cell_loop_pre_list[id].first,
-                                  dof_info.cell_loop_pre_list[id].second);
+          if (range_index == numbers::invalid_unsigned_int)
+            {
+              // Case with threaded loop -> currently no overlap implemented
+              parallel::apply_to_subranges(
+                0U,
+                dof_info.vector_partitioner->local_size(),
+                operation_before_loop,
+                internal::VectorImplementation::minimum_parallel_grain_size);
+            }
+          else
+            {
+              AssertIndexRange(range_index,
+                               dof_info.cell_loop_pre_list_index.size() - 1);
+              for (unsigned int id =
+                     dof_info.cell_loop_pre_list_index[range_index];
+                   id != dof_info.cell_loop_pre_list_index[range_index + 1];
+                   ++id)
+                operation_before_loop(dof_info.cell_loop_pre_list[id].first,
+                                      dof_info.cell_loop_pre_list[id].second);
+            }
         }
     }
 
@@ -4585,14 +4598,26 @@ namespace internal
         {
           const internal::MatrixFreeFunctions::DoFInfo &dof_info =
             matrix_free.get_dof_info(dof_handler_index_pre_post);
-          AssertIndexRange(range_index,
-                           dof_info.cell_loop_post_list_index.size() - 1);
-          for (unsigned int id =
-                 dof_info.cell_loop_post_list_index[range_index];
-               id != dof_info.cell_loop_post_list_index[range_index + 1];
-               ++id)
-            operation_after_loop(dof_info.cell_loop_post_list[id].first,
-                                 dof_info.cell_loop_post_list[id].second);
+          if (range_index == numbers::invalid_unsigned_int)
+            {
+              // Case with threaded loop -> currently no overlap implemented
+              parallel::apply_to_subranges(
+                0U,
+                dof_info.vector_partitioner->local_size(),
+                operation_after_loop,
+                internal::VectorImplementation::minimum_parallel_grain_size);
+            }
+          else
+            {
+              AssertIndexRange(range_index,
+                               dof_info.cell_loop_post_list_index.size() - 1);
+              for (unsigned int id =
+                     dof_info.cell_loop_post_list_index[range_index];
+                   id != dof_info.cell_loop_post_list_index[range_index + 1];
+                   ++id)
+                operation_after_loop(dof_info.cell_loop_post_list[id].first,
+                                     dof_info.cell_loop_post_list[id].second);
+            }
         }
     }
 
diff --git a/source/matrix_free/task_info.cc b/source/matrix_free/task_info.cc
index cbae4ea508..831b23fc0b 100644
--- a/source/matrix_free/task_info.cc
+++ b/source/matrix_free/task_info.cc
@@ -338,7 +338,13 @@ namespace internal
     void
     TaskInfo::loop(MFWorkerInterface &funct) const
     {
-      if (scheme == none)
+      // If we use thread parallelism, we do not currently support scheduling
+      // pieces of the updates within the loop, so this index collects all
+      // calls in that case, which then work like a single complete loop over
+      // all cells
+      if (scheme != none)
+        funct.cell_loop_pre_range(numbers::invalid_unsigned_int);
+      else
         funct.cell_loop_pre_range(
           partition_row_index[partition_row_index.size() - 2]);
 
@@ -618,7 +624,10 @@ namespace internal
             }
         }
       funct.vector_compress_finish();
-      if (scheme == none)
+
+      if (scheme != none)
+        funct.cell_loop_post_range(numbers::invalid_unsigned_int);
+      else
         funct.cell_loop_post_range(
           partition_row_index[partition_row_index.size() - 2]);
     }
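
For reference, a minimal usage sketch (not part of the commit) of the pre/post hooks whose threaded code path is fixed above. It assumes the public MatrixFree::cell_loop overload that accepts std::function arguments for the cell operation and for operation_before_loop / operation_after_loop; the helper name apply_with_pre_post, the empty cell operation, and the particular zeroing/scaling updates are placeholders for illustration only.

// Sketch only, not part of the patch: how operation_before_loop and
// operation_after_loop are typically passed to MatrixFree::cell_loop.
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>

#include <functional>

using namespace dealii;

template <int dim, typename Number>
void
apply_with_pre_post(const MatrixFree<dim, Number> &                   matrix_free,
                    LinearAlgebra::distributed::Vector<Number> &      dst,
                    const LinearAlgebra::distributed::Vector<Number> &src)
{
  using VectorType = LinearAlgebra::distributed::Vector<Number>;

  const std::function<void(const MatrixFree<dim, Number> &,
                           VectorType &,
                           const VectorType &,
                           const std::pair<unsigned int, unsigned int> &)>
    cell_operation = [](const MatrixFree<dim, Number> &,
                        VectorType &,
                        const VectorType &,
                        const std::pair<unsigned int, unsigned int> &) {
      // cell integrals (e.g. via FEEvaluation) would go here
    };

  // Called on contiguous ranges of locally owned vector entries before the
  // cells touching them are processed; with the threaded scheduler it is now
  // invoked once for the whole locally owned range (the range_index ==
  // numbers::invalid_unsigned_int branch in the hunks above).
  const std::function<void(const unsigned int, const unsigned int)>
    operation_before_loop =
      [&](const unsigned int begin, const unsigned int end) {
        for (unsigned int i = begin; i < end; ++i)
          dst.local_element(i) = 0.;
      };

  // Called on the same ranges after all cell contributions have been added.
  const std::function<void(const unsigned int, const unsigned int)>
    operation_after_loop =
      [&](const unsigned int begin, const unsigned int end) {
        for (unsigned int i = begin; i < end; ++i)
          dst.local_element(i) *= 0.5;
      };

  matrix_free.cell_loop(
    cell_operation, dst, src, operation_before_loop, operation_after_loop);
}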
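
The threaded fallback added in the matrix_free.h hunks performs one pass over all locally owned vector entries, split into chunks by parallel::apply_to_subranges(). Below is a standalone sketch of that pattern (again not part of the commit); the helper name, the zeroing operation, and the grain size of 1024 stand in for operation_before_loop / operation_after_loop and internal::VectorImplementation::minimum_parallel_grain_size.

// Sketch only: the apply_to_subranges pattern used for the threaded case.
// The range [0, local_size) is split into chunks and the callable is applied
// to each chunk (in parallel when deal.II is configured with threading).
#include <deal.II/base/parallel.h>
#include <deal.II/lac/la_parallel_vector.h>

using namespace dealii;

template <typename Number>
void
apply_over_whole_local_range(LinearAlgebra::distributed::Vector<Number> &vec)
{
  // Stand-in for a user-provided operation_before_loop / operation_after_loop
  const auto operation = [&](const unsigned int begin, const unsigned int end) {
    for (unsigned int i = begin; i < end; ++i)
      vec.local_element(i) = 0.;
  };

  parallel::apply_to_subranges(0U,
                               static_cast<unsigned int>(vec.local_size()),
                               operation,
                               1024U);
}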