From: Wolfgang Bangerth
Date: Fri, 6 Nov 2015 07:49:36 +0000 (-0600)
Subject: Undo parallelization in FESystem.
X-Git-Tag: v8.4.0-rc2~243^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7eea997352f6a1ddbeaaaf3daaa040edb58c7df8;p=dealii.git

Undo parallelization in FESystem.

As explained in comments, parallelizing this sort of operation destroys
data locality in NUMA contexts.
---

diff --git a/source/fe/fe_system.cc b/source/fe/fe_system.cc
index bbcf05c3ca..4570406cfa 100644
--- a/source/fe/fe_system.cc
+++ b/source/fe/fe_system.cc
@@ -873,34 +873,30 @@ FESystem<dim,spacedim>::get_data (const UpdateFlags flags,
   data->update_each = requires_update_flags(flags);
 
   // get data objects from each of the base elements and store
-  // them. do the creation of these objects in parallel as their
-  // creation may be expensive (because we precompute a bunch of
-  // things)
-  std::vector<Threads::Task<typename FiniteElement<dim,spacedim>::InternalDataBase *> >
-  get_data_tasks (this->n_base_elements());
-  for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
-    get_data_tasks[base_no] = Threads::new_task (std_cxx11::function<typename FiniteElement<dim,spacedim>::InternalDataBase * ()>
-                                                 (std_cxx11::bind(&FiniteElement<dim,spacedim>::get_data,
-                                                                  std_cxx11::cref(base_element(base_no)),
-                                                                  std_cxx11::cref(flags),
-                                                                  std_cxx11::cref(mapping),
-                                                                  std_cxx11::cref(quadrature))));
-
-  // then wait for each of these calls to finish in turn and initialize
-  // these objects
+  // them. one might think that doing this in parallel (over the
+  // base elements) would be a good idea, but this turns out to
+  // be wrong because we would then run these jobs on different
+  // threads/processors and this allocates memory in different
+  // NUMA domains; this has large detrimental effects when later
+  // writing into these objects in fill_fe_*_values. all of this
+  // is particularly true when using FEValues objects in
+  // WorkStream contexts where we explicitly make sure that
+  // every function only uses objects previously allocated
+  // in the same NUMA context and on the same thread as the
+  // function is called
   for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
     {
       typename FiniteElement<dim,spacedim>::InternalDataBase *base_fe_data =
-        get_data_tasks[base_no].return_value();
+        base_element(base_no).get_data (flags, mapping, quadrature);
 
       internal::FEValues::FiniteElementRelatedData<dim,spacedim> &base_fe_output_object
         = data->get_fe_output_object(base_no);
       base_fe_output_object.initialize (quadrature.size(), base_element(base_no),
                                         flags | base_fe_data->update_each);
 
-      // then store the pointer to the base internal object
       data->set_fe_data(base_no, base_fe_data);
     }
+
   return data;
 }
 
@@ -924,34 +920,30 @@ FESystem<dim,spacedim>::get_face_data (const UpdateFlags flags,
   data->update_each = requires_update_flags(flags);
 
   // get data objects from each of the base elements and store
-  // them. do the creation of these objects in parallel as their
-  // creation may be expensive (because we precompute a bunch of
-  // things)
-  std::vector<Threads::Task<typename FiniteElement<dim,spacedim>::InternalDataBase *> >
-  get_data_tasks (this->n_base_elements());
-  for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
-    get_data_tasks[base_no] = Threads::new_task (std_cxx11::function<typename FiniteElement<dim,spacedim>::InternalDataBase * ()>
-                                                 (std_cxx11::bind(&FiniteElement<dim,spacedim>::get_face_data,
-                                                                  std_cxx11::cref(base_element(base_no)),
-                                                                  std_cxx11::cref(flags),
-                                                                  std_cxx11::cref(mapping),
-                                                                  std_cxx11::cref(quadrature))));
-
-  // then wait for each of these calls to finish in turn and initialize
-  // these objects
+  // them. one might think that doing this in parallel (over the
+  // base elements) would be a good idea, but this turns out to
+  // be wrong because we would then run these jobs on different
+  // threads/processors and this allocates memory in different
+  // NUMA domains; this has large detrimental effects when later
+  // writing into these objects in fill_fe_*_values. all of this
+  // is particularly true when using FEValues objects in
+  // WorkStream contexts where we explicitly make sure that
+  // every function only uses objects previously allocated
+  // in the same NUMA context and on the same thread as the
+  // function is called
   for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
     {
       typename FiniteElement<dim,spacedim>::InternalDataBase *base_fe_data =
-        get_data_tasks[base_no].return_value();
+        base_element(base_no).get_face_data (flags, mapping, quadrature);
 
       internal::FEValues::FiniteElementRelatedData<dim,spacedim> &base_fe_output_object
         = data->get_fe_output_object(base_no);
       base_fe_output_object.initialize (quadrature.size(), base_element(base_no),
                                         flags | base_fe_data->update_each);
 
-      // then store the pointer to the base internal object
       data->set_fe_data(base_no, base_fe_data);
     }
+
   return data;
 }
 
@@ -977,34 +969,30 @@ FESystem<dim,spacedim>::get_subface_data (const UpdateFlags flags,
   data->update_each = requires_update_flags(flags);
 
   // get data objects from each of the base elements and store
-  // them. do the creation of these objects in parallel as their
-  // creation may be expensive (because we precompute a bunch of
-  // things)
-  std::vector<Threads::Task<typename FiniteElement<dim,spacedim>::InternalDataBase *> >
-  get_data_tasks (this->n_base_elements());
-  for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
-    get_data_tasks[base_no] = Threads::new_task (std_cxx11::function<typename FiniteElement<dim,spacedim>::InternalDataBase * ()>
-                                                 (std_cxx11::bind(&FiniteElement<dim,spacedim>::get_subface_data,
-                                                                  std_cxx11::cref(base_element(base_no)),
-                                                                  std_cxx11::cref(flags),
-                                                                  std_cxx11::cref(mapping),
-                                                                  std_cxx11::cref(quadrature))));
-
-  // then wait for each of these calls to finish in turn and initialize
-  // these objects
+  // them. one might think that doing this in parallel (over the
+  // base elements) would be a good idea, but this turns out to
+  // be wrong because we would then run these jobs on different
+  // threads/processors and this allocates memory in different
+  // NUMA domains; this has large detrimental effects when later
+  // writing into these objects in fill_fe_*_values. all of this
+  // is particularly true when using FEValues objects in
+  // WorkStream contexts where we explicitly make sure that
+  // every function only uses objects previously allocated
+  // in the same NUMA context and on the same thread as the
+  // function is called
   for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
     {
       typename FiniteElement<dim,spacedim>::InternalDataBase *base_fe_data =
-        get_data_tasks[base_no].return_value();
+        base_element(base_no).get_subface_data (flags, mapping, quadrature);
 
       internal::FEValues::FiniteElementRelatedData<dim,spacedim> &base_fe_output_object
         = data->get_fe_output_object(base_no);
       base_fe_output_object.initialize (quadrature.size(), base_element(base_no),
                                         flags | base_fe_data->update_each);
 
-      // then store the pointer to the base internal object
       data->set_fe_data(base_no, base_fe_data);
     }
+
   return data;
 }
 
@@ -1245,24 +1233,17 @@ compute_fill (const Mapping<dim,spacedim> &mapping,
 
   if (flags & (update_values | update_gradients
                | update_hessians | update_3rd_derivatives ))
-    {
-      // let base elements update the necessary data
-      Threads::TaskGroup<> task_group;
-      for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
-        task_group
-        += Threads::new_task (std_cxx11::function<void ()>(std_cxx11::bind (&FESystem<dim,spacedim>::template compute_fill_one_base<dim_1>,
-                                                           this,
-                                                           std_cxx11::cref(mapping),
-                                                           std::make_pair(cell, CellSimilarity::none),
-                                                           std::make_pair(face_no, sub_no),
-                                                           std_cxx11::cref(quadrature),
-                                                           std::make_pair(&mapping_internal,
-                                                                          &fedata),
-                                                           base_no,
-                                                           std_cxx11::ref(mapping_data),
-                                                           std_cxx11::ref(output_data))));
-      task_group.join_all();
-    }
+    for (unsigned int base_no=0; base_no<this->n_base_elements(); ++base_no)
+      {
+        compute_fill_one_base (mapping,
+                               std::make_pair(cell, CellSimilarity::none),
+                               std::make_pair(face_no, sub_no),
+                               quadrature,
+                               std::make_pair(&mapping_internal, &fedata),
+                               base_no,
+                               mapping_data,
+                               output_data);
+      }
 }
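
The effect the new comment describes is first-touch NUMA placement: physical pages are
committed to the memory of the node whose thread first writes them, so scratch objects
created on a worker thread and later filled from the calling thread can end up in remote
memory. A minimal standalone sketch of that effect (plain C++11, not deal.II code; the
ScratchData type and the buffer sizes are made up for illustration):

  #include <cstddef>
  #include <memory>
  #include <thread>
  #include <vector>

  // Stand-in for the per-base-element internal data objects; the
  // constructor's fill of 'values' is the "first touch" that decides
  // which NUMA node the pages are committed to.
  struct ScratchData
  {
    std::vector<double> values;
    explicit ScratchData (std::size_t n) : values (n, 0.0) {}
  };

  int main ()
  {
    const std::size_t n = 1u << 22;   // ~32 MB of doubles, spanning many pages

    // Analogous to the pattern the patch removes: create/initialize the
    // object on a separate thread ...
    std::unique_ptr<ScratchData> scratch;
    std::thread creator ([&]() { scratch.reset (new ScratchData (n)); });
    creator.join ();
    // ... then repeatedly write into it from the calling thread. If the
    // creator ran on another NUMA node, these writes may go to remote memory.
    for (double &v : scratch->values)
      v = 1.0;

    // Analogous to the pattern the patch adopts: create and fill on the same
    // thread, so the pages stay in the calling thread's NUMA domain.
    std::unique_ptr<ScratchData> local (new ScratchData (n));
    for (double &v : local->values)
      v = 2.0;
  }

On a single-socket machine both loops behave the same; the difference only appears when
the creating and the writing thread are scheduled on different NUMA nodes.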