From 702ccd7e9c95e7d3b894a67a8540ca84b6f00d55 Mon Sep 17 00:00:00 2001
From: turcksin <turcksin@0785d39b-7218-0410-832d-ea1e28bc413d>
Date: Thu, 17 Oct 2013 16:58:29 +0000
Subject: [PATCH] Use WorkStream in Step-13. Need to update the documentation
 and to use TBB instead of new_thread.

git-svn-id: https://svn.dealii.org/trunk@31285 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/examples/step-13/step-13.cc | 112 ++++++++++++++--------------
 1 file changed, 58 insertions(+), 54 deletions(-)

diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc
index fbef4d1660..32ee1a99e5 100644
--- a/deal.II/examples/step-13/step-13.cc
+++ b/deal.II/examples/step-13/step-13.cc
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <deal.II/base/work_stream.h>
 #include
 #include
 #include
@@ -61,6 +62,23 @@ namespace Step13
 {
   using namespace dealii;
 
+  namespace Assembler
+  {
+    struct Scratch
+    {
+      Scratch() {}
+    };
+
+    struct CopyData
+    {
+      CopyData() {}
+
+      unsigned int dofs_per_cell;
+      FullMatrix<double> cell_matrix;
+      std::vector<types::global_dof_index> local_dof_indices;
+    };
+  }
+
   // @sect3{Evaluation of the solution}
 
   // As for the program itself, we first define classes that evaluate the
@@ -646,10 +664,14 @@ namespace Step13
     assemble_linear_system (LinearSystem &linear_system);
 
     void
-    assemble_matrix (LinearSystem &linear_system,
-                     const typename DoFHandler<dim>::active_cell_iterator &begin_cell,
-                     const typename DoFHandler<dim>::active_cell_iterator &end_cell,
-                     Threads::Mutex &mutex) const;
+    assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+                     Assembler::Scratch &scratch,
+                     Assembler::CopyData &copy_data);
+
+
+    void
+    copy_local_to_global(Assembler::CopyData const &copy_data,
+                         LinearSystem &linear_system);
   };


@@ -743,27 +765,19 @@ namespace Step13
     // of equal size. The number of blocks is set to the default number of
     // threads to be used, which by default is set to the number of
     // processors found in your computer at startup of the program:
-    const unsigned int n_threads = multithread_info.n_threads();
-    std::vector<std::pair<active_cell_iterator,active_cell_iterator> >
-    thread_ranges
-      = Threads::split_range<active_cell_iterator> (dof_handler.begin_active (),
-                                                    dof_handler.end (),
-                                                    n_threads);
 
     // These ranges are then assigned to a number of threads which we create
     // next. Each will assemble the local cell matrices on the assigned
     // cells, and fill the matrix object with it. Since there is need for
     // synchronization when filling the same matrix from different threads,
     // we need a mutex here:
-    Threads::Mutex mutex;
-    Threads::ThreadGroup<> threads;
-    for (unsigned int thread=0; thread<n_threads; ++thread)
-      threads += Threads::new_thread (&Solver<dim>::assemble_matrix,
-                                      *this,
-                                      linear_system,
-                                      thread_ranges[thread].first,
-                                      thread_ranges[thread].second,
-                                      mutex);
+
+    Assembler::Scratch scratch;
+    Assembler::CopyData copy_data;
+    WorkStream::run(dof_handler.begin_active(),dof_handler.end(),
+        std::bind(&Solver<dim>::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3),
+        std::bind(&Solver<dim>::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)),
+        scratch,copy_data);
 
     // While the new threads assemble the system matrix, we can already
     // compute the right hand side vector in the main thread, and condense
@@ -783,7 +797,6 @@ namespace Step13
 
     // If this is done, wait for the matrix assembling threads, and condense
     // the constraints in the matrix as well:
-    threads.join_all ();
     linear_system.hanging_node_constraints.condense (linear_system.matrix);
 
     // Now that we have the linear system, we can also treat boundary
@@ -803,38 +816,39 @@ namespace Step13
   // on it any more, except for one point below.
 
   template <int dim>
   void
-  Solver<dim>::assemble_matrix (LinearSystem &linear_system,
-                                const typename DoFHandler<dim>::active_cell_iterator &begin_cell,
-                                const typename DoFHandler<dim>::active_cell_iterator &end_cell,
-                                Threads::Mutex &mutex) const
+  Solver<dim>::assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+                                Assembler::Scratch &scratch,
+                                Assembler::CopyData &copy_data)
   {
     FEValues<dim> fe_values (*fe, *quadrature,
                              update_gradients | update_JxW_values);
 
-    const unsigned int dofs_per_cell = fe->dofs_per_cell;
+    copy_data.dofs_per_cell = fe->dofs_per_cell;
     const unsigned int n_q_points = quadrature->size();
 
-    FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+    copy_data.cell_matrix = FullMatrix<double> (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
 
-    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+    copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
 
-    for (typename DoFHandler<dim>::active_cell_iterator cell=begin_cell;
-         cell!=end_cell; ++cell)
-      {
-        cell_matrix = 0;
+    fe_values.reinit (cell);
 
-        fe_values.reinit (cell);
+    for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+      for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
+        for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
+          copy_data.cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+                                         fe_values.shape_grad(j,q_point) *
+                                         fe_values.JxW(q_point));
 
-        for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
-          for (unsigned int i=0; i<dofs_per_cell; ++i)
-            for (unsigned int j=0; j<dofs_per_cell; ++j)
-              cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
-                                   fe_values.shape_grad(j,q_point) *
-                                   fe_values.JxW(q_point));
+    cell->get_dof_indices (copy_data.local_dof_indices);
+  }
 
-        cell->get_dof_indices (local_dof_indices);
+  template <int dim>
+  void
+  Solver<dim>::copy_local_to_global(Assembler::CopyData const &copy_data,
+                                    LinearSystem &linear_system)
+  {
     // In the step-9 program, we have shown that you have to use the
     // mutex to lock the matrix when copying the elements from the local
    // to the global matrix. This was necessary to avoid that two
@@ -862,21 +876,11 @@ namespace Step13
     // whether the operation completed successfully or not, whether the
     // exit path was something we implemented willfully or whether the
     // function was exited by an exception that we did not foresee.
-        //
-        // deal.II implements the scoped locking pattern in the
-        // Treads::Mutex::ScopedLock class: it takes the mutex in the
-        // constructor and locks it; in its destructor, it unlocks it
-        // again. So here is how it is used:
-        Threads::Mutex::ScopedLock lock (mutex);
-        for (unsigned int i=0; i<dofs_per_cell; ++i)
-          for (unsigned int j=0; j<dofs_per_cell; ++j)
-            linear_system.matrix.add (local_dof_indices[i],
-                                      local_dof_indices[j],
-                                      cell_matrix(i,j));
-
-        // Here, at the brace, the current scope ends, so the
-        // <code>lock</code> variable goes out of existence and its
-        // destructor the mutex is unlocked.
-      };
+    for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
+      for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
+        linear_system.matrix.add (copy_data.local_dof_indices[i],
+                                  copy_data.local_dof_indices[j],
+                                  copy_data.cell_matrix(i,j));
+  }
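
Note on the pattern introduced above (relevant to the documentation update the
commit message promises): WorkStream splits assembly into a worker that runs in
parallel, one cell at a time, filling a per-task CopyData object with help of a
reusable Scratch object, and a copier that transfers each CopyData into the
global matrix and is never run concurrently with itself. That is why the
Threads::Mutex and the ScopedLock disappear in this patch. The plain C++ sketch
below only illustrates that division of labor; it is a simplified two-phase
version (compute all local results first, then copy them in order), whereas
deal.II's WorkStream::run overlaps the two stages and limits how many CopyData
objects are in flight. The names Scratch, CopyData, run_work_stream, worker and
copier used here are illustrative placeholders, not deal.II API.

    #include <cstddef>
    #include <functional>
    #include <thread>
    #include <vector>

    // Per-task objects, analogous to Assembler::Scratch / Assembler::CopyData
    // in the patch: Scratch holds temporaries reused from item to item,
    // CopyData holds the finished local contribution of one item.
    struct Scratch  { std::vector<double> tmp; };
    struct CopyData { std::size_t index = 0; double value = 0.0; };

    // Simplified two-phase "work stream": the worker runs concurrently on
    // disjoint items, each thread owning its own Scratch and writing into a
    // pre-allocated CopyData slot; the copier then runs on a single thread,
    // in item order, so it needs no locking at all.
    void run_work_stream(
      const std::size_t n_items,
      const std::function<void(std::size_t, Scratch &, CopyData &)> &worker,
      const std::function<void(const CopyData &)> &copier,
      unsigned int n_threads = std::thread::hardware_concurrency())
    {
      if (n_threads == 0)
        n_threads = 1;

      std::vector<CopyData> results(n_items);

      std::vector<std::thread> threads;
      for (unsigned int t = 0; t < n_threads; ++t)
        threads.emplace_back([&, t]() {
          Scratch scratch;                  // one scratch object per thread
          for (std::size_t i = t; i < n_items; i += n_threads)
            worker(i, scratch, results[i]); // parallel, no shared writes
        });
      for (std::thread &thread : threads)
        thread.join();

      // Serialized copy stage: only this loop touches the shared target,
      // which is why the assembly worker no longer needs a mutex.
      for (const CopyData &copy_data : results)
        copier(copy_data);
    }

    int main()
    {
      double global_sum = 0.0;              // stand-in for the global matrix

      run_work_stream(
        1000,
        // plays the role of assemble_matrix(): local work only
        [](std::size_t i, Scratch &, CopyData &copy) {
          copy.index = i;
          copy.value = 0.5 * static_cast<double>(i);
        },
        // plays the role of copy_local_to_global(): writes shared state
        [&global_sum](const CopyData &copy) {
          global_sum += copy.value;
        });

      return (global_sum > 0.0) ? 0 : 1;
    }

The same division of labor is what the WorkStream::run call in the patch sets
up with the two bind expressions: the first becomes the worker, the second the
copier, and the trailing scratch/copy_data arguments are the sample objects
that WorkStream copies for each task.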