From 883ea080182adbae12d360bcc4faff4119e41a62 Mon Sep 17 00:00:00 2001 From: bangerth Date: Fri, 25 Oct 2013 22:45:22 +0000 Subject: [PATCH] Document the change-over to WorkStream. git-svn-id: https://svn.dealii.org/trunk@31427 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/examples/step-9/step-9.cc | 368 +++++++++++++----------------- 1 file changed, 157 insertions(+), 211 deletions(-) diff --git a/deal.II/examples/step-9/step-9.cc b/deal.II/examples/step-9/step-9.cc index f6cb5f4a78..f81f6ac395 100644 --- a/deal.II/examples/step-9/step-9.cc +++ b/deal.II/examples/step-9/step-9.cc @@ -48,8 +48,8 @@ // The following two files provide classes and information for multithreaded // programs. In the first one, the classes and functions are declared which we -// need to start new threads and to wait for threads to return (i.e. the -// Thread class and the new_thread functions). The +// need to do assembly in parallel (i.e. the +// WorkStream namespace). The // second file has a class MultithreadInfo (and a global object // multithread_info of that type) which can be used to query the // number of processors in your system, which is often useful when deciding @@ -112,13 +112,33 @@ namespace Step9 // AssemblyScratchData and // AssemblyCopyData structures is). Rather, we will // only discuss the specific implementation. + // + // If you read the page mentioned above, you will find that in + // order to parallelize assembly, we need two data structures -- + // one that corresponds to data that we need during local + // integration ("scratch data", i.e., things we only need as + // temporary storage), and one that carries information from the + // local integration to the function that then adds the local + // contributions to the corresponding elements of the global + // matrix. The former of these typically contains the FEValues and + // FEFaceValues objects, whereas the latter has the local matrix, + // local right hand side, and information about which degrees of + // freedom live on the cell for which we are assembling a local + // contribution. With this information, the following should be + // relatively self-explanatory: struct AssemblyScratchData - {}; + { + AssemblyScratchData (const FiniteElement<dim> &fe); + AssemblyScratchData (const AssemblyScratchData &scratch_data); + + FEValues<dim> fe_values; + FEFaceValues<dim> fe_face_values; + }; struct AssemblyCopyData { - FullMatrix<double> cell_matrix; - Vector<double> cell_rhs; + FullMatrix<double> cell_matrix; + Vector<double> cell_rhs; std::vector<types::global_dof_index> local_dof_indices; }; @@ -508,147 +528,127 @@ namespace Step9 // In the following function, the matrix and right hand side are // assembled. As stated in the documentation of the main class above, it // does not do this itself, but rather delegates to the function following - // next, by splitting up the range of cells into chunks of approximately the - // same size and assembling on each of these chunks in parallel. + // next, utilizing the WorkStream concept discussed in @ref threads . + // + // If you have looked through the @ref threads module, you will have + // seen that assembling in parallel does not take an incredible + // amount of extra code as long as you diligently describe what the + // scratch and copy data objects are, and if you define suitable + // functions for the local assembly and the copy operation from local + // contributions to global objects.
This done, the following will do + // all the heavy lifting to get these operations done on multiple + // threads on as many cores as you have in your system: template void AdvectionProblem::assemble_system () { - // First, we want to find out how many threads shall assemble the matrix - // in parallel. A reasonable choice would be that each processor in your - // system processes one chunk of cells; if we were to use this - // information, we could use the value of the global variable - // multithread_info.n_cpus, which is determined at start-up - // time of your program automatically. (Note that if the library was not - // configured for multithreading, then the number of CPUs is set to one.) - // However, sometimes there might be reasons to use another value. For - // example, you might want to use less processors than there are in your - // system in order not to use too many computational resources. For - // this reason, the number of threads can be set by - // MultithreadInfo::set_thread_limit and the current value - // is returned by n_threads(). This - // is also queried by functions inside the library to determine - // how many threads they shall create. - - // It is worth noting, however, that this setup determines the load - // distribution onto processor in a static way: it does not take into - // account that some other part of our program may also be running - // something in parallel at the same time as we get here (this is not the - // case in the current program, but may easily be the case in more complex - // applications). A discussion of how to deal with this case can be found - // in the @ref threads module. - // - // Next, we need an object which is capable of keeping track of the - // threads we created, and allows us to wait until they all have finished - // (to join them in the language of threads). The - // Threads::ThreadGroup class does this, which is basically just a - // container for objects of type Threads::Thread that represent a single - // thread; Threads::Thread is what the Threads::new_thread function below - // will return when we start a new thread. - // - // Note that both Threads::ThreadGroup and Threads::Thread have a template - // argument that represents the return type of the function being called - // on a separate thread. Since most of the functions that we will call on - // different threads have return type void, the template - // argument has a default value void, so that in that case it - // can be omitted. (However, you still need to write the angle brackets, - // even if they are empty.) - // - // If you did not configure for multithreading, then the - // new_thread function that is supposed to start a new thread - // in parallel only executes the function which should be run in parallel, - // waits for it to return (i.e. the function is executed sequentially), - // and puts the return value into the Thread - // object. Likewise, the function join that is supposed to - // wait for all spawned threads to return, returns immediately, as there - // can't be any threads running. - - // Now we have to split the range of cells into chunks of approximately - // the same size. Each thread will then assemble the local contributions - // of the cells within its chunk and transfer these contributions to the - // global matrix. As splitting a range of cells is a rather common task - // when using multithreading, there is a function in the - // Threads namespace that does exactly this. 
In fact, it does - // this not only for a range of cell iterators, but for iterators in - // general, so you could use it for std::vector::iterator or - // usual pointers as well. - // - // The function returns a vector of pairs of iterators, where the first - // denotes the first cell of each chunk, while the second denotes the one - // past the last (this half-open interval is the usual convention in the - // C++ standard library, so we keep to it). Note that we have to specify - // the actual data type of the iterators in angle brackets to the - // function. This is necessary, since it is a template function which - // takes the data type of the iterators as template argument; in the - // present case, however, the data types of the two first parameters - // differ (begin_active returns an - // active_iterator, while end returns a - // raw_iterator), and in this case the C++ language requires - // us to specify the template type explicitly. For brevity, we first - // typedef this data type to an alias. - - typedef typename DoFHandler::active_cell_iterator active_cell_iterator; - - // Finally, for each of the chunks of iterators we have computed, start - // one thread (or if not in multithread mode: execute assembly on these - // chunks sequentially). This is done using the following sequence of - // function calls: WorkStream::run(dof_handler.begin_active(), dof_handler.end(), *this, &AdvectionProblem::local_assemble_system, &AdvectionProblem::copy_local_to_global, - AssemblyScratchData(), + AssemblyScratchData(fe), AssemblyCopyData()); - // The reasons and internal workings of these functions can be found in - // the report on the subject of multithreading, which is available online - // as well. Suffice it to say that we create a new thread that calls the - // assemble_system_interval function on the present object - // (the this pointer), with the arguments following in the - // second set of parentheses passed as parameters. The Threads::new_thread - // function returns an object of type Threads::Thread, which we put into - // the threads container. If a thread exits, the return value - // of the function being called is put into a place such that the thread - // objects can access it using their return_value function; - // since the function we call doesn't have a return value, this does not - // apply here. Note that you can copy around thread objects freely, and - // that of course they will still represent the same thread. - - // When all the threads are running, the only thing we have to do is wait - // for them to finish. This is necessary of course, as we can't proceed - // with our tasks before the matrix and right hand side are - // assembled. Waiting for all the threads to finish can be done using the - // join_all function in the ThreadGroup - // container, which just calls join on each of the thread - // objects it stores. - // - // Again, if the library was not configured to use multithreading, then - // no threads can run in parallel and the function returns immediately. - - // After the matrix has been assembled in parallel, we still have to // eliminate hanging node constraints. This is something that can't be // done on each of the threads separately, so we have to do it now. - hanging_node_constraints.condense (system_matrix); - hanging_node_constraints.condense (system_rhs); // Note also, that unlike in previous examples, there are no boundary // conditions to be applied to the system of equations. 
This, of course, // is due to the fact that we have included them into the weak formulation // of the problem. + hanging_node_constraints.condense (system_matrix); + hanging_node_constraints.condense (system_rhs); } + // As already mentioned above, we need to have scratch objects for + // the parallel computation of local contributions. These objects + // contain FEValues and FEFaceValues objects, and so we will need to + // have constructors and copy constructors that allow us to create + // them. In initializing them, note first that we use bilinear + // elements, so Gauss formulae with two points in each space + // direction are sufficient. For the cell terms we need the values + // and gradients of the shape functions, the quadrature points in + // order to determine the source density and the advection field at + // a given point, and the weights of the quadrature points times the + // determinant of the Jacobian at these points. In contrast, for the + // boundary integrals, we don't need the gradients, but rather the + // normal vectors to the cells. This determines which update flags + // we will have to pass to the constructors of the members of the + // class: + template <int dim> + AdvectionProblem<dim>::AssemblyScratchData:: + AssemblyScratchData (const FiniteElement<dim> &fe) + : + fe_values (fe, + QGauss<dim>(2), + update_values | update_gradients | + update_quadrature_points | update_JxW_values), + fe_face_values (fe, + QGauss<dim-1>(2), + update_values | update_quadrature_points | + update_JxW_values | update_normal_vectors) + {} + + + + template <int dim> + AdvectionProblem<dim>::AssemblyScratchData:: + AssemblyScratchData (const AssemblyScratchData &scratch_data) + : + fe_values (scratch_data.fe_values.get_fe(), + scratch_data.fe_values.get_quadrature(), + update_values | update_gradients | + update_quadrature_points | update_JxW_values), + fe_face_values (scratch_data.fe_face_values.get_fe(), + scratch_data.fe_face_values.get_quadrature(), + update_values | update_quadrature_points | + update_JxW_values | update_normal_vectors) + {} + + + + // Now, this is the function that does the actual work. It is not very // different from the assemble_system functions of previous // example programs, so we will again only comment on the differences. The // mathematical stuff follows closely what we have said in the introduction. + // + // There are a number of points worth mentioning here, though. The + // first one is that we have moved the FEValues and FEFaceValues + // objects into the ScratchData object. We have done so because the + // alternative would have been to simply create one every time we + // get into this function -- i.e., on every cell. It now turns out + // that the FEValues classes were written with the explicit goal of + // moving everything that remains the same from cell to cell into + // the construction of the object, and only do as little work as + // possible in FEValues::reinit() whenever we move to a new + // cell. What this means is that it would be very expensive to + // create a new object of this kind in this function as we would + // have to do it for every cell -- exactly the thing we wanted to + // avoid with the FEValues class. Instead, what we do is create it + // only once (or a small number of times) in the scratch objects and + // then re-use it as often as we can. + // + // This begs the question of whether there are other objects we + // create in this function whose creation is expensive compared to + // its use. Indeed, at the top of the function, we declare all sorts + // of objects.
The AdvectionField, + // RightHandSide and BoundaryValues do not + // cost much to create, so there is no harm here. However, + // allocating memory in creating the rhs_values and + // similar variables below typically costs a significant amount of + // time, compared to just accessing the (temporary) values we store + // in them. Consequently, these would be candidates for moving into + // the AssemblyScratchData class. We will leave this as + // an exercise. template void AdvectionProblem:: local_assemble_system (const typename DoFHandler::active_cell_iterator &cell, - AssemblyScratchData &scratch, + AssemblyScratchData &scratch_data, AssemblyCopyData ©_data) { // First of all, we will need some objects that describe boundary values, @@ -660,35 +660,14 @@ namespace Step9 const RightHandSide right_hand_side; const BoundaryValues boundary_values; - // Next we need quadrature formula for the cell terms, but also for the - // integral over the inflow boundary, which will be a face integral. As we - // use bilinear elements, Gauss formulae with two points in each space - // direction are sufficient. - QGauss quadrature_formula(2); - QGauss face_quadrature_formula(2); - - // Finally, we need objects of type FEValues and - // FEFaceValues. For the cell terms we need the values and - // gradients of the shape functions, the quadrature points in order to - // determine the source density and the advection field at a given point, - // and the weights of the quadrature points times the determinant of the - // Jacobian at these points. In contrast, for the boundary integrals, we - // don't need the gradients, but rather the normal vectors to the cells. - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_quadrature_points | - update_JxW_values | update_normal_vectors); - // Then we define some abbreviations to avoid unnecessarily long lines: const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - const unsigned int n_face_q_points = face_quadrature_formula.size(); + const unsigned int n_q_points = scratch_data.fe_values.get_quadrature().size(); + const unsigned int n_face_q_points = scratch_data.fe_face_values.get_quadrature().size(); // We declare cell matrix and cell right hand side... - copy_data.cell_matrix = FullMatrix (dofs_per_cell, dofs_per_cell); - copy_data.cell_rhs = Vector (dofs_per_cell); + copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell); + copy_data.cell_rhs.reinit (dofs_per_cell); // ... an array to hold the global indices of the degrees of freedom of // the cell on which we are presently working... @@ -704,13 +683,13 @@ namespace Step9 // ... then initialize the FEValues object... - fe_values.reinit (cell); + scratch_data.fe_values.reinit (cell); // ... obtain the values of right hand side and advection directions // at the quadrature points... - advection_field.value_list (fe_values.get_quadrature_points(), + advection_field.value_list (scratch_data.fe_values.get_quadrature_points(), advection_directions); - right_hand_side.value_list (fe_values.get_quadrature_points(), + right_hand_side.value_list (scratch_data.fe_values.get_quadrature_points(), rhs_values); // ... 
set the value of the streamline diffusion parameter as @@ -724,22 +703,22 @@ namespace Step9 { for (unsigned int j=0; j void AdvectionProblem::copy_local_to_global (const AssemblyCopyData ©_data) { - // Up until now we have not taken care of the fact that this function - // might run more than once in parallel, as the operations above only - // work on variables that are local to this function, or if they are - // global (such as the information on the grid, the DoF handler, or - // the DoF numbers) they are only read. Thus, the different threads do - // not disturb each other. - // - // On the other hand, we would now like to write the local - // contributions to the global system of equations into the global - // objects. This needs some kind of synchronization, as if we would - // not take care of the fact that multiple threads write into the - // matrix at the same time, we might be surprised that one threads - // reads data from the matrix that another thread is presently - // overwriting, or similar things. Thus, to make sure that only one - // thread operates on these objects at a time, we have to lock - // it. This is done using a Mutex, which is short for - // mutually exclusive: a thread that wants to write to - // the global objects acquires this lock, but has to wait if it is - // presently owned by another thread. If it has acquired the lock, it - // can be sure that no other thread is presently writing to the - // matrix, and can do so freely. When finished, we release the lock - // again so as to allow other threads to acquire it and write to the - // matrix. for (unsigned int i=0; ilock and release - // functions are no-ops, i.e. they return without doing anything. - // - // 2. In order to work properly, it is essential that all threads try - // to acquire the same lock. This, of course, can not be achieved if - // the lock is a local variable, as then each thread would acquire its - // own lock. Therefore, the lock variable is a member variable of the - // class; since all threads execute member functions of the same - // object, they have the same this pointer and therefore - // also operate on the same lock. } @@ -944,7 +890,7 @@ namespace Step9 else { refine_grid (); - }; + } std::cout << " Number of active cells: " @@ -960,7 +906,7 @@ namespace Step9 assemble_system (); solve (); output_results (cycle); - }; + } DataOut data_out; data_out.attach_dof_handler (dof_handler); @@ -1211,8 +1157,8 @@ namespace Step9 for (unsigned int subface_no=0; subface_non_children(); ++subface_no) active_neighbors.push_back ( cell->neighbor_child_on_subface(face_no, subface_no)); - }; - }; + } + } // OK, now that we have all the neighbors, lets start the computation // on each of them. First we do some preliminaries: find out about the @@ -1272,7 +1218,7 @@ namespace Step9 this_midpoint_value[0]) / distance * y; - }; + } // If now, after collecting all the information from the neighbors, we // can determine an approximation of the gradient for the present @@ -1310,7 +1256,7 @@ namespace Step9 *error_on_this_cell = (std::pow(cell->diameter(), 1+1.0*dim/2) * std::sqrt(gradient.square())); - }; + } } } @@ -1351,7 +1297,7 @@ int main () << "----------------------------------------------------" << std::endl; return 1; - }; + } return 0; } -- 2.39.5
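A note on the division of labor that the new documentation relies on: WorkStream runs the worker (here local_assemble_system()) on many cells concurrently, but hands each result to the copier (copy_local_to_global()) one at a time, which is why the old mutex discussion could simply be deleted. The following self-contained toy program illustrates the same pattern outside the finite element context. It is only an illustrative sketch, assuming a C++11 compiler; it uses the function-object overload of WorkStream::run() rather than the member-function overload used in step-9, and the names ToyScratch, ToyCopy and entries are made up for the illustration.

#include <deal.II/base/work_stream.h>

#include <iostream>
#include <vector>

using namespace dealii;

// Nothing needs to be remembered between elements in this toy example,
// but WorkStream still expects a scratch type.
struct ToyScratch
{};

// The "copy data": the local contribution the worker computes and the
// copier later adds to the global result.
struct ToyCopy
{
  double value = 0;
};

int main ()
{
  std::vector<double> entries (1000, 2.0);
  double sum = 0;

  WorkStream::run (entries.begin (), entries.end (),
                   // Worker: may run on several threads at once, but only
                   // touches its own scratch and copy objects.
                   [](const std::vector<double>::iterator &it,
                      ToyScratch &,
                      ToyCopy &copy)
                   {
                     copy.value = (*it) * (*it);
                   },
                   // Copier: run by only one thread at a time, so writing
                   // to the shared variable needs no mutex.
                   [&sum](const ToyCopy &copy)
                   {
                     sum += copy.value;
                   },
                   ToyScratch (),
                   ToyCopy ());

  std::cout << "sum of squares = " << sum << std::endl;
}

Because WorkStream serializes the copier, the shared variable sum is never written by two threads at once -- the same guarantee that lets copy_local_to_global() write into system_matrix and system_rhs without a lock.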
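The comment before local_assemble_system() leaves moving rhs_values and similar variables into the AssemblyScratchData class as an exercise. One possible solution is sketched below, mirroring the two constructors added by this patch. The struct is shown free-standing and under the made-up name ExtendedAssemblyScratchData for brevity; in step-9 it would take the place of the nested AdvectionProblem::AssemblyScratchData, and local_assemble_system() would then read and fill scratch_data.rhs_values and scratch_data.advection_directions instead of allocating those vectors on every cell.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/tensor.h>
#include <deal.II/fe/fe.h>
#include <deal.II/fe/fe_values.h>

#include <vector>

using namespace dealii;

template <int dim>
struct ExtendedAssemblyScratchData
{
  ExtendedAssemblyScratchData (const FiniteElement<dim> &fe)
    :
    fe_values (fe,
               QGauss<dim>(2),
               update_values | update_gradients |
               update_quadrature_points | update_JxW_values),
    fe_face_values (fe,
                    QGauss<dim-1>(2),
                    update_values | update_quadrature_points |
                    update_JxW_values | update_normal_vectors),
    // Size the per-quadrature-point arrays once, when the scratch object
    // is created, instead of once per cell.
    rhs_values (fe_values.get_quadrature().size()),
    advection_directions (fe_values.get_quadrature().size())
  {}

  ExtendedAssemblyScratchData (const ExtendedAssemblyScratchData &scratch_data)
    :
    fe_values (scratch_data.fe_values.get_fe(),
               scratch_data.fe_values.get_quadrature(),
               update_values | update_gradients |
               update_quadrature_points | update_JxW_values),
    fe_face_values (scratch_data.fe_face_values.get_fe(),
                    scratch_data.fe_face_values.get_quadrature(),
                    update_values | update_quadrature_points |
                    update_JxW_values | update_normal_vectors),
    rhs_values (scratch_data.rhs_values),
    advection_directions (scratch_data.advection_directions)
  {}

  FEValues<dim>               fe_values;
  FEFaceValues<dim>           fe_face_values;

  // Temporary storage filled anew on each cell by value_list(); only the
  // allocation is reused, not the values.
  std::vector<double>         rhs_values;
  std::vector<Tensor<1,dim> > advection_directions;
};

Note that the copy constructor has to be taught about the new members as well: WorkStream creates the scratch objects it hands to the workers by copying the sample object given to WorkStream::run(), so anything the worker expects to find allocated must survive that copy.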