From: Katharina Kormann Date: Tue, 10 Apr 2018 15:57:51 +0000 (+0200) Subject: Rework task info. X-Git-Tag: v9.0.0-rc1~165^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5a7eeb5885f161a3478655f706f48a5e2bec834a;p=dealii.git Rework task info. --- diff --git a/include/deal.II/matrix_free/dof_info.h b/include/deal.II/matrix_free/dof_info.h index 1ee78f453a..15a40be997 100644 --- a/include/deal.II/matrix_free/dof_info.h +++ b/include/deal.II/matrix_free/dof_info.h @@ -199,75 +199,21 @@ namespace internal * together and interprets them as one cell only, as is needed for * vectorization. */ - void reorder_cells (const SizeInfo &size_info, - const std::vector &renumbering, - const std::vector &constraint_pool_row_index, - const std::vector &irregular_cells, - const unsigned int vectorization_length); - - /** - * This helper function determines a block size if the user decided not - * to force a block size through MatrixFree::AdditionalData. This is - * computed based on the number of hardware threads on the system and - * the number of macro cells that we should work on. - */ - void guess_block_size (const SizeInfo &size_info, - TaskInfo &task_info); - - /** - * This method goes through all cells that have been filled into @p - * dof_indices and finds out which cells can be worked on independently - * and which ones are neighboring and need to be done at different times - * when used in parallel. - * - * The strategy is based on a two-level approach. The outer level is - * subdivided into partitions similar to the type of neighbors in - * Cuthill-McKee, and the inner level is subdivided via colors (for - * chunks within the same color, can work independently). One task is - * represented by a chunk of cells. The cell chunks are formed before - * subdivision into partitions and colors. - */ - void - make_thread_graph_partition_color (SizeInfo &size_info, - TaskInfo &task_info, - std::vector &renumbering, - std::vector &irregular_cells, - const bool hp_bool); - - /** - * This function goes through all cells that have been filled into @p - * dof_indices and finds out which cells can be worked on independently - * and which ones are neighboring and need to be done at different times - * when used in parallel. - * - * The strategy is based on a two-level approach. The outer level is - * subdivided into partitions similar to the type of neighbors in - * Cuthill-McKee, and the inner level is again subdivided into Cuthill- - * McKee-like partitions (partitions whose level differs by more than 2 - * can be worked on independently). One task is represented by a chunk - * of cells. The cell chunks are formed after subdivision into the two - * levels of partitions. - */ - void - make_thread_graph_partition_partition (SizeInfo &size_info, - TaskInfo &task_info, - std::vector &renumbering, - std::vector &irregular_cells, - const bool hp_bool); + void reorder_cells (const SizeInfo &size_info, + const std::vector &renumbering, + const std::vector &constraint_pool_row_index, + const std::vector &irregular_cells, + const unsigned int vectorization_length); /** * This function computes the connectivity of the currently stored - * indices and fills the structure into a sparsity pattern. The - * parameter block_size can be used to specify whether several cells - * should be treated as one. + * indices in terms of connections between the individual cells and + * fills the structure into a sparsity pattern. 
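+      * Two cells are considered connected in this graph whenever they share
+      * at least one degree of freedom.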
*/ void - make_connectivity_graph (const SizeInfo &size_info, - const TaskInfo &task_info, + make_connectivity_graph (const TaskInfo &task_info, const std::vector &renumbering, - const std::vector &irregular_cells, - const bool do_blocking, - DynamicSparsityPattern &connectivity) const; + DynamicSparsityPattern &connectivity) const; /** * Renumbers the degrees of freedom to give good access for this class. diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h index 894167a1ca..0c4da1c566 100644 --- a/include/deal.II/matrix_free/dof_info.templates.h +++ b/include/deal.II/matrix_free/dof_info.templates.h @@ -420,161 +420,20 @@ no_constraint: void - DoFInfo::compute_renumber_serial (const std::vector &boundary_cells, - const SizeInfo &size_info, - std::vector &renumbering) - { - std::vector reverse_numbering (size_info.n_active_cells, - numbers::invalid_unsigned_int); - const unsigned int n_boundary_cells = boundary_cells.size(); - for (unsigned int j=0; j &renumbering, - std::vector &irregular_cells) - { - if (max_fe_index < 2) - return; - const unsigned int n_active_cells = size_info.n_active_cells; - const unsigned int vectorization_length = size_info.vectorization_length; - irregular_cells.resize (0); - irregular_cells.resize (size_info.n_macro_cells+3*max_fe_index); - std::vector > renumbering_fe_index; - renumbering_fe_index.resize(max_fe_index); - unsigned int counter,n_macro_cells_before = 0; - const unsigned int - start_bound = std::min (size_info.n_active_cells, - size_info.boundary_cells_start*vectorization_length), - end_bound = std::min (size_info.n_active_cells, - size_info.boundary_cells_end*vectorization_length); - for (counter=0; counter &boundary_cells, - SizeInfo &size_info, - std::vector &renumbering) - { - std::vector reverse_numbering (size_info.n_active_cells, - numbers::invalid_unsigned_int); - const unsigned int n_boundary_cells = boundary_cells.size(); - for (unsigned int j=0; j &renumbering, - const std::vector &constraint_pool_row_index, - const std::vector &irregular_cells, - const unsigned int vectorization_length) + DoFInfo::reorder_cells (const TaskInfo &task_info, + const std::vector &renumbering, + const std::vector &constraint_pool_row_index, + const std::vector &irregular_cells, + const unsigned int vectorization_length) { // first reorder the active fe index. if (cell_active_fe_index.size() > 0) { std::vector new_active_fe_index; - new_active_fe_index.reserve (size_info.n_macro_cells); + new_active_fe_index.reserve (task_info.cell_partition_data.back()); std::vector fe_indices(vectorization_length); unsigned int position_cell = 0; - for (unsigned int cell=0; cell 0 ? 
irregular_cells[cell] : vectorization_length); @@ -597,12 +456,12 @@ no_constraint: new_constraint_indicator; std::vector new_plain_indices, new_rowstart_plain; unsigned int position_cell = 0; - new_row_starts.resize (size_info.n_macro_cells + 1); + new_row_starts.resize(task_info.cell_partition_data.back()+1); new_dof_indices.reserve (dof_indices.size()); new_constraint_indicator.reserve (constraint_indicator.size()); if (store_plain_indices == true) { - new_rowstart_plain.resize (size_info.n_macro_cells + 1, + new_rowstart_plain.resize (task_info.cell_partition_data.back()+1, numbers::invalid_unsigned_int); new_plain_indices.reserve (plain_dof_indices.size()); } @@ -617,7 +476,7 @@ no_constraint: std::vector*> constr_ind(vectorization_length), constr_end(vectorization_length); std::vector index(vectorization_length); - for (unsigned int i=0; i @@ -744,884 +603,20 @@ no_constraint: } } - // sanity check 3: all non-boundary cells should have indices that only - // refer to the locally owned range - const unsigned int local_size = (vector_partitioner->local_range().second- - vector_partitioner->local_range().first); - for (unsigned int row=0; row 0) + n_active_cells += irregular_cells[c]; + else + n_active_cells += vectorization_length; + AssertDimension(n_active_cells, task_info.n_active_cells); #endif } - void DoFInfo::guess_block_size (const SizeInfo &size_info, - TaskInfo &task_info) - { - // user did not say a positive number, so we have to guess - if (task_info.block_size == 0) - { - // we would like to have enough work to do, so as first guess, try - // to get 50 times as many chunks as we have threads on the system. - task_info.block_size = - size_info.n_macro_cells / (MultithreadInfo::n_threads() * 50); - - // if there are too few degrees of freedom per cell, need to - // increase the block size - const unsigned int minimum_parallel_grain_size = 500; - if (dofs_per_cell[0] * task_info.block_size < - minimum_parallel_grain_size) - task_info.block_size = (minimum_parallel_grain_size / - dofs_per_cell[0] + 1); - } - if (task_info.block_size > size_info.n_macro_cells) - task_info.block_size = size_info.n_macro_cells; - } - - - - void DoFInfo::make_thread_graph_partition_color - (SizeInfo &size_info, - TaskInfo &task_info, - std::vector &renumbering, - std::vector &irregular_cells, - const bool hp_bool) - { - if (size_info.n_macro_cells == 0) - return; - - const std::size_t vectorization_length = size_info.vectorization_length; - Assert (vectorization_length > 0, ExcInternalError()); - - guess_block_size (size_info, task_info); - - // set up partitions. 
if we just use coloring without partitions, do - // nothing here, assume all cells to belong to the zero partition (that - // we otherwise use for MPI boundary cells) - unsigned int start_up = 0, - start_nonboundary = numbers::invalid_unsigned_int; - if (task_info.use_coloring_only == false) - { - start_nonboundary = - std::min(((size_info.boundary_cells_end+task_info.block_size-1)/ - task_info.block_size)*task_info.block_size, - size_info.n_macro_cells); - size_info.boundary_cells_end = start_nonboundary; - } - else - { - start_nonboundary = size_info.n_macro_cells; - size_info.boundary_cells_start = 0; - size_info.boundary_cells_end = size_info.n_macro_cells; - } - if (hp_bool == true) - { - irregular_cells.resize (0); - irregular_cells.resize (size_info.n_macro_cells+2*max_fe_index); - std::vector > renumbering_fe_index; - renumbering_fe_index.resize(max_fe_index); - unsigned int counter,n_macro_cells_before = 0; - for (counter=0; counter cell_partition(task_info.n_blocks, - size_info.n_macro_cells); - std::vector neighbor_list; - std::vector neighbor_neighbor_list; - - // In element j of this variable, one puts the old number of the block - // that should be the jth block in the new numeration. - std::vector partition_list (task_info.n_blocks,0); - std::vector partition_color_list(task_info.n_blocks,0); - - // This vector points to the start of each partition. - std::vector partition_blocks (2,0); - std::vector cell_color(task_info.n_blocks, - size_info.n_macro_cells); - std::vector color_finder; - - // this performs a classical breath-first search in the connectivity - // graph of the cell chunks - while (work) - { - // put all cells up to begin_inner_cells into first partition. if - // the numbers do not add up exactly, assign an additional block - if (start_nonboundary>0) - { - unsigned int n_blocks = ((start_nonboundary+task_info.block_size-1) - /task_info.block_size); - start_nonboundary = 0; - for (unsigned int cell=0; cell0) - { - partition++; - partition_blocks.push_back(partition_blocks.back()); - for (unsigned int j=0; jcolumn()]==size_info.n_macro_cells) - { - partition_blocks.back()++; - cell_partition[neighbor->column()] = partition; - neighbor_neighbor_list.push_back(neighbor->column()); - partition_list[counter++] = neighbor->column(); - } - } - } - neighbor_list = neighbor_neighbor_list; - neighbor_neighbor_list.resize(0); - } - - // One has to check if the graph is not connected so we have to find - // another partition. 
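      // (Concretely: scan for a block that still carries the initial marker
      //  size_info.n_macro_cells, i.e. one the breadth-first search has not
      //  reached, and seed the next sweep from it.)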
- work = false; - for (unsigned int j=start_up; jcolumn()] == part && - cell_color[neighbor->column()] <= n_neighbors) - color_finder[cell_color[neighbor->column()]] = false; - } - // Choose the smallest color that is not taken for the block - cell_color[cell]=0; - while (color_finder[cell_color[cell]] == false) - cell_color[cell]++; - if (cell_color[cell] > max_color) - max_color = cell_color[cell]; - } - // Reorder within partition: First, all blocks that belong the 0 and - // then so on until those with color max (Note that the smaller the - // number the larger the partition) - for (unsigned int color=0; color<=max_color; color++) - { - task_info.partition_color_blocks_data.push_back(color_counter); - index_counter++; - for (unsigned int k=partition_blocks[part]; - k sorted_pc_list (partition_color_list); - std::sort(sorted_pc_list.begin(), sorted_pc_list.end()); - for (unsigned int i=0; i block_start(size_info.n_macro_cells+1); - std::vector irregular(size_info.n_macro_cells); - - unsigned int mcell_start=0; - block_start[0] = 0; - for (unsigned int block=0; block0) - ?irregular_cells[mcell]:size_info.vectorization_length; - block_start[block+1] += n_comp; - ++counter; - } - mcell_start += task_info.block_size; - } - counter = 0; - unsigned int counter_macro = 0; - for (unsigned int block=0; block sorted_renumbering (renumbering); - std::sort(sorted_renumbering.begin(), sorted_renumbering.end()); - for (unsigned int i=0; i &renumbering, - std::vector &irregular_cells, - const bool hp_bool) - { - if (size_info.n_macro_cells == 0) - return; - - const std::size_t vectorization_length = size_info.vectorization_length; - Assert (vectorization_length > 0, ExcInternalError()); - - guess_block_size (size_info, task_info); - - // assume that all FEs have the same connectivity graph, so take the - // zeroth FE - task_info.n_blocks = (size_info.n_macro_cells+task_info.block_size-1)/ - task_info.block_size; - task_info.block_size_last = size_info.n_macro_cells- - (task_info.block_size*(task_info.n_blocks-1)); - task_info.position_short_block = task_info.n_blocks-1; - unsigned int cluster_size = task_info.block_size*vectorization_length; - - // create the connectivity graph without internal blocking - DynamicSparsityPattern connectivity; - make_connectivity_graph (size_info, task_info, renumbering,irregular_cells, - false, connectivity); - - // Create cell-block partitioning. - - // For each block of cells, this variable saves to which partitions the - // block belongs. Initialize all to n_macro_cells to mark them as not - // yet assigned a partition. - std::vector cell_partition (size_info.n_active_cells, - size_info.n_active_cells); - std::vector neighbor_list; - std::vector neighbor_neighbor_list; - - // In element j of this variable, one puts the old number of the block - // that should be the jth block in the new numeration. - std::vector partition_list(size_info.n_active_cells,0); - std::vector partition_partition_list(size_info.n_active_cells,0); - - // This vector points to the start of each partition. 
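      // (CSR-like offsets into partition_list: the cells of partition p are
      //  the entries partition_size[p] to partition_size[p+1]-1.)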
- std::vector partition_size(2,0); - - unsigned int partition = 0,start_up=0,counter=0; - unsigned int start_nonboundary = vectorization_length * size_info.boundary_cells_end; - if (start_nonboundary > size_info.n_active_cells) - start_nonboundary = size_info.n_active_cells; - bool work = true; - unsigned int remainder = cluster_size; - - // this performs a classical breath-first search in the connectivity - // graph of the cells under the restriction that the size of the - // partitions should be a multiple of the given block size - while (work) - { - // put the cells with neighbors on remote MPI processes up front - if (start_nonboundary>0) - { - for (unsigned int cell=0; cell0) - { - if (index==index_stop) - { - index = neighbor_list.size(); - if (index == index_before) - { - neighbor_list.resize(0); - goto not_connect; - } - index_stop = index_before; - index_before = index; - } - index--; - unsigned int additional = neighbor_list[index]; - DynamicSparsityPattern::iterator neighbor = - connectivity.begin(additional), - end = connectivity.end(additional); - for (; neighbor!=end ; ++neighbor) - { - if (cell_partition[neighbor->column()]==size_info.n_active_cells) - { - partition_size.back()++; - cell_partition[neighbor->column()] = partition; - neighbor_list.push_back(neighbor->column()); - partition_list[counter++] = neighbor->column(); - remainder--; - if (remainder == 0) - break; - } - } - } - - while (neighbor_list.size()>0) - { - partition++; - unsigned int partition_counter = 0; - partition_size.push_back(partition_size.back()); - - for (unsigned int j=0; jcolumn()]==size_info.n_active_cells) - { - partition_size.back()++; - cell_partition[neighbor->column()] = partition; - neighbor_neighbor_list.push_back(neighbor->column()); - partition_list[counter++] = neighbor->column(); - partition_counter++; - } - } - } - remainder = cluster_size-(partition_counter%cluster_size); - if (remainder == cluster_size) - remainder = 0; - int index_stop = 0; - int index_before = neighbor_neighbor_list.size(), index = index_before; - while (remainder>0) - { - if (index==index_stop) - { - index = neighbor_neighbor_list.size(); - if (index == index_before) - { - neighbor_neighbor_list.resize(0); - break; - } - index_stop = index_before; - index_before = index; - } - index--; - unsigned int additional = neighbor_neighbor_list[index]; - DynamicSparsityPattern::iterator neighbor = - connectivity.begin(additional), - end = connectivity.end(additional); - for (; neighbor!=end ; ++neighbor) - { - if (cell_partition[neighbor->column()]==size_info.n_active_cells) - { - partition_size.back()++; - cell_partition[neighbor->column()] = partition; - neighbor_neighbor_list.push_back(neighbor->column()); - partition_list[counter++] = neighbor->column(); - remainder--; - if (remainder == 0) - break; - } - } - } - - neighbor_list = neighbor_neighbor_list; - neighbor_neighbor_list.resize(0); - } -not_connect: - // One has to check if the graph is not connected so we have to find - // another partition. 
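      // (Same restart logic as in the coloring variant above, now acting on
      //  cells instead of blocks: look for an entry of cell_partition that
      //  still equals the initial value size_info.n_active_cells.)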
- work = false; - for (unsigned int j=start_up; j cell_partition_l2(size_info.n_active_cells, - size_info.n_active_cells); - task_info.partition_color_blocks_row_index.resize(partition+1,0); - task_info.partition_color_blocks_data.resize(1,0); - - counter = 0; - unsigned int missing_macros; - for (unsigned int part=0; partcolumn()] == part && - cell_partition_l2[neighbor->column()]== - size_info.n_active_cells) - { - cell_partition_l2[neighbor->column()] = partition_l2; - neighbor_neighbor_list.push_back(neighbor->column()); - partition_partition_list[counter++] = neighbor->column(); - partition_counter++; - } - } - } - } - if (partition_counter>0) - { - int index_before = neighbor_neighbor_list.size(), - index = index_before; - { - // put the cells into separate lists for each FE index - // within one partition-partition - missing_macros = 0; - std::vector remaining_per_macro_cell - (max_fe_index); - std::vector > - renumbering_fe_index; - unsigned int cell; - bool filled = true; - if (hp_bool == true) - { - renumbering_fe_index.resize(max_fe_index); - for (cell=counter-partition_counter; cell0 || filled == false) - { - if (index==0) - { - index = neighbor_neighbor_list.size(); - if (index == index_before) - { - if (missing_macros != 0) - { - neighbor_neighbor_list.resize(0); - } - start_up--; - break;// not connected - start again - } - index_before = index; - } - index--; - unsigned int additional = neighbor_neighbor_list - [index]; - - // go through the neighbors of the last cell in the - // current partition and check if we find some to - // fill up with. - DynamicSparsityPattern::iterator - neighbor = connectivity.begin(additional), - end = connectivity.end(additional); - for (; neighbor!=end ; ++neighbor) - { - if (cell_partition[neighbor->column()] == part && - cell_partition_l2[neighbor->column()] == - size_info.n_active_cells) - { - unsigned int this_index = 0; - if (hp_bool == true) - this_index = cell_active_fe_index.empty() ? 0 : - cell_active_fe_index[neighbor->column()]; - - // Only add this cell if we need more macro - // cells in the current block or if there is - // a macro cell with the FE index that is - // not yet fully populated - if (missing_macros > 0 || - remaining_per_macro_cell[this_index] > 0) - { - cell_partition_l2[neighbor->column()] = partition_l2; - neighbor_neighbor_list.push_back(neighbor->column()); - if (hp_bool == true) - renumbering_fe_index[this_index]. 
- push_back(neighbor->column()); - partition_partition_list[counter] = - neighbor->column(); - counter++; - partition_counter++; - if (remaining_per_macro_cell[this_index] - == 0 && missing_macros > 0) - missing_macros--; - remaining_per_macro_cell[this_index]++; - if (remaining_per_macro_cell[this_index] - == vectorization_length) - { - remaining_per_macro_cell[this_index] = 0; - } - if (missing_macros == 0) - { - filled = true; - for (unsigned int fe_ind=0; - fe_ind0) - size_info.boundary_cells_end = task_info.partition_color_blocks_data - [task_info.partition_color_blocks_row_index[1]]; - - if (hp_bool == false) - renumbering.swap(partition_partition_list); - irregular_cells.resize(n_macro_cells_before); - size_info.n_macro_cells = n_macro_cells_before; - - task_info.evens = (partition+1)/2; - task_info.odds = partition/2; - task_info.n_blocked_workers = - task_info.odds-(task_info.odds+task_info.evens+1)%2; - task_info.n_workers = task_info.evens+task_info.odds- - task_info.n_blocked_workers; - task_info.partition_evens.resize(partition); - task_info.partition_odds.resize(partition); - task_info.partition_n_blocked_workers.resize(partition); - task_info.partition_n_workers.resize(partition); - for (unsigned int part=0; part @@ -1664,188 +659,171 @@ not_connect: ++dat; } }; + + // We construct the connectivity graph in parallel. we use one lock for + // 256 degrees of freedom to keep the number of locks down to a + // reasonable level and reduce the cost of locking to some extent. + static constexpr unsigned int bucket_size_threading = 256; + + void compute_row_lengths(const unsigned int begin, + const unsigned int end, + const DoFInfo &dof_info, + std::vector &mutexes, + std::vector &row_lengths) + { + std::vector scratch; + constexpr unsigned int n_components = 1; + for (unsigned int block=begin; block::const_iterator end_unique = + std::unique(scratch.begin(), scratch.end()); + std::vector::const_iterator it = scratch.begin(); + while (it != end_unique) + { + // In this code, the procedure is that we insert all elements + // that are within the range of one lock at once + const unsigned int next_bucket = (*it/bucket_size_threading+1)* + bucket_size_threading; + Threads::Mutex::ScopedLock lock(mutexes[*it/bucket_size_threading]); + for ( ; it != end_unique && *it < next_bucket; ++it) + { + AssertIndexRange(*it, row_lengths.size()); + row_lengths[*it]++; + } + } + } + } + + void fill_connectivity_dofs(const unsigned int begin, + const unsigned int end, + const DoFInfo &dof_info, + const std::vector &row_lengths, + std::vector &mutexes, + dealii::SparsityPattern &connectivity_dof) + { + std::vector scratch; + const unsigned int n_components = 1; + for (unsigned int block=begin; block::const_iterator end_unique = + std::unique(scratch.begin(), scratch.end()); + std::vector::const_iterator it = scratch.begin(); + while (it != end_unique) + { + const unsigned int next_bucket = (*it/bucket_size_threading+1)* + bucket_size_threading; + Threads::Mutex::ScopedLock lock(mutexes[*it/bucket_size_threading]); + for ( ; it != end_unique && *it < next_bucket; ++it) + if (row_lengths[*it]>0) + connectivity_dof.add(*it, block); + } + } + } + + void fill_connectivity(const unsigned int begin, + const unsigned int end, + const DoFInfo &dof_info, + const std::vector &renumbering, + const dealii::SparsityPattern &connectivity_dof, + DynamicSparsityPattern &connectivity) + { + ordered_vector row_entries; + const unsigned int n_components = 1; + for (unsigned int block=begin; block < end; ++block) + { + 
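+          // Unlike compute_row_lengths() and fill_connectivity_dofs() above,
+          // this final phase needs none of the bucket mutexes: every block of
+          // the subrange writes only to its own row renumbering[block]. For
+          // the current block, collect all blocks that share at least one
+          // degree of freedom with it by walking its dof indices and, per
+          // dof, the block list stored in connectivity_dof.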
row_entries.clear(); + + const unsigned int + *it = &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]], + *end_cell = &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]]; + for ( ; it != end_cell; ++it) + { + SparsityPattern::iterator sp = connectivity_dof.begin(*it); + std::vector::iterator insert_pos = row_entries.begin(); + for ( ; sp != connectivity_dof.end(*it); ++sp) + if (sp->column() != block) + row_entries.insert (renumbering[sp->column()], insert_pos); + } + connectivity.add_entries (renumbering[block], row_entries.begin(), row_entries.end()); + } + } } void DoFInfo::make_connectivity_graph - (const SizeInfo &size_info, - const TaskInfo &task_info, + (const TaskInfo &task_info, const std::vector &renumbering, - const std::vector &irregular_cells, - const bool do_blocking, - DynamicSparsityPattern &connectivity) const + DynamicSparsityPattern &connectivity) const { - AssertDimension (row_starts.size()-1, size_info.n_active_cells); - const unsigned int n_rows = + unsigned int n_rows = (vector_partitioner->local_range().second- vector_partitioner->local_range().first) + vector_partitioner->ghost_indices().n_elements(); - const unsigned int n_blocks = (do_blocking == true) ? - task_info.n_blocks : size_info.n_active_cells; + + // Avoid square sparsity patterns that allocate the diagonal entry + if (n_rows == task_info.n_active_cells) + ++n_rows; // first determine row lengths std::vector row_lengths(n_rows); - unsigned int cell_start = 0, mcell_start = 0; - std::vector scratch; - for (unsigned int block = 0; block < n_blocks; ++block) - { - // if we have the blocking variant (used in the coloring scheme), we - // want to build a graph with the blocks with interaction with - // remote MPI processes up front. in the non-blocking variant, we do - // not do this here. TODO: unify this approach!!! 
- if (do_blocking == true) - { - scratch.clear(); - for (unsigned int mcell=mcell_start; mcell< - std::min(mcell_start+task_info.block_size, - size_info.n_macro_cells); - ++mcell) - { - unsigned int n_comp = (irregular_cells[mcell]>0) - ?irregular_cells[mcell]:size_info.vectorization_length; - for (unsigned int cell = cell_start; cell < cell_start+n_comp; - ++cell) - scratch.insert(scratch.end(), - begin_indices(renumbering[cell]), - end_indices(renumbering[cell])); - cell_start += n_comp; - } - std::sort(scratch.begin(), scratch.end()); - const unsigned int n_unique = - std::unique(scratch.begin(), scratch.end())-scratch.begin(); - for (unsigned int i=0; i mutexes(n_rows/bucket_size_threading+1); + parallel::apply_to_subranges(0, task_info.n_active_cells, + std::bind(&compute_row_lengths, + std::placeholders::_1, + std::placeholders::_2, + std::cref(*this), + std::ref(mutexes), + std::ref(row_lengths)), 20); + + // disregard dofs that only sit on a single cell because they cannot + // couple for (unsigned int row=0; row0) - ?irregular_cells[mcell]:size_info.vectorization_length; - for (unsigned int cell = cell_start; cell < cell_start+n_comp; - ++cell) - { - const unsigned int - *it = begin_indices (renumbering[cell]), - *end_cell = end_indices (renumbering[cell]); - for ( ; it != end_cell; ++it) - if (row_lengths[*it]>0) - connectivity_dof.add(*it, block); - } - cell_start += n_comp; - } - mcell_start += task_info.block_size; - } - else - { - const unsigned int - *it = begin_indices (block), - *end_cell = end_indices (block); - for ( ; it != end_cell; ++it) - if (row_lengths[*it]>0) - connectivity_dof.add(*it, block); - } - } + // Create a temporary sparsity pattern that holds to each degree of + // freedom on which cells it appears, i.e., store the connectivity + // between cells and dofs + SparsityPattern connectivity_dof (n_rows, task_info.n_active_cells, + row_lengths); + parallel::apply_to_subranges(0, task_info.n_active_cells, + std::bind(&fill_connectivity_dofs, + std::placeholders::_1, + std::placeholders::_2, + std::cref(*this), + std::cref(row_lengths), + std::ref(mutexes), + std::ref(connectivity_dof)), 20); connectivity_dof.compress(); - connectivity.reinit (n_blocks, n_blocks); - internal::ordered_vector row_entries; - cell_start = 0; - mcell_start = 0; - for (unsigned int block=0; block < n_blocks; ++block) - { - row_entries.clear(); - if (do_blocking==true) - { - for (unsigned int mcell=mcell_start; mcell< - std::min(mcell_start+task_info.block_size, - size_info.n_macro_cells); - ++mcell) - { - unsigned int n_comp = (irregular_cells[mcell]>0) - ?irregular_cells[mcell]:size_info.vectorization_length; - for (unsigned int cell = cell_start; cell < cell_start+n_comp; - ++cell) - { - // apply renumbering when we do blocking - const unsigned int - *it = begin_indices (renumbering[cell]), - *end_cell = end_indices (renumbering[cell]); - for ( ; it != end_cell; ++it) - if (row_lengths[*it] > 0) - { - SparsityPattern::iterator sp = connectivity_dof.begin(*it); - // jump over diagonal for square patterns - if (connectivity_dof.n_rows()==connectivity_dof.n_cols()) - ++sp; - row_entries.reserve (row_entries.size() + end_cell - it); - std::vector::iterator insert_pos = row_entries.begin(); - for ( ; sp != connectivity_dof.end(*it); ++sp) - if (sp->column() >= block) - break; - else - row_entries.insert (sp->column(), insert_pos); - } - } - cell_start +=n_comp; - } - mcell_start += task_info.block_size; - } - else - { - const unsigned int *it = begin_indices (block), - * end_cell = 
end_indices (block); - for ( ; it != end_cell; ++it) - if (row_lengths[*it] > 0) - { - SparsityPattern::iterator sp = connectivity_dof.begin(*it); - // jump over diagonal for square patterns - if (connectivity_dof.n_rows()==connectivity_dof.n_cols()) - ++sp; - row_entries.reserve (row_entries.size() + end_cell - it); - std::vector::iterator insert_pos = row_entries.begin(); - for ( ; sp != connectivity_dof.end(*it); ++sp) - if (sp->column() >= block) - break; - else - row_entries.insert (sp->column(), insert_pos); - } - } - connectivity.add_entries (block, row_entries.begin(), row_entries.end()); - } - connectivity.symmetrize (); + // Invert renumbering for use in fill_connectivity. + std::vector reverse_numbering(task_info.n_active_cells); + reverse_numbering = Utilities::invert_permutation(renumbering); + + // From the above connectivity between dofs and cells, we can finally + // create a connectivity list between cells. The connectivity graph + // should apply the renumbering, i.e., the entry for cell j is the entry + // for cell renumbering[j] in the original ordering. + parallel::apply_to_subranges(0, task_info.n_active_cells, + std::bind(&fill_connectivity, + std::placeholders::_1, + std::placeholders::_2, + std::cref(*this), + std::cref(reverse_numbering), + std::cref(connectivity_dof), + std::ref(connectivity)), 20); } @@ -1917,23 +895,23 @@ not_connect: template void DoFInfo::print_memory_consumption (StreamType &out, - const SizeInfo &size_info) const + const TaskInfo &task_info) const { out << " Memory row starts indices: "; - size_info.print_memory_statistics + task_info.print_memory_statistics (out, (row_starts.capacity()*sizeof(std::array))); out << " Memory dof indices: "; - size_info.print_memory_statistics + task_info.print_memory_statistics (out, MemoryConsumption::memory_consumption (dof_indices)); out << " Memory constraint indicators: "; - size_info.print_memory_statistics + task_info.print_memory_statistics (out, MemoryConsumption::memory_consumption (constraint_indicator)); out << " Memory plain indices: "; - size_info.print_memory_statistics + task_info.print_memory_statistics (out, MemoryConsumption::memory_consumption (row_starts_plain_indices)+ MemoryConsumption::memory_consumption (plain_dof_indices)); out << " Memory vector partitioner: "; - size_info.print_memory_statistics + task_info.print_memory_statistics (out, MemoryConsumption::memory_consumption (*vector_partitioner)); } diff --git a/include/deal.II/matrix_free/helper_functions.h b/include/deal.II/matrix_free/helper_functions.h index 1720820106..9b5d5a4d8d 100644 --- a/include/deal.II/matrix_free/helper_functions.h +++ b/include/deal.II/matrix_free/helper_functions.h @@ -1,6 +1,6 @@ // --------------------------------------------------------------------- // -// Copyright (C) 2011 - 2017 by the deal.II authors +// Copyright (C) 2011 - 2018 by the deal.II authors // // This file is part of the deal.II library. // @@ -25,116 +25,7 @@ #include #include -DEAL_II_NAMESPACE_OPEN +#include - -namespace internal -{ - namespace MatrixFreeFunctions - { - /** - * A struct that collects all information related to parallelization with - * threads: The work is subdivided into tasks that can be done - * independently. - */ - struct TaskInfo - { - /** - * Constructor. - */ - TaskInfo (); - - /** - * Clear all the data fields and resets them to zero. - */ - void clear (); - - /** - * Return the memory consumption of the class. 
- */ - std::size_t memory_consumption () const; - - unsigned int block_size; - unsigned int n_blocks; - unsigned int block_size_last; - unsigned int position_short_block; - bool use_multithreading; - bool use_partition_partition; - bool use_coloring_only; - - std::vector partition_color_blocks_row_index; - std::vector partition_color_blocks_data; - unsigned int evens; - unsigned int odds; - unsigned int n_blocked_workers; - unsigned int n_workers; - - std::vector partition_evens; - std::vector partition_odds; - std::vector partition_n_blocked_workers; - std::vector partition_n_workers; - }; - - - - /** - * A struct that collects all information related to the size of the - * problem and MPI parallelization. - */ - struct SizeInfo - { - /** - * Constructor. - */ - SizeInfo (); - - /** - * Clear all data fields and resets the sizes to zero. - */ - void clear(); - - /** - * Prints minimum, average, and maximal memory consumption over the MPI - * processes. - */ - template - void print_memory_statistics (StreamType &out, - std::size_t data_length) const; - - /** - * Determines the position of cells with ghosts for distributed-memory - * calculations. - */ - void make_layout (const unsigned int n_active_cells_in, - const unsigned int vectorization_length_in, - std::vector &boundary_cells, - std::vector &irregular_cells); - - unsigned int n_active_cells; - unsigned int n_macro_cells; - unsigned int boundary_cells_start; - unsigned int boundary_cells_end; - unsigned int vectorization_length; - - /** - * index sets to describe the layout of cells: locally owned cells and - * locally active cells - */ - IndexSet locally_owned_cells; - IndexSet ghost_cells; - - /** - * MPI communicator - */ - MPI_Comm communicator; - unsigned int my_pid; - unsigned int n_procs; - }; - - } // end of namespace MatrixFreeFunctions -} // end of namespace internal - -DEAL_II_NAMESPACE_CLOSE - #endif diff --git a/include/deal.II/matrix_free/mapping_info.h b/include/deal.II/matrix_free/mapping_info.h index e8121ed550..ed4f4ad8d1 100644 --- a/include/deal.II/matrix_free/mapping_info.h +++ b/include/deal.II/matrix_free/mapping_info.h @@ -315,7 +315,7 @@ namespace internal */ template void print_memory_consumption(StreamType &out, - const SizeInfo &task_info) const; + const TaskInfo &task_info) const; /** * Stores whether a cell is Cartesian (cell type 0), has constant diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h index 4d3daa83ed..9d3abe222a 100644 --- a/include/deal.II/matrix_free/matrix_free.h +++ b/include/deal.II/matrix_free/matrix_free.h @@ -34,18 +34,11 @@ #include #include #include -#include +#include #include #include #include -#ifdef DEAL_II_WITH_THREADS -#include -#include -#include -#include -#endif - #include #include #include @@ -805,7 +798,8 @@ public: /** * Return information on system size. */ - const internal::MatrixFreeFunctions::SizeInfo & + DEAL_II_DEPRECATED + const internal::MatrixFreeFunctions::TaskInfo & get_size_info () const; /* @@ -992,13 +986,18 @@ private: std::vector > cell_level_index; /** - * Stores how many cells we have, how many cells that we see after applying - * vectorization (i.e., the number of macro cells), and MPI-related stuff. - */ - internal::MatrixFreeFunctions::SizeInfo size_info; + * For discontinuous Galerkin, the cell_level_index includes cells that are + * not on the local processor but that are needed to evaluate the cell + * integrals. In cell_level_index_end_local, we store the number of local + * cells. 
+ **/ + unsigned int cell_level_index_end_local; /** - * Information regarding the shared memory parallelization. + * Stores how many cells we have, how many cells that we see after applying + * vectorization (i.e., the number of macro cells), MPI-related stuff, and, + * if threads are enabled, information regarding the shared memory + * parallelization. */ internal::MatrixFreeFunctions::TaskInfo task_info; @@ -1100,10 +1099,10 @@ MatrixFree::get_task_info () const template inline -const internal::MatrixFreeFunctions::SizeInfo & +const internal::MatrixFreeFunctions::TaskInfo & MatrixFree::get_size_info () const { - return size_info; + return task_info; } @@ -1113,7 +1112,7 @@ inline unsigned int MatrixFree::n_macro_cells () const { - return size_info.n_macro_cells; + return *(task_info.cell_partition_data.end()-2); } @@ -1123,7 +1122,7 @@ inline unsigned int MatrixFree::n_physical_cells () const { - return size_info.n_active_cells; + return task_info.n_active_cells; } @@ -1297,7 +1296,7 @@ MatrixFree::get_cell_iterator(const unsigned int macro_cell_number, const unsigned int vectorization_length=VectorizedArray::n_array_elements; #ifdef DEBUG AssertIndexRange (dof_index, dof_handlers.n_dof_handlers); - AssertIndexRange (macro_cell_number, size_info.n_macro_cells); + AssertIndexRange (macro_cell_number, n_macro_cells()); AssertIndexRange (vector_number, vectorization_length); const unsigned int irreg_filled = dof_info[dof_index].row_starts[macro_cell_number][2]; if (irreg_filled > 0) @@ -1335,7 +1334,7 @@ MatrixFree::get_hp_cell_iterator(const unsigned int macro_cell_numbe const unsigned int vectorization_length=VectorizedArray::n_array_elements; #ifdef DEBUG AssertIndexRange (dof_index, dof_handlers.n_dof_handlers); - AssertIndexRange (macro_cell_number, size_info.n_macro_cells); + AssertIndexRange (macro_cell_number, n_macro_cells()); AssertIndexRange (vector_number, vectorization_length); const unsigned int irreg_filled = dof_info[dof_index].row_starts[macro_cell_number][2]; if (irreg_filled > 0) @@ -1358,7 +1357,7 @@ inline bool MatrixFree::at_irregular_cell (const unsigned int macro_cell) const { - AssertIndexRange (macro_cell, size_info.n_macro_cells); + AssertIndexRange (macro_cell, n_macro_cells()); return dof_info[0].row_starts[macro_cell][2] > 0; } @@ -1369,7 +1368,7 @@ inline unsigned int MatrixFree::n_components_filled (const unsigned int macro_cell) const { - AssertIndexRange (macro_cell, size_info.n_macro_cells); + AssertIndexRange (macro_cell, n_macro_cells()); const unsigned int n_filled = dof_info[0].row_starts[macro_cell][2]; if (n_filled == 0) return VectorizedArray::n_array_elements; @@ -1753,9 +1752,10 @@ reinit(const Mapping &mapping, // functions: for generic vectors, do nothing at all. For distributed vectors, // call update_ghost_values_start function and so on. If we have collections // of vectors, just do the individual functions of the components. In order to -// keep ghost values consistent (whether we are in read or write mode). the whole situation is a bit complicated by the fact -// that we need to treat block vectors differently, which use some additional -// helper functions to select the blocks and template magic. +// keep ghost values consistent (whether we are in read or write mode). the +// whole situation is a bit complicated by the fact that we need to treat +// block vectors differently, which use some additional helper functions to +// select the blocks and template magic. 
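A minimal sketch of this overload-based dispatch, illustrative only: the
helper is called start_ghost_update here so it is not mistaken for the actual
internal functions, which also thread a communication channel through and
cover block vectors.

  #include <deal.II/lac/la_parallel_vector.h>

  #include <vector>

  using namespace dealii;

  // Generic vectors: nothing to communicate; report that no ghost update
  // was pending so that a later reset becomes a no-op.
  template <typename VectorStruct>
  bool start_ghost_update (const VectorStruct &)
  {
    return false;
  }

  // Distributed vectors: start the non-blocking exchange and remember
  // whether the ghost entries were already up to date beforehand.
  template <typename Number>
  bool start_ghost_update (const LinearAlgebra::distributed::Vector<Number> &vec)
  {
    const bool ghosts_were_set = vec.has_ghost_elements();
    vec.update_ghost_values_start();
    return ghosts_were_set;
  }

  // Collections of vectors: recurse into the individual components.
  template <typename VectorStruct>
  bool start_ghost_update (const std::vector<VectorStruct> &vec)
  {
    bool ghosts_were_set = false;
    for (unsigned int comp = 0; comp < vec.size(); ++comp)
      ghosts_were_set = start_ghost_update (vec[comp]) || ghosts_were_set;
    return ghosts_were_set;
  }

With this convention, a final reset_ghost_values(src, !ghosts_were_set), as in
MFWorker::vector_compress_finish below, zeroes the ghost entries only if they
had not already been set when the loop started.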
namespace internal { template @@ -2081,257 +2081,192 @@ namespace internal -#ifdef DEAL_II_WITH_THREADS - // This defines the TBB data structures that are needed to schedule the - // partition-partition variant - - namespace partition + namespace MatrixFreeFunctions { - template - class CellWork : public tbb::task - { - public: - CellWork (const Worker &worker_in, - const unsigned int partition_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in, - const bool is_blocked_in) - : - dummy (nullptr), - worker (worker_in), - partition (partition_in), - task_info (task_info_in), - is_blocked (is_blocked_in) - {}; - tbb::task *execute () - { - std::pair cell_range - (task_info.partition_color_blocks_data[partition], - task_info.partition_color_blocks_data[partition+1]); - worker(cell_range); - if (is_blocked==true) - dummy->spawn (*dummy); - return (nullptr); - } - - tbb::empty_task *dummy; + // struct to select between a const interface and a non-const interface + // for MFWorker + template + struct InterfaceSelector + {}; - private: - const Worker &worker; - const unsigned int partition; - const internal::MatrixFreeFunctions::TaskInfo &task_info; - const bool is_blocked; + // Version of constant functions + template + struct InterfaceSelector + { + typedef void (Container::*function_type) + (const MF &, OutVector &, const InVector &, + const std::pair &)const; }; - - - template - class PartitionWork : public tbb::task + // Version for non-constant functions + template + struct InterfaceSelector { - public: - PartitionWork (const Worker &function_in, - const unsigned int partition_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in, - const bool is_blocked_in = false) - : - dummy (nullptr), - function (function_in), - partition (partition_in), - task_info (task_info_in), - is_blocked (is_blocked_in) - {}; - tbb::task *execute () - { - tbb::empty_task *root = new ( tbb::task::allocate_root() ) - tbb::empty_task; - unsigned int evens = task_info.partition_evens[partition]; - unsigned int odds = task_info.partition_odds[partition]; - unsigned int n_blocked_workers = - task_info.partition_n_blocked_workers[partition]; - unsigned int n_workers = task_info.partition_n_workers[partition]; - std::vector*> worker(n_workers); - std::vector*> blocked_worker(n_blocked_workers); - - root->set_ref_count(evens+1); - for (unsigned int j=0; jallocate_child()) - CellWork(function, task_info. - partition_color_blocks_row_index[partition]+2*j, - task_info, false); - if (j>0) - { - worker[j]->set_ref_count(2); - blocked_worker[j-1]->dummy = new (worker[j]->allocate_child()) - tbb::empty_task; - worker[j-1]->spawn(*blocked_worker[j-1]); - } - else - worker[j]->set_ref_count(1); - if (jallocate_child()) - CellWork(function, task_info. - partition_color_blocks_row_index - [partition] + 2*j+1, task_info, true); - } - else - { - if (odds==evens) - { - worker[evens] = new (worker[j]->allocate_child()) - CellWork(function, task_info. 
- partition_color_blocks_row_index[partition]+2*j+1, - task_info, false); - worker[j]->spawn(*worker[evens]); - } - else - { - tbb::empty_task *child = new (worker[j]->allocate_child()) - tbb::empty_task(); - worker[j]->spawn(*child); - } - } - } - - root->wait_for_all(); - root->destroy(*root); - if (is_blocked==true) - dummy->spawn (*dummy); - return (nullptr); - } - - tbb::empty_task *dummy; - - private: - const Worker &function; - const unsigned int partition; - const internal::MatrixFreeFunctions::TaskInfo &task_info; - const bool is_blocked; + typedef void (Container::*function_type) + (const MF &, OutVector &, const InVector &, + const std::pair &); }; - - } // end of namespace partition + } - namespace color + // A implementation class for the worker object that runs the various + // operations we want to perform during the matrix-free loop + template + class MFWorker : public MFWorkerInterface { - template - class CellWork - { - public: - CellWork (const Worker &worker_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in) - : - worker (worker_in), - task_info (task_info_in) - {}; - void operator()(const tbb::blocked_range &r) const - { - for (unsigned int block=r.begin(); block cell_range; - if (task_info.position_short_block::function_type function_type; + + // constructor, binds all the arguments to this class + MFWorker (const MF &matrix_free, + const InVector &src, + OutVector &dst, + const bool zero_dst_vector_setting, + const Container &container, + function_type cell_function, + function_type face_function, + function_type boundary_function) + : + matrix_free (matrix_free), + container (const_cast(container)), + cell_function (cell_function), + face_function (face_function), + boundary_function (boundary_function), + src (src), + dst (dst), + ghosts_were_set(false), + src_and_dst_are_same (PointerComparison::equal(&src, &dst)), + zero_dst_vector_setting(zero_dst_vector_setting &&!src_and_dst_are_same) + {} + // Runs the cell work. If no function is given, nothing is done + virtual void cell(const std::pair &cell_range) override + { + if (cell_function != nullptr && cell_range.second > cell_range.first) + (container.*cell_function)(matrix_free, this->dst, this->src, cell_range); + } - template - class PartitionWork : public tbb::task + // Runs the assembler on interior faces. If no function is given, nothing + // is done + virtual void face(const std::pair &face_range) override { - public: - PartitionWork (const Worker &worker_in, - const unsigned int partition_in, - const internal::MatrixFreeFunctions::TaskInfo &task_info_in, - const bool is_blocked_in) - : - dummy (nullptr), - worker (worker_in), - partition (partition_in), - task_info (task_info_in), - is_blocked (is_blocked_in) - {}; - tbb::task *execute () - { - unsigned int lower = task_info.partition_color_blocks_data[partition], - upper = task_info.partition_color_blocks_data[partition+1]; - parallel_for(tbb::blocked_range(lower,upper,1), - CellWork (worker,task_info)); - if (is_blocked==true) - dummy->spawn (*dummy); - return (nullptr); - } + if (face_function != nullptr && face_range.second > face_range.first) + (container.*face_function)(matrix_free, this->dst, this->src, face_range); + } - tbb::empty_task *dummy; + // Runs the assembler on boundary faces. 
If no function is given, nothing + // is done + virtual void boundary(const std::pair &face_range) override + { + if (boundary_function != nullptr && face_range.second > face_range.first) + (container.*boundary_function)(matrix_free, this->dst, this->src, face_range); + } - private: - const Worker &worker; - const unsigned int partition; - const internal::MatrixFreeFunctions::TaskInfo &task_info; - const bool is_blocked; - }; + // Starts the communication for the update ghost values operation. We + // cannot call this update if ghost and destination are the same because + // that would introduce spurious entries in the destination (there is also + // the problem that reading from a vector that we also write to is usually + // not intended in case there is overlap, but this is up to the + // application code to decide and we cannot catch this case here). + virtual void vector_update_ghosts_start() override + { + if (!src_and_dst_are_same) + ghosts_were_set = internal::update_ghost_values_start(src); + } - } // end of namespace color + // Finishes the communication for the update ghost values operation + virtual void vector_update_ghosts_finish() override + { + if (!src_and_dst_are_same) + internal::update_ghost_values_finish(src); + } + // Starts the communication for the vector compress operation + virtual void vector_compress_start() override + { + internal::compress_start(dst); + } - template - class MPIComDistribute : public tbb::task - { - public: - MPIComDistribute (const VectorStruct &src_in) - : - src(src_in) - {}; + // Finishes the communication for the vector compress operation + virtual void vector_compress_finish() override + { + internal::compress_finish(dst); + if (!src_and_dst_are_same) + internal::reset_ghost_values(src, !ghosts_were_set); + } - tbb::task *execute () + // Zeros the given input vector + virtual void zero_dst_vector_range(const unsigned int /*range_index*/) override { - internal::update_ghost_values_finish(src); - return nullptr; + // currently not implemented + (void)zero_dst_vector_setting; } private: - const VectorStruct &src; + const MF &matrix_free; + Container &container; + function_type cell_function; + function_type face_function; + function_type boundary_function; + + const InVector &src; + OutVector &dst; + bool ghosts_were_set; + const bool src_and_dst_are_same; + const bool zero_dst_vector_setting; }; - template - class MPIComCompress : public tbb::task + /** + * An internal class to convert three function pointers to the + * scheme with virtual functions above. 
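+   * The std::function variant of MatrixFree::cell_loop below wraps its
+   * callback in this class, so that all loop variants funnel into the same
+   * MFWorker machinery.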
+ */ + template + struct MFClassWrapper { - public: - MPIComCompress (VectorStruct &dst_in) + typedef std::function &)> function_type; + + MFClassWrapper (const function_type cell, + const function_type face, + const function_type boundary) : - dst(dst_in) - {}; + cell (cell), + face (face), + boundary (boundary) + {} - tbb::task *execute () + void cell_integrator (const MF &mf, OutVector &dst, const InVector &src, + const std::pair &range) const { - internal::compress_start(dst); - return nullptr; + if (cell) + cell(mf, dst, src, range); } - private: - VectorStruct &dst; - }; + void face_integrator (const MF &mf, OutVector &dst, const InVector &src, + const std::pair &range) const + { + if (face) + face(mf, dst, src, range); + } + + void boundary_integrator (const MF &mf, OutVector &dst, const InVector &src, + const std::pair &range) const + { + if (boundary) + boundary(mf, dst, src, range); + } -#endif // DEAL_II_WITH_THREADS + const function_type cell; + const function_type face; + const function_type boundary; + }; } // end of namespace internal @@ -2350,275 +2285,13 @@ MatrixFree::cell_loop OutVector &dst, const InVector &src) const { - // in any case, need to start the ghost import at the beginning - bool ghosts_were_not_set = internal::update_ghost_values_start (src); - -#ifdef DEAL_II_WITH_THREADS - - // Use multithreading if so requested and if there is enough work to do in - // parallel (the code might hang if there are less than two chunks!) - if (task_info.use_multithreading == true && task_info.n_blocks > 3) - { - // to simplify the function calls, bind away all arguments except the - // cell range - typedef - std::function &range)> - Worker; - - const Worker func = std::bind (std::ref(cell_operation), - std::cref(*this), - std::ref(dst), - std::cref(src), - std::placeholders::_1); - - if (task_info.use_partition_partition == true) - { - tbb::empty_task *root = new ( tbb::task::allocate_root() ) - tbb::empty_task; - unsigned int evens = task_info.evens; - unsigned int odds = task_info.odds; - root->set_ref_count(evens+1); - unsigned int n_blocked_workers = task_info.n_blocked_workers; - unsigned int n_workers = task_info.n_workers; - std::vector*> - worker(n_workers); - std::vector*> - blocked_worker(n_blocked_workers); - internal::MPIComCompress *worker_compr = - new (root->allocate_child()) - internal::MPIComCompress(dst); - worker_compr->set_ref_count(1); - for (unsigned int j=0; j0) - { - worker[j] = new (root->allocate_child()) - internal::partition::PartitionWork - (func,2*j,task_info,false); - worker[j]->set_ref_count(2); - blocked_worker[j-1]->dummy = new (worker[j]->allocate_child()) - tbb::empty_task; - if (j>1) - worker[j-1]->spawn(*blocked_worker[j-1]); - else - worker_compr->spawn(*blocked_worker[j-1]); - } - else - { - worker[j] = new (worker_compr->allocate_child()) - internal::partition::PartitionWork - (func,2*j,task_info,false); - worker[j]->set_ref_count(2); - internal::MPIComDistribute *worker_dist = - new (worker[j]->allocate_child()) - internal::MPIComDistribute(src); - worker_dist->spawn(*worker_dist); - } - if (jallocate_child()) - internal::partition::PartitionWork - (func,2*j+1,task_info,true); - } - else - { - if (odds==evens) - { - worker[evens] = new (worker[j]->allocate_child()) - internal::partition::PartitionWork - (func,2*j+1,task_info,false); - worker[j]->spawn(*worker[evens]); - } - else - { - tbb::empty_task *child = new (worker[j]->allocate_child()) - tbb::empty_task(); - worker[j]->spawn(*child); - } - } - } - - root->wait_for_all(); - 
root->destroy(*root); - } - else // end of partition-partition, start of partition-color - { - unsigned int evens = task_info.evens; - unsigned int odds = task_info.odds; - - // check whether there is only one partition. if not, build up the - // tree of partitions - if (odds > 0) - { - tbb::empty_task *root = new ( tbb::task::allocate_root() ) tbb::empty_task; - root->set_ref_count(evens+1); - unsigned int n_blocked_workers = odds-(odds+evens+1)%2; - unsigned int n_workers = task_info.partition_color_blocks_data.size()-1- - n_blocked_workers; - std::vector*> worker(n_workers); - std::vector*> blocked_worker(n_blocked_workers); - unsigned int worker_index = 0, slice_index = 0; - unsigned int spawn_index = 0; - int spawn_index_child = -2; - internal::MPIComCompress *worker_compr = new (root->allocate_child()) - internal::MPIComCompress(dst); - worker_compr->set_ref_count(1); - for (unsigned int part=0; - partallocate_child()) - internal::color::PartitionWork(func,slice_index,task_info,false); - else - worker[worker_index] = new (root->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info,false); - slice_index++; - for (; slice_indexset_ref_count(1); - worker_index++; - worker[worker_index] = new (worker[worker_index-1]->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info,false); - } - worker[worker_index]->set_ref_count(2); - if (part>0) - { - blocked_worker[(part-1)/2]->dummy = - new (worker[worker_index]->allocate_child()) tbb::empty_task; - worker_index++; - if (spawn_index_child == -1) - worker[spawn_index]->spawn(*blocked_worker[(part-1)/2]); - else - { - Assert(spawn_index_child>=0, ExcInternalError()); - worker[spawn_index]->spawn(*worker[spawn_index_child]); - } - spawn_index = spawn_index_new; - } - else - { - internal::MPIComDistribute *worker_dist = - new (worker[worker_index]->allocate_child()) - internal::MPIComDistribute(src); - worker_dist->spawn(*worker_dist); - worker_index++; - } - part += 1; - if (partallocate_child()) - internal::color::PartitionWork(func,slice_index,task_info,true); - slice_index++; - if (slice_index< - task_info.partition_color_blocks_row_index[part+1]) - { - blocked_worker[part/2]->set_ref_count(1); - worker[worker_index] = new (blocked_worker[part/2]->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info,false); - slice_index++; - } - else - { - spawn_index_child = -1; - continue; - } - } - for (; slice_index - task_info.partition_color_blocks_row_index[part]) - { - worker[worker_index]->set_ref_count(1); - worker_index++; - } - worker[worker_index] = new (worker[worker_index-1]->allocate_child()) - internal::color::PartitionWork(func,slice_index,task_info,false); - } - spawn_index_child = worker_index; - worker_index++; - } - else - { - tbb::empty_task *final = new (worker[worker_index-1]->allocate_child()) - tbb::empty_task; - worker[spawn_index]->spawn(*final); - spawn_index_child = worker_index-1; - } - } - if (evens==odds) - { - Assert(spawn_index_child>=0, ExcInternalError()); - worker[spawn_index]->spawn(*worker[spawn_index_child]); - } - root->wait_for_all(); - root->destroy(*root); - } - // case when we only have one partition: this is the usual coloring - // scheme, and we just schedule a parallel for loop for each color - else - { - Assert(evens==1,ExcInternalError()); - internal::update_ghost_values_finish(src); - - for (unsigned int color=0; - color < task_info.partition_color_blocks_row_index[1]; - ++color) - { - unsigned int lower = 
task_info.partition_color_blocks_data[color], - upper = task_info.partition_color_blocks_data[color+1]; - parallel_for(tbb::blocked_range(lower,upper,1), - internal::color::CellWork - (func,task_info)); - } - - internal::compress_start(dst); - } - } - } - else -#endif - // serial loop - { - std::pair cell_range; - - // First operate on cells where no ghost data is needed (inner cells) - { - cell_range.first = 0; - cell_range.second = size_info.boundary_cells_start; - cell_operation (*this, dst, src, cell_range); - } - - // before starting operations on cells that contain ghost nodes (outer - // cells), wait for the MPI commands to finish - internal::update_ghost_values_finish(src); - - // For the outer cells, do the same procedure as for inner cells. - if (size_info.boundary_cells_end > size_info.boundary_cells_start) - { - cell_range.first = size_info.boundary_cells_start; - cell_range.second = size_info.boundary_cells_end; - cell_operation (*this, dst, src, cell_range); - } - - internal::compress_start(dst); - - // Finally operate on cells where no ghost data is needed (inner cells) - if (size_info.n_macro_cells > size_info.boundary_cells_end) - { - cell_range.first = size_info.boundary_cells_end; - cell_range.second = size_info.n_macro_cells; - cell_operation (*this, dst, src, cell_range); - } - } + typedef internal::MFClassWrapper, InVector, OutVector> Wrapper; + Wrapper wrap (cell_operation, nullptr, nullptr); + internal::MFWorker, InVector, OutVector, Wrapper, true> + worker(*this, src, dst, false, wrap, &Wrapper::cell_integrator, + &Wrapper::face_integrator, &Wrapper::boundary_integrator); - // In every case, we need to finish transfers at the very end - internal::compress_finish(dst); - internal::reset_ghost_values(src, ghosts_were_not_set); + task_info.loop (worker); } @@ -2637,20 +2310,9 @@ MatrixFree::cell_loop OutVector &dst, const InVector &src) const { - // here, use std::bind to hand a function handler with the appropriate - // argument to the other loop function - std::function &, - OutVector &, - const InVector &, - const std::pair &)> - function = std::bind(function_pointer, - owning_class, - std::placeholders::_1, - std::placeholders::_2, - std::placeholders::_3, - std::placeholders::_4); - cell_loop (function, dst, src); + internal::MFWorker, InVector, OutVector, CLASS, true> + worker(*this, src, dst, false, *owning_class, function_pointer, nullptr, nullptr); + task_info.loop(worker); } @@ -2669,20 +2331,9 @@ MatrixFree::cell_loop OutVector &dst, const InVector &src) const { - // here, use std::bind to hand a function handler with the appropriate - // argument to the other loop function - std::function &, - OutVector &, - const InVector &, - const std::pair &)> - function = std::bind(function_pointer, - owning_class, - std::placeholders::_1, - std::placeholders::_2, - std::placeholders::_3, - std::placeholders::_4); - cell_loop (function, dst, src); + internal::MFWorker, InVector, OutVector, CLASS, false> + worker(*this, src, dst, false, *owning_class, function_pointer, nullptr, nullptr); + task_info.loop(worker); } diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h index 0864dc8f02..390f0143c7 100644 --- a/include/deal.II/matrix_free/matrix_free.templates.h +++ b/include/deal.II/matrix_free/matrix_free.templates.h @@ -71,7 +71,6 @@ copy_from (const MatrixFree &v) shape_info = v.shape_info; cell_level_index = v.cell_level_index; task_info = v.task_info; - size_info = v.size_info; indices_are_initialized 
= v.indices_are_initialized; mapping_is_initialized = v.mapping_is_initialized; } @@ -115,19 +114,19 @@ internal_reinit(const Mapping &mapping, const parallel::Triangulation *dist_tria = dynamic_cast*> (&(dof_handler[0]->get_triangulation())); - size_info.communicator = dist_tria != nullptr ? + task_info.communicator = dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF; - size_info.my_pid = - Utilities::MPI::this_mpi_process(size_info.communicator); - size_info.n_procs = - Utilities::MPI::n_mpi_processes(size_info.communicator); + task_info.my_pid = + Utilities::MPI::this_mpi_process(task_info.communicator); + task_info.n_procs = + Utilities::MPI::n_mpi_processes(task_info.communicator); } else { - size_info.communicator = MPI_COMM_SELF; - size_info.my_pid = 0; - size_info.n_procs = 1; + task_info.communicator = MPI_COMM_SELF; + task_info.my_pid = 0; + task_info.n_procs = 1; } initialize_dof_handlers (dof_handler, additional_data.level_mg_handler); @@ -140,18 +139,12 @@ internal_reinit(const Mapping &mapping, if (additional_data.tasks_parallel_scheme != AdditionalData::none && MultithreadInfo::n_threads() > 1) { - task_info.use_multithreading = true; + task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::TasksParallelScheme(static_cast(additional_data.tasks_parallel_scheme)); task_info.block_size = additional_data.tasks_block_size; - task_info.use_partition_partition = - (additional_data.tasks_parallel_scheme == - AdditionalData::partition_partition ? true : false); - task_info.use_coloring_only = - (additional_data.tasks_parallel_scheme == - AdditionalData::color ? true : false); } else #endif - task_info.use_multithreading = false; + task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::none; // set dof_indices together with constraint_indicator and // constraint_pool_data. It also reorders the way cells are gone through @@ -165,15 +158,16 @@ internal_reinit(const Mapping &mapping, { initialize_dof_handlers(dof_handler, additional_data.level_mg_handler); std::vector dummy; - size_info.make_layout (cell_level_index.size(), - VectorizedArray::n_array_elements, - dummy, dummy); + std::vector dummy2; + task_info.collect_boundary_cells (cell_level_index.size(), cell_level_index.size(), + VectorizedArray::n_array_elements, dummy); + task_info.create_blocks_serial(dummy, dummy, 1, dummy, false, dummy, dummy2); for (unsigned int i=0; iget_fe().element_multiplicity(0); dof_info[i].dofs_per_cell.push_back(dof_handler[i]->get_fe().dofs_per_cell); - dof_info[i].row_starts.resize(size_info.n_macro_cells+1); + dof_info[i].row_starts.resize(task_info.cell_partition_data.back()+1); dof_info[i].row_starts.back()[2] = cell_level_index.size() % VectorizedArray::n_array_elements; @@ -249,19 +243,19 @@ internal_reinit(const Mapping &mapping, const parallel::Triangulation *dist_tria = dynamic_cast*> (&(dof_handler[0]->get_triangulation())); - size_info.communicator = dist_tria != nullptr ? + task_info.communicator = dist_tria != nullptr ? 
dist_tria->get_communicator() : MPI_COMM_SELF; - size_info.my_pid = - Utilities::MPI::this_mpi_process(size_info.communicator); - size_info.n_procs = - Utilities::MPI::n_mpi_processes(size_info.communicator); + task_info.my_pid = + Utilities::MPI::this_mpi_process(task_info.communicator); + task_info.n_procs = + Utilities::MPI::n_mpi_processes(task_info.communicator); } else { - size_info.communicator = MPI_COMM_SELF; - size_info.my_pid = 0; - size_info.n_procs = 1; + task_info.communicator = MPI_COMM_SELF; + task_info.my_pid = 0; + task_info.n_procs = 1; } initialize_dof_handlers (dof_handler, additional_data.level_mg_handler); @@ -274,18 +268,12 @@ internal_reinit(const Mapping &mapping, if (additional_data.tasks_parallel_scheme != AdditionalData::none && MultithreadInfo::n_threads() > 1) { - task_info.use_multithreading = true; + task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::TasksParallelScheme(static_cast(additional_data.tasks_parallel_scheme)); task_info.block_size = additional_data.tasks_block_size; - task_info.use_partition_partition = - (additional_data.tasks_parallel_scheme == - AdditionalData::partition_partition ? true : false); - task_info.use_coloring_only = - (additional_data.tasks_parallel_scheme == - AdditionalData::color ? true : false); } else #endif - task_info.use_multithreading = false; + task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::none; // set dof_indices together with constraint_indicator and // constraint_pool_data. It also reorders the way cells are gone through @@ -299,16 +287,17 @@ internal_reinit(const Mapping &mapping, { initialize_dof_handlers(dof_handler, additional_data.level_mg_handler); std::vector dummy; - size_info.make_layout (cell_level_index.size(), - VectorizedArray::n_array_elements, - dummy, dummy); + std::vector dummy2; + task_info.collect_boundary_cells (cell_level_index.size(), cell_level_index.size(), + VectorizedArray::n_array_elements, dummy); + task_info.create_blocks_serial(dummy, dummy, 1, dummy, false, dummy, dummy2); for (unsigned int i=0; iget_fe_collection().size() == 1, ExcNotImplemented()); dof_info[i].dimension = dim; dof_info[i].n_components = dof_handler[i]->get_fe(0).element_multiplicity(0); dof_info[i].dofs_per_cell.push_back(dof_handler[i]->get_fe(0).dofs_per_cell); - dof_info[i].row_starts.resize(size_info.n_macro_cells+1); + dof_info[i].row_starts.resize(task_info.cell_partition_data.back()+1); dof_info[i].row_starts.back()[2] = cell_level_index.size() % VectorizedArray::n_array_elements; @@ -410,8 +399,8 @@ initialize_dof_handlers (const std::vector*> &dof_handler, // Go through cells on zeroth level and then successively step down into // children. This gives a z-ordering of the cells, which is beneficial when // setting up neighboring relations between cells for thread parallelization - const unsigned int n_mpi_procs = size_info.n_procs; - const unsigned int my_pid = size_info.my_pid; + const unsigned int n_mpi_procs = task_info.n_procs; + const unsigned int my_pid = task_info.my_pid; const Triangulation &tria = dof_handlers.dof_handler[0]->get_triangulation(); if (level == numbers::invalid_unsigned_int) @@ -444,6 +433,10 @@ initialize_dof_handlers (const std::vector*> &dof_handler, cell_level_index.emplace_back (cell->level(), cell->index()); } } + + // All these are cells local to this processor. Therefore, set + // cell_level_index_end_local to the size of cell_level_index. 
+ cell_level_index_end_local = cell_level_index.size(); } @@ -464,8 +457,8 @@ initialize_dof_handlers (const std::vector*> &dof_hand // go through cells on zeroth level and then successively step down into // children. This gives a z-ordering of the cells, which is beneficial when // setting up neighboring relations between cells for thread parallelization - const unsigned int n_mpi_procs = size_info.n_procs; - const unsigned int my_pid = size_info.my_pid; + const unsigned int n_mpi_procs = task_info.n_procs; + const unsigned int my_pid = task_info.my_pid; // if we have no level given, use the same as for the standard DoFHandler, // otherwise we must loop through the respective level @@ -488,6 +481,10 @@ initialize_dof_handlers (const std::vector*> &dof_hand Assert(n_mpi_procs>1 || cell_level_index.size()==tria.n_active_cells(), ExcInternalError()); + + // All these are cells local to this processor. Therefore, set + // cell_level_index_end_local to the size of cell_level_index. + cell_level_index_end_local = cell_level_index.size(); } @@ -559,7 +556,7 @@ void MatrixFree::initialize_indices // set locally owned range for each component Assert (locally_owned_set[no].is_contiguous(), ExcNotImplemented()); dof_info[no].vector_partitioner.reset - (new Utilities::MPI::Partitioner(locally_owned_set[no], size_info.communicator)); + (new Utilities::MPI::Partitioner(locally_owned_set[no], task_info.communicator)); // initialize the arrays for indices dof_info[no].row_starts.resize (n_active_cells+1); @@ -654,50 +651,174 @@ void MatrixFree::initialize_indices // if we found dofs on some FE component that belong to other // processors, the cell is added to the boundary cells. - if (cell_at_boundary == true) + if (cell_at_boundary == true && counter < cell_level_index_end_local) boundary_cells.push_back(counter); } const unsigned int vectorization_length = VectorizedArray::n_array_elements; - std::vector irregular_cells; - size_info.make_layout (n_active_cells, vectorization_length, boundary_cells, - irregular_cells); + task_info.collect_boundary_cells (cell_level_index_end_local, + n_active_cells, vectorization_length, + boundary_cells); + // finalize the creation of ghosts for (unsigned int no=0; no renumbering; - if (task_info.use_multithreading == true) + std::vector irregular_cells; + if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::none) { - dof_info[0].compute_renumber_parallel (boundary_cells, size_info, - renumbering); - if (task_info.use_partition_partition == true) - dof_info[0].make_thread_graph_partition_partition - (size_info, task_info, renumbering, irregular_cells, - dof_handlers.active_dof_handler == DoFHandlers::hp); - else - dof_info[0].make_thread_graph_partition_color - (size_info, task_info, renumbering, irregular_cells, - dof_handlers.active_dof_handler == DoFHandlers::hp); + const bool strict_categories = dof_handlers.active_dof_handler == DoFHandlers::hp; + unsigned int dofs_per_cell = 0; + for (unsigned int no=0; no(), + dofs_per_cell, + dof_info[0].cell_active_fe_index, + strict_categories, + renumbering, irregular_cells); } else { - // In case, we have an hp-dofhandler, we have to reorder the cell - // according to the polynomial degree on the cell. 
- dof_info[0].compute_renumber_serial (boundary_cells, size_info, - renumbering); - if (dof_handlers.active_dof_handler == DoFHandlers::hp) - dof_info[0].compute_renumber_hp_serial (size_info, renumbering, - irregular_cells); + // For the strategies with blocking before partitioning: reorganize the + // indices in order to overlap MPI communication with computations: place + // all cells with ghost indices into one chunk. Also reorder the cells so + // that we can parallelize by threads + task_info.initial_setup_blocks_tasks(boundary_cells, renumbering, + irregular_cells); + task_info.guess_block_size (dof_info[0].dofs_per_cell[0]); + + unsigned int n_macro_cells_before = *(task_info.cell_partition_data.end()-2); + unsigned int n_ghost_slots = *(task_info.cell_partition_data.end()-1)- + n_macro_cells_before; + + unsigned int start_nonboundary = numbers::invalid_unsigned_int; + + if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::partition_color || + task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color) + { + // set up partitions. if we just use coloring without partitions, do + // nothing here, assume all cells to belong to the zero partition (that + // we otherwise use for MPI boundary cells) + if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color) + { + start_nonboundary = task_info.n_procs > 1 ? + std::min(((task_info.cell_partition_data[2]- + task_info.cell_partition_data[1]+task_info.block_size-1)/ + task_info.block_size)*task_info.block_size, + task_info.cell_partition_data[3]) : 0; + } + else + { + if (task_info.n_procs > 1) + { + task_info.cell_partition_data[1] = 0; + task_info.cell_partition_data[2] = task_info.cell_partition_data[3]; + } + start_nonboundary = task_info.cell_partition_data.back(); + } + + if (dof_handlers.active_dof_handler == DoFHandlers::hp) + { + irregular_cells.resize (0); + irregular_cells.resize (task_info.cell_partition_data.back()+ + 2*dof_info[0].max_fe_index); + std::vector<std::vector<unsigned int> > renumbering_fe_index; + renumbering_fe_index.resize(dof_info[0].max_fe_index); + unsigned int counter; + n_macro_cells_before = 0; + for (counter=0; counter 0) + dof_info[0].make_connectivity_graph(task_info, renumbering, connectivity); + + task_info.make_thread_graph(dof_info[0].cell_active_fe_index, + connectivity, renumbering, irregular_cells, + dof_handlers.active_dof_handler == DoFHandlers::hp); + + Assert(irregular_cells.size() >= task_info.cell_partition_data.back(), + ExcInternalError()); + + irregular_cells.resize(task_info.cell_partition_data.back()+n_ghost_slots); + if (n_ghost_slots > 0) + { + for (unsigned int i=task_info.cell_partition_data.back(); + i 0 ? irregular_cells[i] : vectorization_length; + AssertDimension(n_cells, task_info.n_active_cells); + n_cells = 0; + for (unsigned int i=task_info.cell_partition_data.back(); + i 0 ? irregular_cells[i] : vectorization_length; + AssertDimension(n_cells, task_info.n_ghost_cells); + } + + task_info.cell_partition_data + .push_back(task_info.cell_partition_data.back()+n_ghost_slots); }
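// [Editor's illustration, not part of the patch: assume vectorization_length
// == 4, ten locally owned cells and three ghost cells. The owned cells are
// grouped into ceil(10/4) = 3 cell batches and the ghost cells into one
// ghost batch, so the push_back above moves cell_partition_data.back() from
// 3 to 4. The entries irregular_cells[2] == 2 and irregular_cells[3] == 3
// record that the last owned batch and the ghost batch fill only 2 and 3
// SIMD lanes, respectively; an entry of 0 denotes a completely filled
// batch.]

- // Finally perform the renumbering. We also want to group several cells - // together to one "macro-cell" for vectorization (where the arithmetic - // operations will then be done simultaneously). + // Finally perform the renumbering of the degree of freedom number data. We + // also want to group several cells together to one "macro-cell" for + // vectorization (where the arithmetic operations will then be done + // simultaneously).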
#ifdef DEBUG { std::vector sorted_renumbering (renumbering); @@ -710,9 +831,9 @@ void MatrixFree::initialize_indices std::vector > cell_level_index_old; cell_level_index.swap (cell_level_index_old); - cell_level_index.reserve(size_info.n_macro_cells*vectorization_length); + cell_level_index.reserve(task_info.cell_partition_data.back()*vectorization_length); unsigned int position_cell=0; - for (unsigned int i=0; i0)? irregular_cells[i] : vectorization_length; @@ -729,8 +850,9 @@ void MatrixFree::initialize_indices (cell_level_index_old[renumbering[position_cell+n_comp-1]]); position_cell += n_comp; } - AssertDimension (position_cell, size_info.n_active_cells); - AssertDimension (cell_level_index.size(),size_info.n_macro_cells*vectorization_length); + AssertDimension (position_cell, task_info.n_active_cells + task_info.n_ghost_cells); + AssertDimension (cell_level_index.size(),task_info.cell_partition_data.back()* + vectorization_length); } // set constraint pool from the std::map and reorder the indices @@ -761,7 +883,7 @@ void MatrixFree::initialize_indices } AssertDimension(constraint_pool_data.size(), length); for (unsigned int no=0; no::clear() dof_info.clear(); mapping_info.clear(); cell_level_index.clear(); - size_info.clear(); task_info.clear(); dof_handlers.dof_handler.clear(); dof_handlers.hp_dof_handler.clear(); @@ -806,26 +927,26 @@ template void MatrixFree::print_memory_consumption (StreamType &out) const { out << " Memory cell FE operator total: --> "; - size_info.print_memory_statistics (out, memory_consumption()); + task_info.print_memory_statistics (out, memory_consumption()); out << " Memory cell index: "; - size_info.print_memory_statistics + task_info.print_memory_statistics (out, MemoryConsumption::memory_consumption (cell_level_index)); for (unsigned int j=0; j::print (std::ostream &out) const -/*-------------------- Implementation of helper functions ------------------*/ - -namespace internal -{ - namespace MatrixFreeFunctions - { - - TaskInfo::TaskInfo () - { - clear(); - } - - - - void TaskInfo::clear () - { - block_size = 0; - n_blocks = 0; - block_size_last = 0; - position_short_block = 0; - use_multithreading = false; - use_partition_partition = false; - use_coloring_only = false; - partition_color_blocks_row_index.clear(); - partition_color_blocks_data.clear(); - evens = 0; - odds = 0; - n_blocked_workers = 0; - n_workers = 0; - partition_evens.clear(); - partition_odds.clear(); - partition_n_blocked_workers.clear(); - partition_n_workers.clear(); - } - - - - std::size_t - TaskInfo::memory_consumption () const - { - return (sizeof(*this)+ - MemoryConsumption::memory_consumption (partition_color_blocks_row_index) + - MemoryConsumption::memory_consumption (partition_color_blocks_data)+ - MemoryConsumption::memory_consumption (partition_evens) + - MemoryConsumption::memory_consumption (partition_odds) + - MemoryConsumption::memory_consumption (partition_n_blocked_workers) + - MemoryConsumption::memory_consumption (partition_n_workers)); - } - - - - SizeInfo::SizeInfo () - { - clear(); - } - - - - void SizeInfo::clear() - { - n_active_cells = 0; - n_macro_cells = 0; - boundary_cells_start = 0; - boundary_cells_end = 0; - vectorization_length = 0; - locally_owned_cells = IndexSet(); - ghost_cells = IndexSet(); - communicator = MPI_COMM_SELF; - my_pid = 0; - n_procs = 0; - } - - - - template - void SizeInfo::print_memory_statistics (StreamType &out, - std::size_t data_length) const - { - Utilities::MPI::MinMaxAvg memory_c - = Utilities::MPI::min_max_avg 
(1e-6*data_length, communicator); - if (n_procs < 2) - out << memory_c.min; - else - out << memory_c.min << "/" << memory_c.avg << "/" << memory_c.max; - out << " MB" << std::endl; - } - - - - inline - void SizeInfo::make_layout (const unsigned int n_active_cells_in, - const unsigned int vectorization_length_in, - std::vector &boundary_cells, - std::vector &irregular_cells) - { - vectorization_length = vectorization_length_in; - n_active_cells = n_active_cells_in; - - unsigned int n_max_boundary_cells = boundary_cells.size(); - unsigned int n_boundary_cells = n_max_boundary_cells; - - // try to make the number of boundary cells divisible by the number of - // vectors in vectorization - - /* - // try to balance the number of cells before and after the boundary part - // on each processor. probably not worth it! - #ifdef DEAL_II_WITH_MPI - MPI_Allreduce (&n_boundary_cells, &n_max_boundary_cells, 1, MPI_UNSIGNED, - MPI_MAX, size_info.communicator); - #endif - if (n_max_boundary_cells > n_active_cells) - n_max_boundary_cells = n_active_cells; - */ - - unsigned int fillup_needed = - (vectorization_length - n_boundary_cells%vectorization_length)%vectorization_length; - if (fillup_needed > 0 && n_boundary_cells < n_active_cells) - { - // fill additional cells into the list of boundary cells to get a - // balanced number. Go through the indices successively until we - // found enough indices - std::vector new_boundary_cells; - new_boundary_cells.reserve (n_max_boundary_cells); - - unsigned int next_free_slot = 0, bound_index = 0; - while (fillup_needed > 0 && bound_index < boundary_cells.size()) - { - if (next_free_slot < boundary_cells[bound_index]) - { - // check if there are enough cells to fill with in the - // current slot - if (next_free_slot + fillup_needed <= boundary_cells[bound_index]) - { - for (unsigned int j=boundary_cells[bound_index]-fillup_needed; - j < boundary_cells[bound_index]; ++j) - new_boundary_cells.push_back(j); - fillup_needed = 0; - } - // ok, not enough indices, so just take them all up to the - // next boundary cell - else - { - for (unsigned int j=next_free_slot; - j 0 && (new_boundary_cells.size()==0 || - new_boundary_cells.back() n_active_cells) - { - irregular_cells[n_macro_cells-1] = - vectorization_length - (n_macro_cells*vectorization_length - n_active_cells); - } - if (n_procs > 1) - { - const unsigned int n_macro_boundary_cells = - (n_boundary_cells+vectorization_length-1)/vectorization_length; - boundary_cells_start = (n_macro_cells-n_macro_boundary_cells)/2; - boundary_cells_end = boundary_cells_start + n_macro_boundary_cells; - } - else - boundary_cells_start = boundary_cells_end = n_macro_cells; - } - - } -} - - DEAL_II_NAMESPACE_CLOSE #endif diff --git a/include/deal.II/matrix_free/task_info.h b/include/deal.II/matrix_free/task_info.h new file mode 100644 index 0000000000..a9e9c10a5d --- /dev/null +++ b/include/deal.II/matrix_free/task_info.h @@ -0,0 +1,518 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2011 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + +#ifndef __deal2__matrix_free_task_info_h +#define __deal2__matrix_free_task_info_h + + +#include +#include +#include +#include +#include +#include +#include +#include + + +DEAL_II_NAMESPACE_OPEN + + + +namespace internal +{ + /** + * An interface for the worker object that runs the various operations we + * want to perform during the matrix-free loop. + * + * @author Katharina Kormann, Martin Kronbichler, 2018 + */ + struct MFWorkerInterface + { + public: + virtual ~MFWorkerInterface() {} + + /// Starts the communication for the update ghost values operation + virtual void vector_update_ghosts_start() = 0; + + /// Finishes the communication for the update ghost values operation + virtual void vector_update_ghosts_finish() = 0; + + /// Starts the communication for the vector compress operation + virtual void vector_compress_start() = 0; + + /// Finishes the communication for the vector compress operation + virtual void vector_compress_finish() = 0; + + /// Zeros part of the vector according to a given range as stored in + /// DoFInfo + virtual void zero_dst_vector_range(const unsigned int range_index) = 0; + + /// Runs the cell work specified by MatrixFree::loop or + /// MatrixFree::cell_loop + virtual void cell(const std::pair<unsigned int, unsigned int> &cell_range) = 0; + + /// Runs the body of the work on interior faces specified by + /// MatrixFree::loop + virtual void face(const std::pair<unsigned int, unsigned int> &face_range) = 0; + + /// Runs the body of the work on boundary faces specified by + /// MatrixFree::loop + virtual void boundary(const std::pair<unsigned int, unsigned int> &face_range) = 0; + }; + + + + namespace MatrixFreeFunctions + { + // forward declaration of internal data structure + template <typename Number> struct ConstraintValues; + + /** + * A struct that collects all information related to parallelization with + * threads: The work is subdivided into tasks that can be done + * independently. + * + * @author Katharina Kormann, Martin Kronbichler, 2011, 2018 + */ + struct TaskInfo + { + // enum for the choice of how to build the task graph. Odd numbers + // encode versions with preblocking and even numbers versions with + // postblocking. partition_partition and partition_color are deprecated + // but kept for backward compatibility. + enum TasksParallelScheme {none, + partition_partition, + partition_color, + color + }; + + /** + * Constructor. + */ + TaskInfo (); + + /** + * Clears all the data fields and resets them + * to zero. + */ + void clear (); + + /** + * Runs the matrix-free loop. + */ + void loop(MFWorkerInterface &worker) const; + + /** + * Determines the position of cells with ghosts for distributed-memory + * calculations. + */ + void collect_boundary_cells (const unsigned int n_active_cells, + const unsigned int n_active_and_ghost_cells, + const unsigned int vectorization_length, + std::vector<unsigned int> &boundary_cells); + + /** + * Sets up the blocks for running the cell loop based on the options + * controlled by the input arguments. + * + * @param boundary_cells A list of cells that need to exchange data prior + * to performing computations. These will be given a certain id in the + * partitioning. + * + * @param dofs_per_cell Gives an expected value for the number of degrees + * of freedom on a cell, which is used to determine the block size for + * interleaving cell and face integrals. + * + * @param cell_vectorization_categories This set of categories defines + * the cells that should be grouped together inside the lanes of a + * vectorized array.
This can be the polynomial degree in an hp-element + * or a user-provided grouping. + * + * @param cell_vectorization_categories_strict Defines whether the + * categories defined by the previous variables should be separated + * strictly or whether it is allowed to insert lower categories into the + * next higher one(s). + * + * @param renumbering When leaving this function, the vector contains a + * new numbering of the cells that aligns with the grouping stored in + * this class. + * + * @param incompletely_filled_vectorization Given the vectorized layout + * of this class, some cell batches might have components in the + * vectorized array (SIMD lanes) that are not used and do not carry + * valid data. This array indicates the cell batches where this occurs + * according to the renumbering returned by this function. + */ + void + create_blocks_serial (const std::vector<unsigned int> &boundary_cells, + const std::vector<unsigned int> &cells_close_to_boundary, + const unsigned int dofs_per_cell, + const std::vector<unsigned int> &cell_vectorization_categories, + const bool cell_vectorization_categories_strict, + std::vector<unsigned int> &renumbering, + std::vector<unsigned char> &incompletely_filled_vectorization); + + /** + * First step in the block creation for the task-parallel blocking setup. + * + * @param boundary_cells A list of cells that need to exchange data prior + * to performing computations. These will be given a certain id in the + * partitioning. + * + * @param renumbering When leaving this function, the vector contains a + * new numbering of the cells that aligns with the grouping stored in + * this class (before actually creating the tasks). + * + * @param incompletely_filled_vectorization Given the vectorized layout + * of this class, some cell batches might have components in the + * vectorized array (SIMD lanes) that are not used and do not carry + * valid data. This array indicates the cell batches where this occurs + * according to the renumbering returned by this function. + */ + void + initial_setup_blocks_tasks (const std::vector<unsigned int> &boundary_cells, + std::vector<unsigned int> &renumbering, + std::vector<unsigned char> &incompletely_filled_vectorization); + + /** + * This helper function determines a block size if the user decided not + * to force a block size through MatrixFree::AdditionalData. This is + * computed based on the number of hardware threads on the system and + * the number of macro cells that we should work on. + */ + void guess_block_size (const unsigned int dofs_per_cell); + + /** + * This method goes through all cells that have been filled into @p + * dof_indices and finds out which cells can be worked on independently + * and which ones are neighboring and need to be done at different times + * when used in parallel. + * + * The strategy is based on a two-level approach. The outer level is + * subdivided into partitions similar to the type of neighbors in + * Cuthill-McKee, and the inner level is subdivided via colors (chunks + * within the same color can be worked on independently). One task is + * represented by a chunk of cells. The cell chunks are formed before + * subdivision into partitions and colors. + * + * @param renumbering On output, element j of this variable gives + * the original number of the cell that is reordered to place j by the + * ordering due to the thread graph.
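 * + * As a rough illustration of this two-level approach (an editor's + * example, not from the patch): if the outer pass groups the cell + * chunks into the partitions {0,1,2} and {3,4,5} by their + * Cuthill-McKee-like level, the inner pass assigns colors within each + * partition such that chunks of the same color share no degrees of + * freedom and may safely be worked on by different threads at the same + * time.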
+ */ + void + make_thread_graph_partition_color (DynamicSparsityPattern &connectivity, + std::vector<unsigned int> &renumbering, + std::vector<unsigned char> &irregular_cells, + const bool hp_bool); + + /** + * This function goes through all cells that have been filled into @p + * dof_indices and finds out which cells can be worked on independently + * and which ones are neighboring and need to be done at different times + * when used in parallel. + * + * The strategy is based on a two-level approach. The outer level is + * subdivided into partitions similar to the type of neighbors in + * Cuthill-McKee, and the inner level is again subdivided into Cuthill- + * McKee-like partitions (partitions whose level differs by more than 2 + * can be worked on independently). One task is represented by a chunk + * of cells. The cell chunks are formed after subdivision into the two + * levels of partitions. + * + * @param renumbering On output, element j of this variable gives + * the original number of the cell that is reordered to place j by the + * ordering due to the thread graph. + */ + void + make_thread_graph_partition_partition (const std::vector<unsigned int> &cell_active_fe_index, + DynamicSparsityPattern &connectivity, + std::vector<unsigned int> &renumbering, + std::vector<unsigned char> &irregular_cells, + const bool hp_bool); + + /** + * Calls either make_thread_graph_partition_color() or + * make_thread_graph_partition_partition(), depending on the setting in + * the data structure. This is the variant accessible from the outside. + * + * @param renumbering On output, element j of this variable gives + * the original number of the cell that is reordered to place j by the + * ordering due to the thread graph. + */ + void + make_thread_graph (const std::vector<unsigned int> &cell_active_fe_index, + DynamicSparsityPattern &connectivity, + std::vector<unsigned int> &renumbering, + std::vector<unsigned char> &irregular_cells, + const bool hp_bool); + + /** + * This function computes the connectivity between blocks of cells from + * the connectivity between the individual cells. + */ + void make_connectivity_cells_to_blocks + (const std::vector<unsigned char> &irregular_cells, + const DynamicSparsityPattern &connectivity_cells, + DynamicSparsityPattern &connectivity_blocks) const; + + /** + * Function to create the coloring on the second layer within each + * partition. + */ + void make_coloring_within_partitions_pre_blocked + (const DynamicSparsityPattern &connectivity, + const unsigned int partition, + const std::vector<unsigned int> &cell_partition, + const std::vector<unsigned int> &partition_list, + const std::vector<unsigned int> &partition_size, + std::vector<unsigned int> &partition_color_list); + + /** + * Function to create the partitioning on the second layer within each + * partition. + */ + void make_partitioning_within_partitions_post_blocked + (const DynamicSparsityPattern &connectivity, + const std::vector<unsigned int> &cell_active_fe_index, + const unsigned int partition, + const unsigned int cluster_size, + const bool hp_bool, + const std::vector<unsigned int> &cell_partition, + const std::vector<unsigned int> &partition_list, + const std::vector<unsigned int> &partition_size, + std::vector<unsigned int> &partition_partition_list, + std::vector<unsigned char> &irregular_cells); + + /** + * This function creates partitions according to the provided connectivity graph.
+ * + * @param connectivity The connectivity between the (blocks of) cells + * + * @param cluster_size The number of cells in each partition should be a + * multiple of cluster_size (for blocking later on) + * + * @param cell_partition Stores for each (block of) cell(s) the + * partition to which the block belongs + * + * @param partition_list partition_list[j] gives the old number of the + * block that should be renumbered to j due to the partitioning + * + * @param partition_size Vector pointing to the start of each partition + * (on output) + * + * @param partition The number of partitions created (on output) + */ + void + make_partitioning (const DynamicSparsityPattern &connectivity, + const unsigned int cluster_size, + std::vector<unsigned int> &cell_partition, + std::vector<unsigned int> &partition_list, + std::vector<unsigned int> &partition_size, + unsigned int &partition) const; + + /** + * Update the fields of this class for the task graph set up in + * make_thread_graph(). + */ + void + update_task_info (const unsigned int partition); + + /** + * Creates a task graph from a connectivity structure. + */ + void create_flow_graph(); + + /** + * Returns the memory consumption of the class. + */ + std::size_t memory_consumption () const; + + /** + * Prints minimum, average, and maximal memory consumption over the MPI + * processes. + */ + template <typename StreamType> + void print_memory_statistics (StreamType &out, + std::size_t data_length) const; + + /** + * Number of physical cells in the mesh, not cell batches after + * vectorization + */ + unsigned int n_active_cells; + + /** + * Number of physical ghost cells in the mesh which are subject to + * special treatment and should not be included in loops + */ + unsigned int n_ghost_cells; + + /** + * Number of lanes in the SIMD array that are used for vectorization + */ + unsigned int vectorization_length; + + /** + * Block size information for multithreading + */ + unsigned int block_size; + + /** + * Number of blocks for multithreading + */ + unsigned int n_blocks; + + /** + * Parallel scheme applied by multithreading + */ + TasksParallelScheme scheme; + + /** + * The blocks of the two-level partitioning are organized in a + * vector-of-vector fashion, and this data field @p partition_row_index + * stores where each 'vector' (i.e., each partition row) starts within + * the linear storage of all data. + */ + std::vector<unsigned int> partition_row_index; + + /** + * This is a linear storage of all partitions, building a range of + * indices of the form cell_partition_data[idx] to + * cell_partition_data[idx+1] within the integer list of all cells in + * MatrixFree, subdivided into chunks by @p partition_row_index. + */ + std::vector<unsigned int> cell_partition_data; + + /** + * This is a linear storage of all partitions of inner faces, building a + * range of indices of the form face_partition_data[idx] to + * face_partition_data[idx+1] within the integer list of all interior + * faces in MatrixFree, subdivided into chunks by @p + * partition_row_index. + */ + std::vector<unsigned int> face_partition_data; + + /** + * This is a linear storage of all partitions of boundary faces, + * building a range of indices of the form boundary_partition_data[idx] + * to boundary_partition_data[idx+1] within the integer list of all + * boundary faces in MatrixFree, subdivided into chunks by @p + * partition_row_index.
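 * + * To illustrate the indexing pattern shared by these fields (an + * editor's example, not from the patch): with partition_row_index = + * {0, 2, 4} and cell_partition_data = {0, 10, 20, 30, 40}, the first + * partition row consists of the cell-batch chunks [0,10) and [10,20) + * and the second row of the chunks [20,30) and [30,40); the face and + * boundary variants index their respective face lists in the same way.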
+ */ + std::vector<unsigned int> boundary_partition_data; + + /** + * This is a linear storage of all partitions of interior faces on + * boundaries to other processors that are not locally used, building a + * range of indices of the form ghost_face_partition_data[idx] to + * ghost_face_partition_data[idx+1] within the integer list of all such + * faces in MatrixFree, subdivided into chunks by @p + * partition_row_index. + */ + std::vector<unsigned int> ghost_face_partition_data; + + /** + * This is a linear storage of all partitions of faces for multigrid + * levels that have a coarser neighbor and are only included in certain + * residual computations but not in smoothing, building a range of + * indices of the form refinement_edge_face_partition_data[idx] to + * refinement_edge_face_partition_data[idx+1] within the integer list of + * all such faces in MatrixFree, subdivided into chunks by @p + * partition_row_index. + */ + std::vector<unsigned int> refinement_edge_face_partition_data; + + /** + * Thread information (which chunk to start 'even' partitions from) to + * be handed to the dynamic task scheduler + */ + std::vector<unsigned int> partition_evens; + + /** + * Thread information (which chunk to start 'odd' partitions from) to be + * handed to the dynamic task scheduler + */ + std::vector<unsigned int> partition_odds; + + /** + * Thread information regarding the dependencies for partitions handed + * to the dynamic task scheduler + */ + std::vector<unsigned int> partition_n_blocked_workers; + + /** + * Thread information regarding the dependencies for partitions handed + * to the dynamic task scheduler + */ + std::vector<unsigned int> partition_n_workers; + + /** + * Number of even partitions accumulated over the field @p + * partition_evens + */ + unsigned int evens; + + /** + * Number of odd partitions accumulated over the field @p + * partition_odds + */ + unsigned int odds; + + /** + * Number of blocked workers accumulated over the field @p + * partition_n_blocked_workers + */ + unsigned int n_blocked_workers; + + /** + * Number of workers accumulated over the field @p partition_n_workers + */ + unsigned int n_workers; + + /** + * Stores whether a particular task is at an MPI boundary and needs data + * exchange + */ + std::vector<unsigned int> task_at_mpi_boundary; + + /** + * MPI communicator + */ + MPI_Comm communicator; + + /** + * Rank of MPI process + */ + unsigned int my_pid; + + /** + * Number of MPI ranks in the current communicator + */ + unsigned int n_procs; + }; + + /** + * Typedef providing the deprecated name SizeInfo for backward + * compatibility. + */ + DEAL_II_DEPRECATED + typedef TaskInfo SizeInfo; + + } // end of namespace MatrixFreeFunctions +} // end of namespace internal + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/source/matrix_free/CMakeLists.txt b/source/matrix_free/CMakeLists.txt index 00b539b577..1dd55492b0 100644 --- a/source/matrix_free/CMakeLists.txt +++ b/source/matrix_free/CMakeLists.txt @@ -18,6 +18,7 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) SET(_src matrix_free.cc evaluation_selector.cc + task_info.cc ) SET(_inst
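[Editor's note: the following sketch is not part of the patch. It shows the smallest conceivable implementation of the MFWorkerInterface declared in task_info.h above: all communication hooks are no-ops, so TaskInfo::loop() reduces to invoking cell() on the cell-batch ranges recorded in cell_partition_data. The struct name is made up for illustration.]

    struct NoopWorker : public internal::MFWorkerInterface
    {
      virtual void vector_update_ghosts_start () override {}
      virtual void vector_update_ghosts_finish () override {}
      virtual void vector_compress_start () override {}
      virtual void vector_compress_finish () override {}
      virtual void zero_dst_vector_range (const unsigned int) override {}
      virtual void face (const std::pair<unsigned int,unsigned int> &) override {}
      virtual void boundary (const std::pair<unsigned int,unsigned int> &) override {}

      virtual void cell (const std::pair<unsigned int,unsigned int> &cell_range) override
      {
        // the range refers to cell-batch (macro-cell) indices as stored in
        // TaskInfo::cell_partition_data, not to individual cells
        for (unsigned int batch = cell_range.first; batch < cell_range.second; ++batch)
          {
            // integrate on cell batch 'batch'
          }
      }
    };

A worker of this kind is what MatrixFree builds internally (as internal::MFWorker) before handing it to task_info.loop(worker).

diff --git a/source/matrix_free/task_info.cc b/source/matrix_free/task_info.cc new file mode 100644 index 0000000000..13ee3c0ff1 --- /dev/null +++ b/source/matrix_free/task_info.cc @@ -0,0 +1,2015 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library.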
+// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#include +#include +#include +#include +#include +#include + +#include + + +#ifdef DEAL_II_WITH_THREADS +#include +#include +#include +#include +#endif + +#include +#include + +DEAL_II_NAMESPACE_OPEN + + + +/*-------------------- Implementation of the matrix-free loop --------------*/ +namespace internal +{ + namespace MatrixFreeFunctions + { +#ifdef DEAL_II_WITH_THREADS + + // This defines the TBB data structures that are needed to schedule the + // partition-partition variant + + namespace partition + { + class ActualCellWork + { + public: + ActualCellWork (MFWorkerInterface **worker_pointer, + const unsigned int partition, + const TaskInfo &task_info) + : + worker(nullptr), + worker_pointer(worker_pointer), + partition (partition), + task_info (task_info) + {} + + ActualCellWork (MFWorkerInterface &worker, + const unsigned int partition, + const TaskInfo &task_info) + : + worker (&worker), + worker_pointer (nullptr), + partition (partition), + task_info (task_info) + {} + + void operator() () const + { + MFWorkerInterface *used_worker = worker != 0 ? worker : *worker_pointer; + Assert(used_worker != 0, ExcInternalError()); + used_worker->cell(std::make_pair(task_info.cell_partition_data[partition], + task_info.cell_partition_data[partition+1])); + + if (task_info.face_partition_data.empty() == false) + { + used_worker->face(std::make_pair(task_info.face_partition_data[partition], + task_info.face_partition_data[partition+1])); + + used_worker->boundary(std::make_pair(task_info.boundary_partition_data[partition], + task_info.boundary_partition_data[partition+1])); + } + } + + private: + MFWorkerInterface *worker; + MFWorkerInterface **worker_pointer; + const unsigned int partition; + const TaskInfo &task_info; + }; + + class CellWork : public tbb::task + { + public: + CellWork (MFWorkerInterface &worker, + const unsigned int partition, + const TaskInfo &task_info, + const bool is_blocked) + : + dummy (nullptr), + work (worker, partition, task_info), + is_blocked (is_blocked) + {} + + tbb::task *execute () + { + work(); + + if (is_blocked==true) + dummy->spawn (*dummy); + return nullptr; + } + + tbb::empty_task *dummy; + + private: + ActualCellWork work; + const bool is_blocked; + }; + + + + class PartitionWork : public tbb::task + { + public: + PartitionWork (MFWorkerInterface &function_in, + const unsigned int partition_in, + const TaskInfo &task_info_in, + const bool is_blocked_in = false) + : + dummy (nullptr), + function (function_in), + partition (partition_in), + task_info (task_info_in), + is_blocked (is_blocked_in) + {}; + tbb::task *execute () + { + tbb::empty_task *root = new( tbb::task::allocate_root() )tbb::empty_task; + const unsigned int evens = task_info.partition_evens[partition]; + const unsigned int odds = task_info.partition_odds[partition]; + const unsigned int n_blocked_workers = + task_info.partition_n_blocked_workers[partition]; + const unsigned int n_workers = task_info.partition_n_workers[partition]; + std::vector worker(n_workers); + std::vector blocked_worker(n_blocked_workers); + + 
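// [Editor's note on the task tree built below: even-numbered chunks (index
// 2*j) run as ordinary CellWork tasks, while odd-numbered chunks (2*j+1)
// are wrapped as 'blocked' workers. A blocked worker releases the dummy
// empty_task installed as an extra child of the following even task, which
// is what the set_ref_count(2) calls prepare; neighboring chunks are
// thereby ordered while independent ones may overlap.]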
root->set_ref_count(evens+1); + for (unsigned int j=0; jallocate_child()) + CellWork(function, task_info. + partition_row_index[partition]+2*j, + task_info, false); + if (j>0) + { + worker[j]->set_ref_count(2); + blocked_worker[j-1]->dummy = new(worker[j]->allocate_child()) + tbb::empty_task; + worker[j-1]->spawn(*blocked_worker[j-1]); + } + else + worker[j]->set_ref_count(1); + if (jallocate_child()) + CellWork(function, task_info. + partition_row_index + [partition] + 2*j+1, task_info, true); + } + else + { + if (odds==evens) + { + worker[evens] = new(worker[j]->allocate_child()) + CellWork(function, task_info. + partition_row_index[partition]+2*j+1, + task_info, false); + worker[j]->spawn(*worker[evens]); + } + else + { + tbb::empty_task *child = new(worker[j]->allocate_child()) + tbb::empty_task(); + worker[j]->spawn(*child); + } + } + } + + root->wait_for_all(); + root->destroy(*root); + if (is_blocked==true) + dummy->spawn (*dummy); + return nullptr; + } + + tbb::empty_task *dummy; + + private: + MFWorkerInterface &function; + const unsigned int partition; + const TaskInfo &task_info; + const bool is_blocked; + }; + + } // end of namespace partition + + + + namespace color + { + class CellWork + { + public: + CellWork (MFWorkerInterface &worker_in, + const TaskInfo &task_info_in, + const unsigned int partition_in) + : + worker (worker_in), + task_info (task_info_in), + partition (partition_in) + {}; + void operator()(const tbb::blocked_range &r) const + { + const unsigned int start_index = task_info.cell_partition_data[partition] + + task_info.block_size * r.begin(); + const unsigned int end_index = std::min(start_index + + task_info.block_size*(r.end()-r.begin()), + task_info.cell_partition_data[partition+1]); + worker.cell(std::make_pair(start_index, end_index)); + + if (task_info.face_partition_data.empty() == false) + { + AssertThrow(false, ExcNotImplemented()); + } + } + private: + MFWorkerInterface &worker; + const TaskInfo &task_info; + const unsigned int partition; + }; + + + + class PartitionWork : public tbb::task + { + public: + PartitionWork (MFWorkerInterface &worker_in, + const unsigned int partition_in, + const TaskInfo &task_info_in, + const bool is_blocked_in) + : + dummy (nullptr), + worker (worker_in), + partition (partition_in), + task_info (task_info_in), + is_blocked (is_blocked_in) + {}; + tbb::task *execute () + { + const unsigned int n_chunks = (task_info.cell_partition_data[partition+1]- + task_info.cell_partition_data[partition]+ + task_info.block_size-1)/task_info.block_size; + parallel_for(tbb::blocked_range(0,n_chunks,1), + CellWork (worker,task_info,partition)); + if (is_blocked==true) + dummy->spawn (*dummy); + return nullptr; + } + + tbb::empty_task *dummy; + + private: + MFWorkerInterface &worker; + const unsigned int partition; + const TaskInfo &task_info; + const bool is_blocked; + }; + + } // end of namespace color + + + + class MPICommunication : public tbb::task + { + public: + MPICommunication (MFWorkerInterface &worker_in, + const bool do_compress) + : + worker(worker_in), + do_compress(do_compress) + {}; + + tbb::task *execute () + { + if (do_compress == false) + worker.vector_update_ghosts_finish(); + else + worker.vector_compress_start(); + return 0; + } + + private: + MFWorkerInterface &worker; + const bool do_compress; + }; + +#endif // DEAL_II_WITH_THREADS + + + + void + TaskInfo::loop(MFWorkerInterface &funct) const + { + funct.vector_update_ghosts_start(); + +#ifdef DEAL_II_WITH_THREADS + + if (scheme != none) + { + 
funct.zero_dst_vector_range(numbers::invalid_unsigned_int); + if (scheme == partition_partition) + { + tbb::empty_task *root = new( tbb::task::allocate_root() ) + tbb::empty_task; + root->set_ref_count(evens+1); + std::vector worker(n_workers); + std::vector + blocked_worker(n_blocked_workers); + MPICommunication *worker_compr = + new(root->allocate_child())MPICommunication(funct, true); + worker_compr->set_ref_count(1); + for (unsigned int j=0; j0) + { + worker[j] = new(root->allocate_child()) + partition::PartitionWork (funct,2*j,*this,false); + worker[j]->set_ref_count(2); + blocked_worker[j-1]->dummy = new(worker[j]->allocate_child()) + tbb::empty_task; + if (j>1) + worker[j-1]->spawn(*blocked_worker[j-1]); + else + worker_compr->spawn(*blocked_worker[j-1]); + } + else + { + worker[j] = new(worker_compr->allocate_child()) + partition::PartitionWork (funct,2*j,*this,false); + worker[j]->set_ref_count(2); + MPICommunication *worker_dist = + new (worker[j]->allocate_child())MPICommunication(funct, false); + worker_dist->spawn(*worker_dist); + } + if (jallocate_child()) + partition::PartitionWork(funct,2*j+1,*this,true); + } + else + { + if (odds==evens) + { + worker[evens] = new(worker[j]->allocate_child()) + partition::PartitionWork(funct,2*j+1,*this,false); + worker[j]->spawn(*worker[evens]); + } + else + { + tbb::empty_task *child = new(worker[j]->allocate_child()) + tbb::empty_task(); + worker[j]->spawn(*child); + } + } + } + + root->wait_for_all(); + root->destroy(*root); + } + else // end of partition-partition, start of partition-color + { + // check whether there is only one partition. if not, build up the + // tree of partitions + if (odds > 0) + { + tbb::empty_task *root = new( tbb::task::allocate_root() ) tbb::empty_task; + root->set_ref_count(evens+1); + const unsigned int n_blocked_workers = odds-(odds+evens+1)%2; + const unsigned int n_workers = cell_partition_data.size()-1- + n_blocked_workers; + std::vector worker(n_workers); + std::vector blocked_worker(n_blocked_workers); + unsigned int worker_index = 0, slice_index = 0; + unsigned int spawn_index = 0; + int spawn_index_child = -2; + MPICommunication *worker_compr = + new (root->allocate_child())MPICommunication(funct, true); + worker_compr->set_ref_count(1); + for (unsigned int part=0; partallocate_child()) + color::PartitionWork(funct,slice_index,*this,false); + else + worker[worker_index] = new(root->allocate_child()) + color::PartitionWork(funct,slice_index,*this,false); + slice_index++; + for (; slice_indexset_ref_count(1); + worker_index++; + worker[worker_index] = new (worker[worker_index-1]->allocate_child()) + color::PartitionWork(funct,slice_index,*this,false); + } + worker[worker_index]->set_ref_count(2); + if (part>0) + { + blocked_worker[(part-1)/2]->dummy = + new (worker[worker_index]->allocate_child()) tbb::empty_task; + worker_index++; + if (spawn_index_child == -1) + worker[spawn_index]->spawn(*blocked_worker[(part-1)/2]); + else + { + Assert(spawn_index_child>=0, ExcInternalError()); + worker[spawn_index]->spawn(*worker[spawn_index_child]); + } + spawn_index = spawn_index_new; + spawn_index_child = -2; + } + else + { + MPICommunication *worker_dist = + new (worker[worker_index]->allocate_child())MPICommunication(funct,false); + worker_dist->spawn(*worker_dist); + worker_index++; + } + part += 1; + if (partallocate_child()) + color::PartitionWork(funct,slice_index,*this,true); + slice_index++; + if (slice_indexset_ref_count(1); + worker[worker_index] = new(blocked_worker[part/2]->allocate_child()) + 
color::PartitionWork(funct,slice_index,*this,false); + slice_index++; + } + else + { + spawn_index_child = -1; + continue; + } + } + for (; slice_indexpartition_row_index[part]) + { + worker[worker_index]->set_ref_count(1); + worker_index++; + } + worker[worker_index] = new (worker[worker_index-1]->allocate_child()) + color::PartitionWork(funct,slice_index,*this,false); + } + spawn_index_child = worker_index; + worker_index++; + } + else + { + tbb::empty_task *final = new (worker[worker_index-1]->allocate_child()) + tbb::empty_task; + worker[spawn_index]->spawn(*final); + spawn_index_child = worker_index-1; + } + } + if (evens==odds) + { + Assert(spawn_index_child>=0, ExcInternalError()); + worker[spawn_index]->spawn(*worker[spawn_index_child]); + } + root->wait_for_all(); + root->destroy(*root); + } + // case when we only have one partition: this is the usual coloring + // scheme, and we just schedule a parallel for loop for each color + else + { + Assert(evens<=1,ExcInternalError()); + funct.vector_update_ghosts_finish(); + + for (unsigned int color=0; color < partition_row_index[1]; ++color) + { + tbb::empty_task *root = new( tbb::task::allocate_root() ) tbb::empty_task; + root->set_ref_count(2); + color::PartitionWork *worker = + new (root->allocate_child())color::PartitionWork(funct,color,*this,false); + root->spawn(*worker); + root->wait_for_all(); + root->destroy(*root); + } + + funct.vector_compress_start(); + } + } + } + else +#endif + // serial loop, go through up to three times and do the MPI transfer at + // the beginning/end of the second part + { + for (unsigned int part = 0; part < partition_row_index.size()-2; ++part) + { + if (part == 1) + funct.vector_update_ghosts_finish(); + + for (unsigned int i=partition_row_index[part]; i cell_partition_data[i]) + { + funct.zero_dst_vector_range(i); + funct.cell(std::make_pair(cell_partition_data[i], + cell_partition_data[i+1])); + } + + if (face_partition_data.empty() == false) + { + if (face_partition_data[i+1] > face_partition_data[i]) + funct.face(std::make_pair(face_partition_data[i], + face_partition_data[i+1])); + if (boundary_partition_data[i+1] > boundary_partition_data[i]) + funct.boundary(std::make_pair(boundary_partition_data[i], + boundary_partition_data[i+1])); + } + } + + if (part == 1) + funct.vector_compress_start(); + } + } + funct.vector_compress_finish(); + } + + + + TaskInfo::TaskInfo () + { + clear(); + } + + + + void TaskInfo::clear () + { + n_active_cells = 0; + n_ghost_cells = 0; + vectorization_length = 1; + block_size = 0; + n_blocks = 0; + scheme = none; + partition_row_index.clear(); + cell_partition_data.clear(); + face_partition_data.clear(); + boundary_partition_data.clear(); + evens = 0; + odds = 0; + n_blocked_workers = 0; + n_workers = 0; + partition_evens.clear(); + partition_odds.clear(); + partition_n_blocked_workers.clear(); + partition_n_workers.clear(); + communicator = MPI_COMM_SELF; + my_pid = 0; + n_procs = 1; + } + + + + template + void TaskInfo::print_memory_statistics (StreamType &out, + const std::size_t data_length) const + { + Utilities::MPI::MinMaxAvg memory_c + = Utilities::MPI::min_max_avg (1e-6*data_length, communicator); + if (n_procs < 2) + out << memory_c.min; + else + out << memory_c.min << "/" << memory_c.avg << "/" << memory_c.max; + out << " MB" << std::endl; + } + + + + std::size_t + TaskInfo::memory_consumption () const + { + return (sizeof(*this)+ + MemoryConsumption::memory_consumption (partition_row_index) + + MemoryConsumption::memory_consumption (cell_partition_data) 
+ + MemoryConsumption::memory_consumption (face_partition_data) + + MemoryConsumption::memory_consumption (boundary_partition_data) + + MemoryConsumption::memory_consumption (partition_evens) + + MemoryConsumption::memory_consumption (partition_odds) + + MemoryConsumption::memory_consumption (partition_n_blocked_workers) + + MemoryConsumption::memory_consumption (partition_n_workers)); + } + + + + void + TaskInfo::collect_boundary_cells (const unsigned int n_active_cells_in, + const unsigned int n_active_and_ghost_cells, + const unsigned int vectorization_length_in, + std::vector &boundary_cells) + { + vectorization_length = vectorization_length_in; + n_active_cells = n_active_cells_in; + n_ghost_cells = n_active_and_ghost_cells - n_active_cells; + + // try to make the number of boundary cells divisible by the number of + // vectors in vectorization + unsigned int fillup_needed = + (vectorization_length - boundary_cells.size()%vectorization_length)%vectorization_length; + if (fillup_needed > 0 && boundary_cells.size() < n_active_cells) + { + // fill additional cells into the list of boundary cells to get a + // balanced number. Go through the indices successively until we + // found enough indices + std::vector new_boundary_cells; + new_boundary_cells.reserve (boundary_cells.size()); + + unsigned int next_free_slot = 0, bound_index = 0; + while (fillup_needed > 0 && bound_index < boundary_cells.size()) + { + if (next_free_slot < boundary_cells[bound_index]) + { + // check if there are enough cells to fill with in the + // current slot + if (next_free_slot + fillup_needed <= boundary_cells[bound_index]) + { + for (unsigned int j=boundary_cells[bound_index]-fillup_needed; + j < boundary_cells[bound_index]; ++j) + new_boundary_cells.push_back(j); + fillup_needed = 0; + } + // ok, not enough indices, so just take them all up to the + // next boundary cell + else + { + for (unsigned int j=next_free_slot; + j 0 && (new_boundary_cells.size()==0 || + new_boundary_cells.back() &boundary_cells, + const std::vector &cells_close_to_boundary, + const unsigned int dofs_per_cell, + const std::vector &cell_vectorization_categories, + const bool cell_vectorization_categories_strict, + std::vector &renumbering, + std::vector &incompletely_filled_vectorization) + { + const unsigned int n_macro_cells = + (n_active_cells + vectorization_length - 1) / vectorization_length; + const unsigned int n_ghost_slots = + (n_ghost_cells + vectorization_length - 1) / vectorization_length; + const unsigned int n_boundary_cells = boundary_cells.size(); + + incompletely_filled_vectorization.resize (n_macro_cells+n_ghost_slots); + renumbering.resize(n_active_cells + n_ghost_cells, + numbers::invalid_unsigned_int); + + // Define the outer number of partitions. In the MPI case, we have three + // partitions (part before comm, part with comm, part after comm) + if (n_procs == 1) + partition_row_index.resize(3); + else + partition_row_index.resize(5); + + // Initially mark the cells according to the MPI ranking + std::vector cell_marked(n_active_cells+n_ghost_cells, 0); + if (n_procs > 1) + { + for (unsigned int i=0; i tight_category_map; + if (cell_vectorization_categories.empty() == false) + { + AssertDimension(cell_vectorization_categories.size(), + n_active_cells+n_ghost_cells); + + // create a tight map of categories for not taking exceeding amounts + // of memory below. Sort the new categories by the numbers in the + // old one. 
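// [Editor's illustration for the code below: if the incoming per-cell
// categories are {7, 3, 7, 42}, the sorted set of used categories is
// (3, 7, 42), so the tight map produced below is {1, 0, 1, 2} with
// n_categories == 3.]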
+ tight_category_map.reserve(n_active_cells+n_ghost_cells); + std::set used_categories; + for (unsigned int i=0; i used_categories_vector(used_categories.size()); + n_categories = 0; + for (auto &it : used_categories) + used_categories_vector[n_categories++] = it; + for (unsigned int i=0; i > renumbering_category(n_categories); + for (unsigned int block=1; block<(n_procs>1 ? 5 : 3); ++block) + { + // step 1: sort by category + for (unsigned int i=0; i 1) + for (unsigned int j=n_categories-1; j>0; --j) + { + unsigned int lower_index = j-1; + while (renumbering_category[j].size()%vectorization_length) + { + while (renumbering_category[j].size()%vectorization_length && + !renumbering_category[lower_index].empty()) + { + renumbering_category[j].push_back(renumbering_category[lower_index].back()); + renumbering_category[lower_index].pop_back(); + } + if (lower_index == 0) + break; + else + --lower_index; + } + } + + // step 3: append cells according to categories + for (unsigned int j=0; j= n_macro_cells+n_ghost_slots, ExcInternalError()) + else + AssertDimension(n_cells, n_macro_cells+n_ghost_slots); + AssertDimension(cell_partition_data.back(), n_cells); + AssertDimension(counter, n_active_cells+n_ghost_cells); + + incompletely_filled_vectorization.resize(cell_partition_data.back()); + } + + + + void + TaskInfo + ::initial_setup_blocks_tasks(const std::vector &boundary_cells, + std::vector &renumbering, + std::vector &incompletely_filled_vectorization) + { + const unsigned int n_macro_cells = + (n_active_cells + vectorization_length - 1) / vectorization_length; + const unsigned int n_ghost_slots = + (n_ghost_cells + vectorization_length - 1) / vectorization_length; + incompletely_filled_vectorization.resize (n_macro_cells+n_ghost_slots); + if (n_macro_cells*vectorization_length > n_active_cells) + incompletely_filled_vectorization[n_macro_cells-1] = + vectorization_length - (n_macro_cells*vectorization_length - n_active_cells); + if (n_ghost_slots*vectorization_length > n_ghost_cells) + incompletely_filled_vectorization[n_macro_cells+n_ghost_slots-1] = + vectorization_length - (n_ghost_slots*vectorization_length - n_ghost_cells); + + std::vector reverse_numbering (n_active_cells, + numbers::invalid_unsigned_int); + for (unsigned int j=0; j 1) + { + const unsigned int n_macro_boundary_cells = + (boundary_cells.size()+vectorization_length-1)/vectorization_length; + cell_partition_data.push_back((n_macro_cells-n_macro_boundary_cells)/2); + cell_partition_data.push_back(cell_partition_data[1] + n_macro_boundary_cells); + } + else + AssertDimension(boundary_cells.size(), 0); + cell_partition_data.push_back(n_macro_cells); + cell_partition_data.push_back(cell_partition_data.back() + n_ghost_slots); + partition_row_index.resize(n_procs > 1 ? 4 : 2); + partition_row_index[0] = 0; + partition_row_index[1] = 1; + if (n_procs > 1) + { + partition_row_index[2] = 2; + partition_row_index[3] = 3; + } + } + + + + void + TaskInfo::guess_block_size (const unsigned int dofs_per_cell) + { + // user did not say a positive number, so we have to guess + if (block_size == 0) + { + // we would like to have enough work to do, so as first guess, try + // to get 16 times as many chunks as we have threads on the system. 
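// [Editor's worked example for the heuristic below: with n_active_cells =
// 100000, 8 threads and vectorization_length = 4, the first guess is
// 100000/(8*16*4) = 195 cell batches per block. With dofs_per_cell = 27
// the grain-size checks leave this value untouched (27*195 = 5265 lies
// between 200 and 10000), and the final rounding gives block_size =
// 1 << (unsigned int)log2(195+1) = 128.]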
+  void
+  TaskInfo::guess_block_size (const unsigned int dofs_per_cell)
+  {
+    // user did not say a positive number, so we have to guess
+    if (block_size == 0)
+      {
+        // we would like to have enough work to do, so as first guess, try
+        // to get 16 times as many chunks as we have threads on the system.
+        block_size = n_active_cells / (MultithreadInfo::n_threads() *
+                                       16 * vectorization_length);
+
+        // if there are too few degrees of freedom per cell, need to
+        // increase the block size
+        const unsigned int minimum_parallel_grain_size = 200;
+        if (dofs_per_cell * block_size < minimum_parallel_grain_size)
+          block_size = (minimum_parallel_grain_size / dofs_per_cell + 1);
+        if (dofs_per_cell * block_size > 10000)
+          block_size /= 4;
+
+        // finally, round to the next power of two
+        block_size = 1 << (unsigned int)(log2(block_size+1));
+      }
+    if (block_size > n_active_cells)
+      block_size = std::max(1U, n_active_cells);
+  }
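The heuristic rounds the guessed block size via 1 << (unsigned int)log2(b+1), i.e. to the largest power of two not exceeding b+1. A tiny driver to see the mapping (hypothetical, not part of deal.II):

    #include <cmath>
    #include <iostream>

    int main()
    {
      for (unsigned int b : {1u, 3u, 7u, 12u, 100u})
        std::cout << b << " -> "
                  << (1u << static_cast<unsigned int>(std::log2(b + 1)))
                  << '\n';
      // prints: 1 -> 2, 3 -> 4, 7 -> 8, 12 -> 8, 100 -> 64
    }
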
+  void
+  TaskInfo::make_thread_graph_partition_color
+  (DynamicSparsityPattern &connectivity_large,
+   std::vector<unsigned int> &renumbering,
+   std::vector<unsigned char> &irregular_cells,
+   const bool)
+  {
+    const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+    if (n_macro_cells == 0)
+      return;
+
+    Assert (vectorization_length > 0, ExcInternalError());
+
+    unsigned int partition = 0, counter = 0;
+
+    // Create connectivity graph for blocks based on connectivity graph for cells.
+    DynamicSparsityPattern connectivity(n_blocks, n_blocks);
+    make_connectivity_cells_to_blocks(irregular_cells, connectivity_large, connectivity);
+
+    // Create cell-block partitioning.
+
+    // For each block of cells, this variable saves to which partition the
+    // block belongs. Initialize all to numbers::invalid_unsigned_int to
+    // mark them as not yet assigned a partition.
+    std::vector<unsigned int> cell_partition(n_blocks,
+                                             numbers::invalid_unsigned_int);
+
+    // In element j of this variable, one puts the old number of the block
+    // that should be the jth block in the new numeration.
+    std::vector<unsigned int> partition_list (n_blocks,0);
+    std::vector<unsigned int> partition_color_list(n_blocks,0);
+
+    // This vector points to the start of each partition.
+    std::vector<unsigned int> partition_size (2,0);
+
+    //blocking_connectivity = true;
+
+    // The cluster_size in make_partitioning defines that the no. of cells
+    // in each partition should be a multiple of cluster_size.
+    unsigned int cluster_size = 1;
+
+    // Make the partitioning of the first layer of the blocks of cells.
+    make_partitioning (connectivity, cluster_size, cell_partition,
+                       partition_list, partition_size, partition);
+
+    // Color the cells within each partition
+    make_coloring_within_partitions_pre_blocked
+    (connectivity, partition, cell_partition,
+     partition_list, partition_size, partition_color_list);
+
+    partition_list = renumbering;
+
+#ifdef DEBUG
+    // in debug mode, check that the partition color list is one-to-one
+    {
+      std::vector<unsigned int> sorted_pc_list (partition_color_list);
+      std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
+      for (unsigned int i=0; i<sorted_pc_list.size(); ++i)
+        Assert(sorted_pc_list[i] == i, ExcInternalError());
+    }
+#endif
+
+    // set the start list for each block and compute the renumbering of
+    // cells
+    std::vector<unsigned int> block_start(n_macro_cells+1);
+    std::vector<unsigned char> irregular(n_macro_cells);
+
+    unsigned int mcell_start=0;
+    block_start[0] = 0;
+    for (unsigned int block=0; block<n_blocks; block++)
+      {
+        block_start[block+1] = block_start[block];
+        for (unsigned int mcell=mcell_start;
+             mcell<std::min(mcell_start+block_size, n_macro_cells); ++mcell)
+          {
+            unsigned int n_comp = (irregular_cells[mcell]>0)
+                                  ?irregular_cells[mcell]:vectorization_length;
+            block_start[block+1] += n_comp;
+            ++counter;
+          }
+        mcell_start += block_size;
+      }
+    counter = 0;
+    unsigned int counter_macro = 0;
+    unsigned int block_size_last =
+      n_macro_cells - block_size * (n_blocks-1);
+    if (block_size_last == 0)
+      block_size_last = block_size;
+
+    unsigned int tick = 0;
+    for (unsigned int block=0; block<n_blocks; block++)
+      {
+        unsigned int present_block = partition_color_list[block];
+        for (unsigned int cell = block_start[present_block];
+             cell<block_start[present_block+1]; ++cell)
+          renumbering[counter++] = partition_list[cell];
+        unsigned int this_block_size = (present_block == n_blocks-1)?
+                                       block_size_last:block_size;
+
+        // Also re-compute the content of cell_partition_data to contain the
+        // numbers of cells, not blocks
+        if (cell_partition_data[tick] == block)
+          cell_partition_data[tick++] = counter_macro;
+
+        for (unsigned int j=0; j<this_block_size; j++)
+          irregular[counter_macro++] =
+            irregular_cells[present_block*block_size+j];
+      }
+    AssertDimension(tick+1, cell_partition_data.size());
+    cell_partition_data.back() = counter_macro;
+
+    irregular_cells.swap(irregular);
+    AssertDimension (counter, n_active_cells);
+    AssertDimension (counter_macro, n_macro_cells);
+
+    // in debug mode, check that the renumbering is one-to-one
+#ifdef DEBUG
+    {
+      std::vector<unsigned int> sorted_renumbering (renumbering);
+      std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+      for (unsigned int i=0; i<sorted_renumbering.size(); ++i)
+        Assert(sorted_renumbering[i] == i, ExcInternalError());
+    }
+#endif
+
+    update_task_info(partition); // Actually sets too much for partition color case
+
+    AssertDimension(cell_partition_data.back(), n_macro_cells);
+  }
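The coloring used above assigns each block the smallest color not taken by any already-colored neighbor within the same partition, so blocks of equal color can be worked on concurrently. A compact sketch of that greedy rule on a plain adjacency list (illustrative only, not the deal.II data structures):

    #include <vector>

    std::vector<unsigned int>
    greedy_color(const std::vector<std::vector<unsigned int>> &graph)
    {
      const unsigned int invalid = static_cast<unsigned int>(-1);
      std::vector<unsigned int> color(graph.size(), invalid);
      for (unsigned int node = 0; node < graph.size(); ++node)
        {
          // at most degree+1 colors are ever needed for this node
          std::vector<bool> taken(graph[node].size() + 1, false);
          for (const unsigned int neighbor : graph[node])
            if (color[neighbor] != invalid && color[neighbor] < taken.size())
              taken[color[neighbor]] = true;
          unsigned int c = 0;
          while (taken[c]) // find the smallest free color
            ++c;
          color[node] = c;
        }
      return color;
    }
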
+  void
+  TaskInfo::make_thread_graph (const std::vector<unsigned int> &cell_active_fe_index,
+                               DynamicSparsityPattern &connectivity,
+                               std::vector<unsigned int> &renumbering,
+                               std::vector<unsigned char> &irregular_cells,
+                               const bool hp_bool)
+  {
+    const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+    if (n_macro_cells == 0)
+      return;
+
+    Assert (vectorization_length > 0, ExcInternalError());
+
+    // if we want to block before partitioning, create connectivity graph
+    // for blocks based on connectivity graph for cells.
+    DynamicSparsityPattern connectivity_blocks(n_blocks, n_blocks);
+    make_connectivity_cells_to_blocks(irregular_cells, connectivity, connectivity_blocks);
+
+    unsigned int n_blocks = 0;
+    if (scheme == partition_color || scheme == color) // blocking_connectivity == true
+      n_blocks = this->n_blocks;
+    else
+      n_blocks = n_active_cells;
+
+    // For each block of cells, this variable saves to which partition the
+    // block belongs. Initialize all to numbers::invalid_unsigned_int to
+    // mark them as not yet assigned a partition.
+    std::vector<unsigned int> cell_partition(n_blocks,
+                                             numbers::invalid_unsigned_int);
+
+    // In element j of this variable, one puts the old number (but after
+    // renumbering according to the input renumbering) of the block that
+    // should be the jth block in the new numeration.
+    std::vector<unsigned int> partition_list (n_blocks,0);
+    std::vector<unsigned int> partition_2layers_list(n_blocks,0);
+
+    // This vector points to the start of each partition.
+    std::vector<unsigned int> partition_size (2,0);
+
+    unsigned int partition = 0;
+
+    // Within the partitions we want to be able to block for the case that
+    // we do not block already in the connectivity. The cluster_size in
+    // make_partitioning defines that the no. of cells in each partition
+    // should be a multiple of cluster_size.
+    unsigned int cluster_size = 1;
+    if (scheme == partition_partition)
+      cluster_size = block_size*vectorization_length;
+
+    // Make the partitioning of the first layer of the blocks of cells.
+    if (scheme == partition_color || scheme == color)
+      make_partitioning (connectivity_blocks, cluster_size, cell_partition,
+                         partition_list, partition_size, partition);
+    else
+      make_partitioning (connectivity, cluster_size, cell_partition,
+                         partition_list, partition_size, partition);
+
+    // Partition or color the second layer
+    if (scheme == partition_partition)
+      {
+        // Partition within partitions.
+        make_partitioning_within_partitions_post_blocked
+        (connectivity, cell_active_fe_index, partition, cluster_size, hp_bool,
+         cell_partition, partition_list, partition_size,
+         partition_2layers_list, irregular_cells);
+      }
+    else if (scheme == partition_color || scheme == color)
+      {
+        make_coloring_within_partitions_pre_blocked
+        (connectivity_blocks, partition, cell_partition,
+         partition_list, partition_size, partition_2layers_list);
+      }
+
+    // in debug mode, check that the partition_2layers_list is one-to-one
+#ifdef DEBUG
+    {
+      std::vector<unsigned int> sorted_pc_list (partition_2layers_list);
+      std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
+      for (unsigned int i=0; i<sorted_pc_list.size(); ++i)
+        Assert(sorted_pc_list[i] == i, ExcInternalError());
+    }
+#endif
+
+    // Set the new renumbering
+    std::vector<unsigned int> renumbering_in (n_active_cells,0);
+    renumbering_in.swap(renumbering);
+    if (scheme == partition_partition) // blocking_connectivity == false
+      {
+        // This is the simple case. The renumbering is just a combination of
+        // the renumbering that we were given as an input and the
+        // renumbering of partition/coloring given in partition_2layers_list
+        for (unsigned int j=0; j<renumbering.size(); ++j)
+          renumbering[j] = renumbering_in[partition_2layers_list[j]];
+      }
+    else // blocking_connectivity == true
+      {
+        // set the start list for each block and compute the renumbering of
+        // cells
+        std::vector<unsigned int> block_start(n_macro_cells+1);
+        std::vector<unsigned char> irregular(n_macro_cells);
+
+        unsigned int counter = 0;
+        unsigned int mcell_start=0;
+        block_start[0] = 0;
+        for (unsigned int block=0; block<n_blocks; block++)
+          {
+            block_start[block+1] = block_start[block];
+            for (unsigned int mcell=mcell_start;
+                 mcell<std::min(mcell_start+block_size, n_macro_cells); ++mcell)
+              {
+                unsigned int n_comp = (irregular_cells[mcell]>0)
+                                      ?irregular_cells[mcell]:vectorization_length;
+                block_start[block+1] += n_comp;
+                ++counter;
+              }
+            mcell_start += block_size;
+          }
+        counter = 0;
+        unsigned int counter_macro = 0;
+        unsigned int block_size_last =
+          n_macro_cells - block_size * (n_blocks-1);
+        if (block_size_last == 0)
+          block_size_last = block_size;
+
+        unsigned int tick = 0;
+        for (unsigned int block=0; block<n_blocks; block++)
+          {
+            unsigned int present_block = partition_2layers_list[block];
+            for (unsigned int cell = block_start[present_block];
+                 cell<block_start[present_block+1]; ++cell)
+              renumbering[counter++] = renumbering_in[partition_list[cell]];
+            unsigned int this_block_size = (present_block == n_blocks-1)?
+                                           block_size_last:block_size;
+
+            // Also re-compute the content of cell_partition_data to contain
+            // the numbers of cells, not blocks
+            if (cell_partition_data[tick] == block)
+              cell_partition_data[tick++] = counter_macro;
+
+            for (unsigned int j=0; j<this_block_size; j++)
+              irregular[counter_macro++] =
+                irregular_cells[present_block*block_size+j];
+          }
+        AssertDimension(tick+1, cell_partition_data.size());
+        cell_partition_data.back() = counter_macro;
+
+        irregular_cells.swap(irregular);
+        AssertDimension (counter, n_active_cells);
+        AssertDimension (counter_macro, n_macro_cells);
+      }
+
+    // in debug mode, check that the renumbering is one-to-one
+#ifdef DEBUG
+    {
+      std::vector<unsigned int> sorted_renumbering (renumbering);
+      std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+      for (unsigned int i=0; i<sorted_renumbering.size(); ++i)
+        Assert(sorted_renumbering[i] == i, ExcInternalError());
+    }
+#endif
+
+    update_task_info(partition);
+  }
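In the simple case above, the final renumbering is the composition of the input renumbering with the second-layer ordering, following the pattern renumbering[j] = renumbering_in[list[j]]. The same composition as a self-contained helper (illustrative):

    #include <vector>

    // Chain two index maps: 'second' maps a new position to an intermediate
    // position, 'first' maps that to the original numbering.
    std::vector<unsigned int>
    compose(const std::vector<unsigned int> &first,
            const std::vector<unsigned int> &second)
    {
      std::vector<unsigned int> result(second.size());
      for (unsigned int j = 0; j < second.size(); ++j)
        result[j] = first[second[j]];
      return result;
    }
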
+  void
+  TaskInfo::make_thread_graph_partition_partition
+  (const std::vector<unsigned int> &cell_active_fe_index,
+   DynamicSparsityPattern &connectivity,
+   std::vector<unsigned int> &renumbering,
+   std::vector<unsigned char> &irregular_cells,
+   const bool hp_bool)
+  {
+    const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+    if (n_macro_cells == 0)
+      return;
+
+    const unsigned int cluster_size = block_size*vectorization_length;
+
+    // Create cell-block partitioning.
+
+    // For each cell, this variable saves to which partition the cell
+    // belongs. Initialize all to numbers::invalid_unsigned_int to mark them
+    // as not yet assigned a partition.
+    std::vector<unsigned int> cell_partition (n_active_cells,
+                                              numbers::invalid_unsigned_int);
+
+    // In element j of this variable, one puts the old number of the cell
+    // that should be the jth cell in the new numeration.
+    std::vector<unsigned int> partition_list(n_active_cells,0);
+    std::vector<unsigned int> partition_partition_list(n_active_cells,0);
+
+    // This vector points to the start of each partition.
+    std::vector<unsigned int> partition_size(2,0);
+
+    unsigned int partition = 0;
+    // Here, we do not block inside the connectivity graph
+    //blocking_connectivity = false;
+
+    // Make the partitioning of the first layer of the blocks of cells.
+    make_partitioning (connectivity, cluster_size, cell_partition,
+                       partition_list, partition_size, partition);
+
+    // Partition within partitions.
+    make_partitioning_within_partitions_post_blocked
+    (connectivity, cell_active_fe_index, partition, cluster_size, hp_bool,
+     cell_partition, partition_list, partition_size,
+     partition_partition_list, irregular_cells);
+
+    partition_list.swap(renumbering);
+
+    for (unsigned int j=0; j<renumbering.size(); ++j)
+      renumbering[j] = partition_list[partition_partition_list[j]];
+
+    for (unsigned int i=0; i<n_ghost_cells; ++i)
+      renumbering.push_back(i+n_active_cells);
+
+    update_task_info(partition);
+  }
+
+
+
+  void
+  TaskInfo::make_connectivity_cells_to_blocks
+  (const std::vector<unsigned char> &irregular_cells,
+   const DynamicSparsityPattern &connectivity_cells,
+   DynamicSparsityPattern &connectivity_blocks) const
+  {
+    std::vector<std::vector<unsigned int> > cell_blocks(n_blocks);
+    std::vector<unsigned int> touched_cells(n_active_cells);
+    unsigned int cell = 0;
+    for (unsigned int i=0, mcell=0; i<n_blocks; ++i)
+      {
+        for (unsigned int c=0;
+             c<block_size && mcell<*(cell_partition_data.end()-2); ++c, ++mcell)
+          {
+            unsigned int ncomp = (irregular_cells[mcell]>0)
+                                 ?irregular_cells[mcell]:vectorization_length;
+            for (unsigned int c=0; c<ncomp; ++c, ++cell)
+              {
+                cell_blocks[i].push_back(cell);
+                touched_cells[cell] = i;
+              }
+          }
+      }
+    AssertDimension(cell, n_active_cells);
+    for (unsigned int i=0; i<cell_blocks.size(); ++i)
+      for (unsigned int col=0; col<cell_blocks[i].size(); ++col)
+        {
+          for (DynamicSparsityPattern::iterator
+               it = connectivity_cells.begin(cell_blocks[i][col]);
+               it != connectivity_cells.end(cell_blocks[i][col]); ++it)
+            {
+              if (touched_cells[it->column()] != i)
+                connectivity_blocks.add(i,touched_cells[it->column()]);
+            }
+        }
+  }
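make_connectivity_cells_to_blocks condenses the cell graph into a block graph: two blocks are connected whenever any of their cells are. The same condensation with plain STL containers standing in for DynamicSparsityPattern (sketch only):

    #include <set>
    #include <vector>

    std::vector<std::set<unsigned int>>
    condense_to_blocks(const std::vector<std::set<unsigned int>> &cell_graph,
                       const std::vector<unsigned int>           &block_of_cell,
                       const unsigned int                         n_blocks)
    {
      std::vector<std::set<unsigned int>> block_graph(n_blocks);
      for (unsigned int cell = 0; cell < cell_graph.size(); ++cell)
        for (const unsigned int neighbor : cell_graph[cell])
          // only record couplings between two different blocks
          if (block_of_cell[cell] != block_of_cell[neighbor])
            block_graph[block_of_cell[cell]].insert(block_of_cell[neighbor]);
      return block_graph;
    }
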
+  // Function to create partitioning on the second layer within each
+  // partition. Version without preblocking.
+  void
+  TaskInfo::make_partitioning_within_partitions_post_blocked
+  (const DynamicSparsityPattern &connectivity,
+   const std::vector<unsigned int> &cell_active_fe_index,
+   const unsigned int partition,
+   const unsigned int cluster_size,
+   const bool hp_bool,
+   const std::vector<unsigned int> &cell_partition,
+   const std::vector<unsigned int> &partition_list,
+   const std::vector<unsigned int> &partition_size,
+   std::vector<unsigned int> &partition_partition_list,
+   std::vector<unsigned char> &irregular_cells)
+  {
+    const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+    const unsigned int n_ghost_slots =
+      *(cell_partition_data.end()-1)-n_macro_cells;
+
+    // List of cells in previous partition
+    std::vector<unsigned int> neighbor_list;
+    // List of cells in current partition for use as neighbors in next partition
+    std::vector<unsigned int> neighbor_neighbor_list;
+
+    std::vector<unsigned int> renumbering(n_active_cells);
+
+    irregular_cells.back() = 0;
+    irregular_cells.resize(n_active_cells+n_ghost_slots);
+
+    // the number of active FE indices (at least one, so that the non-hp
+    // path can use entry 0 below)
+    unsigned int max_fe_index = 1;
+    for (unsigned int i=0; i<cell_active_fe_index.size(); ++i)
+      max_fe_index = std::max (cell_active_fe_index[i]+1, max_fe_index);
+
+    {
+      unsigned int n_macro_cells_before = 0;
+      // Create partitioning within partitions.
+
+      // For each cell, this variable saves to which partition-partition the
+      // cell belongs. Initialize all to numbers::invalid_unsigned_int to
+      // mark them as not yet assigned.
+      std::vector<unsigned int> cell_partition_l2(n_active_cells,
+                                                  numbers::invalid_unsigned_int);
+      partition_row_index.clear();
+      partition_row_index.resize(partition+1,0);
+      cell_partition_data.resize(1,0);
+
+      unsigned int start_up = 0;
+      unsigned int counter = 0;
+      unsigned int missing_macros;
+      for (unsigned int part=0; part<partition; ++part)
+        {
+          neighbor_neighbor_list.resize(0);
+          neighbor_list.resize(0);
+          bool work = true;
+          unsigned int partition_l2 = 0;
+          start_up = partition_size[part];
+          unsigned int partition_counter = 0;
+          while (work)
+            {
+              if (neighbor_list.size() == 0)
+                {
+                  work = false;
+                  partition_counter = 0;
+                  for (unsigned int j=start_up; j<partition_size[part+1]; ++j)
+                    if (cell_partition[partition_list[j]] == part &&
+                        cell_partition_l2[partition_list[j]] ==
+                        numbers::invalid_unsigned_int)
+                      {
+                        start_up = j;
+                        work = true;
+                        partition_counter = 1;
+                        // To start up, set the start_up cell to the current
+                        // partition-partition and list all its neighbors.
+                        cell_partition_l2[partition_list[start_up]] = partition_l2;
+                        neighbor_neighbor_list.push_back(partition_list[start_up]);
+                        partition_partition_list[counter++] = partition_list[start_up];
+                        start_up++;
+                        break;
+                      }
+                }
+              else
+                {
+                  partition_counter = 0;
+                  for (unsigned int j=0; j<neighbor_list.size(); ++j)
+                    {
+                      DynamicSparsityPattern::iterator
+                      neighbor = connectivity.begin(neighbor_list[j]),
+                      end = connectivity.end(neighbor_list[j]);
+                      for (; neighbor!=end ; ++neighbor)
+                        {
+                          if (cell_partition[neighbor->column()] == part &&
+                              cell_partition_l2[neighbor->column()]==
+                              numbers::invalid_unsigned_int)
+                            {
+                              cell_partition_l2[neighbor->column()] = partition_l2;
+                              neighbor_neighbor_list.push_back(neighbor->column());
+                              partition_partition_list[counter++] = neighbor->column();
+                              partition_counter++;
+                            }
+                        }
+                    }
+                }
+              if (partition_counter>0)
+                {
+                  int index_before = neighbor_neighbor_list.size(),
+                      index = index_before;
+                  {
+                    // put the cells into separate lists for each FE index
+                    // within one partition-partition
+                    missing_macros = 0;
+                    std::vector<unsigned int> remaining_per_macro_cell
+                    (max_fe_index);
+                    std::vector<std::vector<unsigned int> >
+                    renumbering_fe_index;
+                    unsigned int cell;
+                    bool filled = true;
+                    if (hp_bool == true)
+                      {
+                        renumbering_fe_index.resize(max_fe_index);
+                        for (cell=counter-partition_counter; cell<counter; ++cell)
+                          renumbering_fe_index
+                          [cell_active_fe_index.empty() ? 0 :
+                           cell_active_fe_index[partition_partition_list[cell]]]
+                          .push_back(partition_partition_list[cell]);
+                        // check how many more cells are needed in each
+                        // category to fill up complete macro cells
+                        for (unsigned int j=0; j<max_fe_index; ++j)
+                          {
+                            remaining_per_macro_cell[j] =
+                              renumbering_fe_index[j].size()%vectorization_length;
+                            if (remaining_per_macro_cell[j] != 0)
+                              filled = false;
+                            missing_macros += ((renumbering_fe_index[j].size()+
+                                                vectorization_length-1)/vectorization_length);
+                          }
+                      }
+                    else
+                      {
+                        remaining_per_macro_cell[0] =
+                          partition_counter%vectorization_length;
+                        if (remaining_per_macro_cell[0] != 0)
+                          filled = false;
+                        missing_macros = ((partition_counter+vectorization_length-1)/
+                                          vectorization_length);
+                      }
+                    missing_macros = cluster_size - (missing_macros%cluster_size);
+
+                    // now we realized that there are some cells missing.
+                    while (missing_macros>0 || filled == false)
+                      {
+                        if (index==0)
+                          {
+                            index = neighbor_neighbor_list.size();
+                            if (index == index_before)
+                              {
+                                if (missing_macros != 0)
+                                  {
+                                    neighbor_neighbor_list.resize(0);
+                                  }
+                                start_up--;
+                                break;// not connected - start again
+                              }
+                            index_before = index;
+                          }
+                        index--;
+                        unsigned int additional = neighbor_neighbor_list
+                                                  [index];
+
+                        // go through the neighbors of the last cell in the
+                        // current partition and check if we find some to
+                        // fill up with.
+                        DynamicSparsityPattern::iterator
+                        neighbor = connectivity.begin(additional),
+                        end = connectivity.end(additional);
+                        for (; neighbor!=end ; ++neighbor)
+                          {
+                            if (cell_partition[neighbor->column()] == part &&
+                                cell_partition_l2[neighbor->column()] ==
+                                numbers::invalid_unsigned_int)
+                              {
+                                unsigned int this_index = 0;
+                                if (hp_bool == true)
+                                  this_index = cell_active_fe_index.empty() ? 0 :
+                                               cell_active_fe_index[neighbor->column()];
+
+                                // Only add this cell if we need more macro
+                                // cells in the current block or if there is
+                                // a macro cell with the FE index that is
+                                // not yet fully populated
+                                if (missing_macros > 0 ||
+                                    remaining_per_macro_cell[this_index] > 0)
+                                  {
+                                    cell_partition_l2[neighbor->column()] = partition_l2;
+                                    neighbor_neighbor_list.push_back(neighbor->column());
+                                    if (hp_bool == true)
+                                      renumbering_fe_index[this_index].
+                                      push_back(neighbor->column());
+                                    partition_partition_list[counter] =
+                                      neighbor->column();
+                                    counter++;
+                                    partition_counter++;
+                                    if (remaining_per_macro_cell[this_index]
+                                        == 0 && missing_macros > 0)
+                                      missing_macros--;
+                                    remaining_per_macro_cell[this_index]++;
+                                    if (remaining_per_macro_cell[this_index]
+                                        == vectorization_length)
+                                      {
+                                        remaining_per_macro_cell[this_index] = 0;
+                                      }
+                                    if (missing_macros == 0)
+                                      {
+                                        filled = true;
+                                        for (unsigned int fe_ind=0;
+                                             fe_ind<max_fe_index; ++fe_ind)
+                                          if (remaining_per_macro_cell[fe_ind] != 0)
+                                            filled = false;
+                                      }
+                                    if (filled == true)
+                                      break;
+                                  }
+                              }
+                          }
+                      }
+                    if (hp_bool == true)
+                      {
+                        // set the renumbering according to the active FE
+                        // index within one partition-partition which was
+                        // implicitly assumed above
+                        cell = counter - partition_counter;
+                        for (unsigned int j=0; j<max_fe_index; ++j)
+                          {
+                            for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); ++jj)
+                              renumbering[cell++] = renumbering_fe_index[j][jj];
+                            if (renumbering_fe_index[j].size()%vectorization_length != 0)
+                              irregular_cells[renumbering_fe_index[j].size()/
+                                              vectorization_length+
+                                              n_macro_cells_before] =
+                                renumbering_fe_index[j].size()%vectorization_length;
+                            n_macro_cells_before +=
+                              (renumbering_fe_index[j].size()+
+                               vectorization_length-1)/vectorization_length;
+                            renumbering_fe_index[j].resize(0);
+                          }
+                      }
+                    else
+                      {
+                        n_macro_cells_before +=
+                          (partition_counter+vectorization_length-1)/
+                          vectorization_length;
+                        if (partition_counter%vectorization_length != 0)
+                          irregular_cells[n_macro_cells_before-1] =
+                            partition_counter%vectorization_length;
+                      }
+                  }
+
+                  // mark the end of the current partition-partition
+                  cell_partition_data.push_back(n_macro_cells_before);
+                  partition_l2++;
+                }
+              neighbor_list = neighbor_neighbor_list;
+              neighbor_neighbor_list.resize(0);
+            }
+          partition_row_index[part+1] =
+            partition_row_index[part] + partition_l2;
+        }
+
+      // the ghost cells get one chunk at the very end
+      cell_partition_data.push_back(n_macro_cells_before+n_ghost_slots);
+      AssertDimension(cell_partition_data.back(), n_macro_cells+n_ghost_slots);
+      irregular_cells.resize(n_macro_cells_before+n_ghost_slots);
+    }
+
+    // in the hp case, the order sorted by FE index within the
+    // partition-partitions replaces the breadth-first order collected above
+    if (hp_bool == true)
+      partition_partition_list.swap(renumbering);
+  }
+
+
+
+  // Function to create coloring on the second layer within each partition.
+  // Version assumes preblocking.
+  void
+  TaskInfo::make_coloring_within_partitions_pre_blocked
+  (const DynamicSparsityPattern &connectivity,
+   const unsigned int partition,
+   const std::vector<unsigned int> &cell_partition,
+   const std::vector<unsigned int> &partition_list,
+   const std::vector<unsigned int> &partition_size,
+   std::vector<unsigned int> &partition_color_list)
+  {
+    const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+    std::vector<unsigned int> neighbor_list;
+    std::vector<unsigned int> neighbor_neighbor_list;
+    std::vector<unsigned int> cell_color(n_blocks, n_macro_cells);
+    std::vector<bool> color_finder;
+
+    partition_row_index.resize(partition+1);
+    cell_partition_data.clear();
+    unsigned int color_counter = 0, index_counter = 0;
+    for (unsigned int part=0; part<partition; part++)
+      {
+        partition_row_index[part] = index_counter;
+        unsigned int max_color = 0;
+        for (unsigned int k=partition_size[part];
+             k<partition_size[part+1]; k++)
+          {
+            unsigned int cell = partition_list[k];
+            unsigned int n_neighbors = connectivity.row_length(cell);
+
+            // In the worst case, each neighbor has a different color. So we
+            // find at least one available color in 0..n_neighbors.
+            color_finder.resize(n_neighbors+1);
+            for (unsigned int j=0; j<=n_neighbors; ++j)
+              color_finder[j] = true;
+            DynamicSparsityPattern::iterator
+            neighbor = connectivity.begin(cell),
+            end = connectivity.end(cell);
+            for (; neighbor!=end ; ++neighbor)
+              {
+                // Mark the color as taken if a neighbor within the same
+                // partition has it
+                if (cell_partition[neighbor->column()] == part &&
+                    cell_color[neighbor->column()] <= n_neighbors)
+                  color_finder[cell_color[neighbor->column()]] = false;
+              }
+            // Choose the smallest color that is not taken for the block
+            cell_color[cell]=0;
+            while (color_finder[cell_color[cell]] == false)
+              cell_color[cell]++;
+            if (cell_color[cell] > max_color)
+              max_color = cell_color[cell];
+          }
+        // Reorder within partition: First, all blocks that belong to color
+        // 0 and then so on until those with color max (Note that the
+        // smaller the number the larger the partition)
+        for (unsigned int color=0; color<=max_color; color++)
+          {
+            cell_partition_data.push_back(color_counter);
+            index_counter++;
+            for (unsigned int k=partition_size[part];
+                 k<partition_size[part+1]; k++)
+              {
+                unsigned int cell=partition_list[k];
+                if (cell_color[cell] == color)
+                  partition_color_list[color_counter++] = cell;
+              }
+          }
+      }
+    cell_partition_data.push_back(n_blocks);
+    partition_row_index[partition] = index_counter;
+    AssertDimension (color_counter, n_blocks);
+  }
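Within one partition-partition, the hp path above keeps one list per active FE index so that each SIMD batch only contains cells of a single index. The bucketing idea in isolation, with hypothetical names (not the deal.II interface):

    #include <vector>

    std::vector<unsigned int>
    group_by_fe_index(const std::vector<unsigned int> &cells,
                      const std::vector<unsigned int> &active_fe_index,
                      const unsigned int               n_fe_indices)
    {
      std::vector<std::vector<unsigned int>> buckets(n_fe_indices);
      for (const unsigned int cell : cells)
        buckets[active_fe_index[cell]].push_back(cell);

      // concatenate the buckets index by index: cells of equal FE index end
      // up contiguous and can form homogeneous SIMD batches
      std::vector<unsigned int> ordered;
      ordered.reserve(cells.size());
      for (const auto &bucket : buckets)
        ordered.insert(ordered.end(), bucket.begin(), bucket.end());
      return ordered;
    }
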
+  // Function to create the partitioning of the first layer.
+  void
+  TaskInfo::make_partitioning (const DynamicSparsityPattern &connectivity,
+                               const unsigned int cluster_size,
+                               std::vector<unsigned int> &cell_partition,
+                               std::vector<unsigned int> &partition_list,
+                               std::vector<unsigned int> &partition_size,
+                               unsigned int &partition) const
+  {
+    // For each block of cells, this variable saves to which partition the
+    // block belongs. Initialize all to numbers::invalid_unsigned_int to
+    // mark them as not yet assigned a partition.
+    //std::vector<unsigned int> cell_partition (n_active_cells,
+    //                                          numbers::invalid_unsigned_int);
+    // List of cells in previous partition
+    std::vector<unsigned int> neighbor_list;
+    // List of cells in current partition for use as neighbors in next partition
+    std::vector<unsigned int> neighbor_neighbor_list;
+
+    // In element j of this variable, one puts the old number of the block
+    // that should be the jth block in the new numeration.
+    //std::vector<unsigned int> partition_list(n_active_cells,0);
+
+    // This vector points to the start of each partition.
+    //std::vector<unsigned int> partition_size(2,0);
+
+    partition = 0;
+    unsigned int counter=0;
+    unsigned int start_nonboundary = cell_partition_data.size() == 5 ?
+                                     vectorization_length * (cell_partition_data[2] -
+                                                             cell_partition_data[1]) : 0;
+
+    const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+    if (n_macro_cells == 0)
+      return;
+    if (scheme == color)
+      start_nonboundary = n_macro_cells;
+    if (scheme == partition_color || scheme == color) // blocking_connectivity == true
+      start_nonboundary = ((start_nonboundary+block_size-1)/block_size);
+    unsigned int n_blocks;
+    if (scheme == partition_color || scheme == color) // blocking_connectivity == true
+      n_blocks = this->n_blocks;
+    else
+      n_blocks = n_active_cells;
+
+    if (start_nonboundary > n_blocks)
+      start_nonboundary = n_blocks;
+
+    unsigned int start_up = 0;
+    bool work = true;
+    unsigned int remainder = cluster_size;
+
+    // this performs a classical breadth-first search in the connectivity
+    // graph of the cells under the restriction that the size of the
+    // partitions should be a multiple of the given block size
+    while (work)
+      {
+        // put the cells with neighbors on remote MPI processes up front
+        if (start_nonboundary>0)
+          {
+            for (unsigned int cell=0; cell<start_nonboundary; ++cell)
+              {
+                cell_partition[cell] = partition;
+                neighbor_list.push_back(cell);
+                partition_list[counter++] = cell;
+                partition_size.back()++;
+              }
+            remainder -= (start_nonboundary%cluster_size);
+            if (remainder == cluster_size)
+              remainder = 0;
+            start_nonboundary = 0;
+          }
+        else
+          {
+            // To start up, set the start_up cell to the current partition
+            // and list all its neighbors.
+            cell_partition[start_up] = partition;
+            neighbor_list.push_back(start_up);
+            partition_list[counter++] = start_up;
+            partition_size.back()++;
+            remainder--;
+            if (remainder == cluster_size)
+              remainder = 0;
+          }
+        int index_stop = 0;
+        int index_before = neighbor_list.size(), index = index_before;
+        while (remainder>0)
+          {
+            if (index==index_stop)
+              {
+                index = neighbor_list.size();
+                if (index == index_before)
+                  {
+                    neighbor_list.resize(0);
+                    goto not_connect;
+                  }
+                index_stop = index_before;
+                index_before = index;
+              }
+            index--;
+            unsigned int additional = neighbor_list[index];
+            DynamicSparsityPattern::iterator neighbor =
+              connectivity.begin(additional),
+              end = connectivity.end(additional);
+            for (; neighbor!=end ; ++neighbor)
+              {
+                if (cell_partition[neighbor->column()]==numbers::invalid_unsigned_int)
+                  {
+                    partition_size.back()++;
+                    cell_partition[neighbor->column()] = partition;
+                    neighbor_list.push_back(neighbor->column());
+                    partition_list[counter++] = neighbor->column();
+                    remainder--;
+                    if (remainder == 0)
+                      break;
+                  }
+              }
+          }
+
+        while (neighbor_list.size()>0)
+          {
+            partition++;
+
+            // counter for number of cells so far in current partition
+            unsigned int partition_counter = 0;
+
+            // Mark the start of the new partition
+            partition_size.push_back(partition_size.back());
+
+            // Loop through the list of cells in previous partition and put
+            // all their neighbors in current partition
+            for (unsigned int j=0; j<neighbor_list.size(); ++j)
+              {
+                DynamicSparsityPattern::iterator neighbor =
+                  connectivity.begin(neighbor_list[j]),
+                  end = connectivity.end(neighbor_list[j]);
+                for (; neighbor!=end ; ++neighbor)
+                  {
+                    if (cell_partition[neighbor->column()]==numbers::invalid_unsigned_int)
+                      {
+                        partition_size.back()++;
+                        cell_partition[neighbor->column()] = partition;
+
+                        // collect the cells of the current partition for
+                        // use as neighbors in next partition
+                        neighbor_neighbor_list.push_back(neighbor->column());
+                        partition_list[counter++] = neighbor->column();
+                        partition_counter++;
+                      }
+                  }
+              }
+            remainder = cluster_size-(partition_counter%cluster_size);
+            if (remainder == cluster_size)
+              remainder = 0;
+            int index_stop = 0;
+            int index_before = neighbor_neighbor_list.size(), index = index_before;
+            while (remainder>0)
+              {
+                if (index==index_stop)
+                  {
+                    index = neighbor_neighbor_list.size();
+                    if (index == index_before)
+                      {
+                        neighbor_neighbor_list.resize(0);
+                        break;
+                      }
+                    index_stop = index_before;
+                    index_before = index;
+                  }
+                index--;
+                unsigned int additional = neighbor_neighbor_list[index];
+                DynamicSparsityPattern::iterator neighbor =
+                  connectivity.begin(additional),
+                  end = connectivity.end(additional);
+                for (; neighbor!=end ; ++neighbor)
+                  {
+                    if (cell_partition[neighbor->column()]==numbers::invalid_unsigned_int)
+                      {
+                        partition_size.back()++;
+                        cell_partition[neighbor->column()] = partition;
+                        neighbor_neighbor_list.push_back(neighbor->column());
+                        partition_list[counter++] = neighbor->column();
+                        remainder--;
+                        if (remainder == 0)
+                          break;
+                      }
+                  }
+              }
+
+            neighbor_list = neighbor_neighbor_list;
+            neighbor_neighbor_list.resize(0);
+          }
+not_connect:
+        // One has to check if the graph is not connected, in which case we
+        // have to find another start cell for the next round.
+        work = false;
+        for (unsigned int j=start_up; j<n_blocks; ++j)
+          if (cell_partition[j] == numbers::invalid_unsigned_int)
+            {
+              start_up = j;
+              work = true;
+              if (remainder == 0)
+                remainder = cluster_size;
+              break;
+            }
+      }
+    partition++;
+
+    AssertDimension (partition_size[partition], n_blocks);
+  }
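make_partitioning grows partitions layer by layer from seed cells, a breadth-first sweep over the connectivity graph. Stripped of the cluster-size bookkeeping, the core looks like this (plain adjacency lists, illustrative only):

    #include <vector>

    // Assign each reachable node the BFS layer in which it is first
    // visited; each layer corresponds to one partition above.
    std::vector<unsigned int>
    bfs_layers(const std::vector<std::vector<unsigned int>> &graph,
               const unsigned int                             seed)
    {
      const unsigned int invalid = static_cast<unsigned int>(-1);
      std::vector<unsigned int> layer(graph.size(), invalid);
      std::vector<unsigned int> current{seed}, next;
      layer[seed] = 0;
      for (unsigned int l = 1; !current.empty(); ++l)
        {
          for (const unsigned int node : current)
            for (const unsigned int neighbor : graph[node])
              if (layer[neighbor] == invalid)
                {
                  layer[neighbor] = l;
                  next.push_back(neighbor);
                }
          current.swap(next);
          next.clear();
        }
      return layer; // disconnected nodes keep 'invalid', as handled above
    }
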
+  void
+  TaskInfo::update_task_info (const unsigned int partition)
+  {
+    evens = (partition+1)/2;
+    odds = partition/2;
+    n_blocked_workers = odds - (odds+evens+1)%2;
+    n_workers = evens + odds - n_blocked_workers;
+
+    // From here on, the partition_* variables describe the chunks of work
+    // within each outer partition
+    partition_evens.resize(partition);
+    partition_odds.resize(partition);
+    partition_n_blocked_workers.resize(partition);
+    partition_n_workers.resize(partition);
+    for (unsigned int part=0; part<partition; ++part)
+      {
+        partition_evens[part] =
+          (partition_row_index[part+1] - partition_row_index[part] + 1)/2;
+        partition_odds[part] =
+          (partition_row_index[part+1] - partition_row_index[part])/2;
+        partition_n_blocked_workers[part] =
+          partition_odds[part] - (partition_odds[part] +
+                                  partition_evens[part] + 1)%2;
+        partition_n_workers[part] = partition_evens[part] +
+                                    partition_odds[part] -
+                                    partition_n_blocked_workers[part];
+      }
+  }
+
+  } // namespace MatrixFreeFunctions
+} // namespace internal
+
+
+template void internal::MatrixFreeFunctions::TaskInfo::
+print_memory_statistics<std::ostream> (std::ostream &, const std::size_t) const;
+template void internal::MatrixFreeFunctions::TaskInfo::
+print_memory_statistics<ConditionalOStream> (ConditionalOStream &, const std::size_t) const;
+
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/tests/matrix_free/step-48.cc b/tests/matrix_free/step-48.cc
index 64eefbbe49..486e94339a 100644
--- a/tests/matrix_free/step-48.cc
+++ b/tests/matrix_free/step-48.cc
@@ -91,7 +91,7 @@ namespace Step48
     FEEvaluation<dim,fe_degree,fe_degree+1> fe_eval(data);
     const unsigned int n_q_points = fe_eval.n_q_points;
 
-    for (unsigned int cell=0; cell<data.get_size_info().n_macro_cells; ++cell)
+    for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
       {
@@ ... @@
     FEEvaluation<dim,fe_degree,fe_degree+1> fe_eval(data);
     const unsigned int n_q_points = fe_eval.n_q_points;
 
-    for (unsigned int cell=0; cell<data.get_size_info().n_macro_cells; ++cell)
+    for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
      {
@@ ... @@
       SineGordonProblem<dim> sg_problem;
       sg_problem.run ();
     }
-
diff --git a/tests/matrix_free/step-48c.cc b/tests/matrix_free/step-48c.cc
index 59703f4e62..a680ad0cf2 100644
--- a/tests/matrix_free/step-48c.cc
+++ b/tests/matrix_free/step-48c.cc
@@ -91,7 +91,7 @@ namespace Step48
     FEEvaluation<dim,fe_degree,fe_degree+1> fe_eval(data);
     const unsigned int n_q_points = fe_eval.n_q_points;
 
-    for (unsigned int cell=0; cell<data.get_size_info().n_macro_cells; ++cell)
+    for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
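The test updates above replace data.get_size_info().n_macro_cells by the accessor data.n_macro_cells(). Assuming a MatrixFree<dim> object named data as in step-48, a cell-batch loop now reads (sketch of the usage, following the hunks above):

    FEEvaluation<dim,fe_degree,fe_degree+1> fe_eval(data);
    for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
      {
        fe_eval.reinit(cell);
        // ... evaluate, integrate and distribute as before ...
      }
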