* together and interprets them as one cell only, as is needed for
* vectorization.
*/
- void reorder_cells (const SizeInfo &size_info,
- const std::vector<unsigned int> &renumbering,
- const std::vector<unsigned int> &constraint_pool_row_index,
- const std::vector<unsigned int> &irregular_cells,
- const unsigned int vectorization_length);
-
- /**
- * This helper function determines a block size if the user decided not
- * to force a block size through MatrixFree::AdditionalData. This is
- * computed based on the number of hardware threads on the system and
- * the number of macro cells that we should work on.
- */
- void guess_block_size (const SizeInfo &size_info,
- TaskInfo &task_info);
-
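   // Illustrative sketch (not part of this patch): the block-size heuristic
   // described above, written as a free function. The names n_macro_cells,
   // n_threads and dofs_per_cell_0 are assumed inputs; the constants (about
   // 50 chunks of work per thread, a minimum grain of 500 dofs) mirror the
   // implementation of guess_block_size() further down in this file.
   inline unsigned int
   guessed_block_size (const unsigned int n_macro_cells,
                       const unsigned int n_threads,
                       const unsigned int dofs_per_cell_0)
   {
     // aim for roughly 50 chunks of work per thread
     unsigned int block_size = n_macro_cells / (n_threads * 50);

     // enlarge the block if it would contain too few degrees of freedom
     const unsigned int minimum_parallel_grain_size = 500;
     if (dofs_per_cell_0 * block_size < minimum_parallel_grain_size)
       block_size = minimum_parallel_grain_size / dofs_per_cell_0 + 1;

     // never use more cells per block than there are macro cells
     if (block_size > n_macro_cells)
       block_size = n_macro_cells;
     return block_size;
   }
   // Example: 10000 macro cells, 8 threads and 27 dofs per cell give
   // 10000/(8*50) = 25 cells per block; 27*25 = 675 >= 500, so 25 is kept.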
- /**
- * This method goes through all cells that have been filled into @p
- * dof_indices and finds out which cells can be worked on independently
- * and which ones are neighboring and need to be done at different times
- * when used in parallel.
- *
- * The strategy is based on a two-level approach. The outer level is
- * subdivided into partitions similar to the type of neighbors in
- * Cuthill-McKee, and the inner level is subdivided via colors (chunks
- * within the same color can be worked on independently). One task is
- * represented by a chunk of cells. The cell chunks are formed before
- * subdivision into partitions and colors.
- */
- void
- make_thread_graph_partition_color (SizeInfo &size_info,
- TaskInfo &task_info,
- std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &irregular_cells,
- const bool hp_bool);
-
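   // Illustrative sketch (not part of this patch): the greedy coloring used on
   // the inner level of the strategy described above, reduced to plain
   // containers. 'neighbors[b]' is assumed to list the blocks connected to
   // block b and 'partition_of[b]' the outer-level partition of block b; both
   // are hypothetical inputs. The loop mirrors the coloring step of
   // make_thread_graph_partition_color().
   inline std::vector<unsigned int>
   color_blocks_within_partitions (const std::vector<std::vector<unsigned int> > &neighbors,
                                   const std::vector<unsigned int> &partition_of)
   {
     const unsigned int n_blocks = neighbors.size();
     std::vector<unsigned int> color (n_blocks, numbers::invalid_unsigned_int);
     for (unsigned int block=0; block<n_blocks; ++block)
       {
         // mark the colors already taken by neighbors in the same partition;
         // with n neighbors at most n colors can be taken, so a free color
         // exists among 0..n
         std::vector<bool> taken (neighbors[block].size()+1, false);
         for (unsigned int j=0; j<neighbors[block].size(); ++j)
           {
             const unsigned int neighbor = neighbors[block][j];
             if (partition_of[neighbor] == partition_of[block] &&
                 color[neighbor] < taken.size())
               taken[color[neighbor]] = true;
           }
         // choose the smallest color that is not taken
         unsigned int c = 0;
         while (taken[c] == true)
           ++c;
         color[block] = c;
       }
     return color;
   }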
- /**
- * This function goes through all cells that have been filled into @p
- * dof_indices and finds out which cells can be worked on independently
- * and which ones are neighboring and need to be done at different times
- * when used in parallel.
- *
- * The strategy is based on a two-level approach. The outer level is
- * subdivided into partitions similar to the type of neighbors in
- * Cuthill-McKee, and the inner level is again subdivided into Cuthill-
- * McKee-like partitions (partitions whose level differs by more than 2
- * can be worked on independently). One task is represented by a chunk
- * of cells. The cell chunks are formed after subdivision into the two
- * levels of partitions.
- */
- void
- make_thread_graph_partition_partition (SizeInfo &size_info,
- TaskInfo &task_info,
- std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &irregular_cells,
- const bool hp_bool);
+ void reorder_cells (const SizeInfo &size_info,
+ const std::vector<unsigned int> &renumbering,
+ const std::vector<unsigned int> &constraint_pool_row_index,
+ const std::vector<unsigned char> &irregular_cells,
+ const unsigned int vectorization_length);
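   // Illustrative note (not part of this patch): irregular_cells records, for
   // each macro cell, how many cells it actually holds if it is not completely
   // filled, and zero otherwise. For example, with vectorization_length = 4
   // and 10 active cells one obtains ceil(10/4) = 3 macro cells and
   // irregular_cells = {0, 0, 2}, since the last macro cell only holds
   // 10 % 4 = 2 cells.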
/**
* This function computes the connectivity of the currently stored
- * indices and fills the structure into a sparsity pattern. The
- * parameter block_size can be used to specify whether several cells
- * should be treated as one.
+ * indices in terms of connections between the individual cells and
+ * fills the structure into a sparsity pattern.
*/
void
- make_connectivity_graph (const SizeInfo &size_info,
- const TaskInfo &task_info,
+ make_connectivity_graph (const TaskInfo &task_info,
const std::vector<unsigned int> &renumbering,
- const std::vector<unsigned int> &irregular_cells,
- const bool do_blocking,
- DynamicSparsityPattern &connectivity) const;
+ DynamicSparsityPattern &connectivity) const;
/**
* Renumbers the degrees of freedom to give good access for this class.
void
- DoFInfo::compute_renumber_serial (const std::vector<unsigned int> &boundary_cells,
- const SizeInfo &size_info,
- std::vector<unsigned int> &renumbering)
- {
- std::vector<unsigned int> reverse_numbering (size_info.n_active_cells,
- numbers::invalid_unsigned_int);
- const unsigned int n_boundary_cells = boundary_cells.size();
- for (unsigned int j=0; j<n_boundary_cells; ++j)
- reverse_numbering[boundary_cells[j]] =
- j + size_info.vectorization_length*size_info.boundary_cells_start;
- unsigned int counter = 0;
- unsigned int j = 0;
- while (counter < size_info.n_active_cells &&
- counter < size_info.vectorization_length * size_info.boundary_cells_start)
- {
- if (reverse_numbering[j] == numbers::invalid_unsigned_int)
- reverse_numbering[j] = counter++;
- j++;
- }
- counter = std::min (size_info.vectorization_length*
- size_info.boundary_cells_start+n_boundary_cells,
- size_info.n_active_cells);
- if (counter < size_info.n_active_cells)
- {
- for ( ; j<size_info.n_active_cells; ++j)
- if (reverse_numbering[j] == numbers::invalid_unsigned_int)
- reverse_numbering[j] = counter++;
- }
- AssertDimension (counter, size_info.n_active_cells);
- renumbering = Utilities::invert_permutation (reverse_numbering);
- }
-
-
-
- void
- DoFInfo::compute_renumber_hp_serial (SizeInfo &size_info,
- std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &irregular_cells)
- {
- if (max_fe_index < 2)
- return;
- const unsigned int n_active_cells = size_info.n_active_cells;
- const unsigned int vectorization_length = size_info.vectorization_length;
- irregular_cells.resize (0);
- irregular_cells.resize (size_info.n_macro_cells+3*max_fe_index);
- std::vector<std::vector<unsigned int> > renumbering_fe_index;
- renumbering_fe_index.resize(max_fe_index);
- unsigned int counter,n_macro_cells_before = 0;
- const unsigned int
- start_bound = std::min (size_info.n_active_cells,
- size_info.boundary_cells_start*vectorization_length),
- end_bound = std::min (size_info.n_active_cells,
- size_info.boundary_cells_end*vectorization_length);
- for (counter=0; counter<start_bound; counter++)
- {
- renumbering_fe_index[cell_active_fe_index[renumbering[counter]]].
- push_back(renumbering[counter]);
- }
- counter = 0;
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
- renumbering[counter++] = renumbering_fe_index[j][jj];
- irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
- n_macro_cells_before] =
- renumbering_fe_index[j].size()%vectorization_length;
- n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
- vectorization_length;
- renumbering_fe_index[j].resize(0);
- }
- unsigned int new_boundary_start = n_macro_cells_before;
- for (counter = start_bound; counter < end_bound; counter++)
- {
- renumbering_fe_index[cell_active_fe_index[renumbering[counter]]].
- push_back(renumbering[counter]);
- }
- counter = start_bound;
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
- renumbering[counter++] = renumbering_fe_index[j][jj];
- irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
- n_macro_cells_before] =
- renumbering_fe_index[j].size()%vectorization_length;
- n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
- vectorization_length;
- renumbering_fe_index[j].resize(0);
- }
- unsigned int new_boundary_end = n_macro_cells_before;
- for (counter=end_bound; counter<n_active_cells; counter++)
- {
- renumbering_fe_index[cell_active_fe_index[renumbering[counter]]].
- push_back(renumbering[counter]);
- }
- counter = end_bound;
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
- renumbering[counter++] = renumbering_fe_index[j][jj];
- irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
- n_macro_cells_before] =
- renumbering_fe_index[j].size()%vectorization_length;
- n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
- vectorization_length;
- }
- AssertIndexRange (n_macro_cells_before,
- size_info.n_macro_cells + 3*max_fe_index+1);
- irregular_cells.resize (n_macro_cells_before);
- size_info.n_macro_cells = n_macro_cells_before;
- size_info.boundary_cells_start = new_boundary_start;
- size_info.boundary_cells_end = new_boundary_end;
- }
-
-
-
- void
- DoFInfo::compute_renumber_parallel (const std::vector<unsigned int> &boundary_cells,
- SizeInfo &size_info,
- std::vector<unsigned int> &renumbering)
- {
- std::vector<unsigned int> reverse_numbering (size_info.n_active_cells,
- numbers::invalid_unsigned_int);
- const unsigned int n_boundary_cells = boundary_cells.size();
- for (unsigned int j=0; j<n_boundary_cells; ++j)
- reverse_numbering[boundary_cells[j]] = j;
- unsigned int counter = n_boundary_cells;
- for (unsigned int j=0; j<size_info.n_active_cells; ++j)
- if (reverse_numbering[j] == numbers::invalid_unsigned_int)
- reverse_numbering[j] = counter++;
-
- size_info.boundary_cells_end = (size_info.boundary_cells_end -
- size_info.boundary_cells_start);
- size_info.boundary_cells_start = 0;
-
- AssertDimension (counter, size_info.n_active_cells);
- renumbering = Utilities::invert_permutation (reverse_numbering);
- }
-
-
-
- void
- DoFInfo::reorder_cells (const SizeInfo &size_info,
- const std::vector<unsigned int> &renumbering,
- const std::vector<unsigned int> &constraint_pool_row_index,
- const std::vector<unsigned int> &irregular_cells,
- const unsigned int vectorization_length)
+ DoFInfo::reorder_cells (const TaskInfo &task_info,
+ const std::vector<unsigned int> &renumbering,
+ const std::vector<unsigned int> &constraint_pool_row_index,
+ const std::vector<unsigned char> &irregular_cells,
+ const unsigned int vectorization_length)
{
// first reorder the active fe index.
if (cell_active_fe_index.size() > 0)
{
std::vector<unsigned int> new_active_fe_index;
- new_active_fe_index.reserve (size_info.n_macro_cells);
+ new_active_fe_index.reserve (task_info.cell_partition_data.back());
std::vector<unsigned int> fe_indices(vectorization_length);
unsigned int position_cell = 0;
- for (unsigned int cell=0; cell<size_info.n_macro_cells; ++cell)
+ for (unsigned int cell=0; cell<task_info.cell_partition_data.back(); ++cell)
{
const unsigned int n_comp = (irregular_cells[cell] > 0 ?
irregular_cells[cell] : vectorization_length);
new_constraint_indicator;
std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
unsigned int position_cell = 0;
- new_row_starts.resize (size_info.n_macro_cells + 1);
+ new_row_starts.resize(task_info.cell_partition_data.back()+1);
new_dof_indices.reserve (dof_indices.size());
new_constraint_indicator.reserve (constraint_indicator.size());
if (store_plain_indices == true)
{
- new_rowstart_plain.resize (size_info.n_macro_cells + 1,
+ new_rowstart_plain.resize (task_info.cell_partition_data.back()+1,
numbers::invalid_unsigned_int);
new_plain_indices.reserve (plain_dof_indices.size());
}
std::vector<const std::pair<unsigned short,unsigned short>*>
constr_ind(vectorization_length), constr_end(vectorization_length);
std::vector<unsigned int> index(vectorization_length);
- for (unsigned int i=0; i<size_info.n_macro_cells; ++i)
+ for (unsigned int i=0; i<task_info.cell_partition_data.back(); ++i)
{
const unsigned int dofs_mcell =
dofs_per_cell[cell_active_fe_index.size() == 0 ? 0 :
}
AssertDimension (position_cell+1, row_starts.size());
- new_row_starts[size_info.n_macro_cells][0] = new_dof_indices.size();
- new_row_starts[size_info.n_macro_cells][1] = new_constraint_indicator.size();
- new_row_starts[size_info.n_macro_cells][2] = 0;
+ new_row_starts[task_info.cell_partition_data.back()][0] = new_dof_indices.size();
+ new_row_starts[task_info.cell_partition_data.back()][1] = new_constraint_indicator.size();
+ new_row_starts[task_info.cell_partition_data.back()][2] = 0;
AssertDimension(dof_indices.size(), new_dof_indices.size());
AssertDimension(constraint_indicator.size(),
// be smaller than the number of indices in the row, and the second
// index should be smaller than the number of constraints in the
// constraint pool.
- for (unsigned int row=0; row<size_info.n_macro_cells; ++row)
+ for (unsigned int row=0; row<task_info.cell_partition_data.back(); ++row)
{
const unsigned int row_length_ind = row_length_indices(row);
const std::pair<unsigned short,unsigned short>
}
}
- // sanity check 3: all non-boundary cells should have indices that only
- // refer to the locally owned range
- const unsigned int local_size = (vector_partitioner->local_range().second-
- vector_partitioner->local_range().first);
- for (unsigned int row=0; row<size_info.boundary_cells_start; ++row)
- {
- const unsigned int *ptr = begin_indices(row);
- const unsigned int *end_ptr = end_indices (row);
- for ( ; ptr != end_ptr; ++ptr)
- AssertIndexRange (*ptr, local_size);
- }
- for (unsigned int row=size_info.boundary_cells_end;
- row<size_info.n_macro_cells; ++row)
- {
- const unsigned int *ptr = begin_indices(row);
- const unsigned int *end_ptr = end_indices (row);
- for ( ; ptr != end_ptr; ++ptr)
- AssertIndexRange (*ptr, local_size);
- }
+ // sanity check 3: check the number of cells once again
+ unsigned int n_active_cells = 0;
+ for (unsigned int c=0; c<*(task_info.cell_partition_data.end()-2); ++c)
+ if (irregular_cells[c] > 0)
+ n_active_cells += irregular_cells[c];
+ else
+ n_active_cells += vectorization_length;
+ AssertDimension(n_active_cells, task_info.n_active_cells);
#endif
}
- void DoFInfo::guess_block_size (const SizeInfo &size_info,
- TaskInfo &task_info)
- {
- // user did not say a positive number, so we have to guess
- if (task_info.block_size == 0)
- {
- // we would like to have enough work to do, so as first guess, try
- // to get 50 times as many chunks as we have threads on the system.
- task_info.block_size =
- size_info.n_macro_cells / (MultithreadInfo::n_threads() * 50);
-
- // if there are too few degrees of freedom per cell, need to
- // increase the block size
- const unsigned int minimum_parallel_grain_size = 500;
- if (dofs_per_cell[0] * task_info.block_size <
- minimum_parallel_grain_size)
- task_info.block_size = (minimum_parallel_grain_size /
- dofs_per_cell[0] + 1);
- }
- if (task_info.block_size > size_info.n_macro_cells)
- task_info.block_size = size_info.n_macro_cells;
- }
-
-
-
- void DoFInfo::make_thread_graph_partition_color
- (SizeInfo &size_info,
- TaskInfo &task_info,
- std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &irregular_cells,
- const bool hp_bool)
- {
- if (size_info.n_macro_cells == 0)
- return;
-
- const std::size_t vectorization_length = size_info.vectorization_length;
- Assert (vectorization_length > 0, ExcInternalError());
-
- guess_block_size (size_info, task_info);
-
- // set up partitions. if we just use coloring without partitions, do
- // nothing here, assume all cells to belong to the zero partition (that
- // we otherwise use for MPI boundary cells)
- unsigned int start_up = 0,
- start_nonboundary = numbers::invalid_unsigned_int;
- if (task_info.use_coloring_only == false)
- {
- start_nonboundary =
- std::min(((size_info.boundary_cells_end+task_info.block_size-1)/
- task_info.block_size)*task_info.block_size,
- size_info.n_macro_cells);
- size_info.boundary_cells_end = start_nonboundary;
- }
- else
- {
- start_nonboundary = size_info.n_macro_cells;
- size_info.boundary_cells_start = 0;
- size_info.boundary_cells_end = size_info.n_macro_cells;
- }
- if (hp_bool == true)
- {
- irregular_cells.resize (0);
- irregular_cells.resize (size_info.n_macro_cells+2*max_fe_index);
- std::vector<std::vector<unsigned int> > renumbering_fe_index;
- renumbering_fe_index.resize(max_fe_index);
- unsigned int counter,n_macro_cells_before = 0;
- for (counter=0; counter<start_nonboundary*vectorization_length;
- counter++)
- {
- renumbering_fe_index[cell_active_fe_index[renumbering[counter]]].
- push_back(renumbering[counter]);
- }
- counter = 0;
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
- renumbering[counter++] = renumbering_fe_index[j][jj];
- irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
- n_macro_cells_before] =
- renumbering_fe_index[j].size()%vectorization_length;
- n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
- vectorization_length;
- renumbering_fe_index[j].resize(0);
- }
-
- unsigned int new_boundary_end = n_macro_cells_before;
- for (counter=start_nonboundary*vectorization_length;
- counter<size_info.n_active_cells; counter++)
- {
- renumbering_fe_index[cell_active_fe_index.empty() ? 0 :
- cell_active_fe_index[renumbering[counter]]].
- push_back(renumbering[counter]);
- }
- counter = start_nonboundary * vectorization_length;
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
- renumbering[counter++] = renumbering_fe_index[j][jj];
- irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
- n_macro_cells_before] =
- renumbering_fe_index[j].size()%vectorization_length;
- n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
- vectorization_length;
- }
- AssertIndexRange (n_macro_cells_before,
- size_info.n_macro_cells + 2*max_fe_index+1);
- irregular_cells.resize (n_macro_cells_before);
- size_info.n_macro_cells = n_macro_cells_before;
- size_info.boundary_cells_start = 0;
- size_info.boundary_cells_end = new_boundary_end;
- task_info.n_blocks = (size_info.n_macro_cells+task_info.block_size-1)
- /task_info.block_size;
- task_info.block_size_last = size_info.n_macro_cells%task_info.block_size;
- if (task_info.block_size_last == 0)
- task_info.block_size_last = task_info.block_size;
- }
-
- // assume that all FEs have the same connectivity graph, so take the
- // zeroth FE
- task_info.n_blocks = (size_info.n_macro_cells+task_info.block_size-1)/
- task_info.block_size;
- task_info.block_size_last = size_info.n_macro_cells-
- (task_info.block_size*(task_info.n_blocks-1));
-
- // create the connectivity graph with internal blocking
- DynamicSparsityPattern connectivity;
- make_connectivity_graph (size_info, task_info, renumbering,irregular_cells,
- true, connectivity);
-
- // Create cell-block partitioning.
- unsigned int partition = 0, counter = 0;
- bool work = true;
-
- // For each block of cells, this variable saves to which partitions the
- // block belongs. Initialize all to n_macro_cells to mark them as not
- // yet assigned a partition.
- std::vector<unsigned int> cell_partition(task_info.n_blocks,
- size_info.n_macro_cells);
- std::vector<unsigned int> neighbor_list;
- std::vector<unsigned int> neighbor_neighbor_list;
-
- // In element j of this variable, one puts the old number of the block
- // that should be the jth block in the new numeration.
- std::vector<unsigned int> partition_list (task_info.n_blocks,0);
- std::vector<unsigned int> partition_color_list(task_info.n_blocks,0);
-
- // This vector points to the start of each partition.
- std::vector<unsigned int> partition_blocks (2,0);
- std::vector<unsigned int> cell_color(task_info.n_blocks,
- size_info.n_macro_cells);
- std::vector<bool> color_finder;
-
- // this performs a classical breadth-first search in the connectivity
- // graph of the cell chunks
- while (work)
- {
- // put all cells up to begin_inner_cells into first partition. if
- // the numbers do not add up exactly, assign an additional block
- if (start_nonboundary>0)
- {
- unsigned int n_blocks = ((start_nonboundary+task_info.block_size-1)
- /task_info.block_size);
- start_nonboundary = 0;
- for (unsigned int cell=0; cell<n_blocks; ++cell)
- {
- cell_partition[cell] = partition;
- neighbor_list.push_back(cell);
- partition_list[counter++] = cell;
- partition_blocks.back()++;
- }
- }
- else
- {
- // To start up, set the start_up cell to partition and list all
- // its neighbors.
- AssertIndexRange(start_up, cell_partition.size());
- cell_partition[start_up] = partition;
- neighbor_list.push_back(start_up);
- partition_list[counter++] = start_up;
- partition_blocks.back()++;
- }
-
- while (neighbor_list.size()>0)
- {
- partition++;
- partition_blocks.push_back(partition_blocks.back());
- for (unsigned int j=0; j<neighbor_list.size(); ++j)
- {
- Assert(cell_partition[neighbor_list[j]]==partition-1,
- ExcInternalError());
- DynamicSparsityPattern::iterator neighbor =
- connectivity.begin(neighbor_list[j]),
- end = connectivity.end(neighbor_list[j]);
- for (; neighbor!=end ; ++neighbor)
- {
- if (cell_partition[neighbor->column()]==size_info.n_macro_cells)
- {
- partition_blocks.back()++;
- cell_partition[neighbor->column()] = partition;
- neighbor_neighbor_list.push_back(neighbor->column());
- partition_list[counter++] = neighbor->column();
- }
- }
- }
- neighbor_list = neighbor_neighbor_list;
- neighbor_neighbor_list.resize(0);
- }
-
- // One has to check if the graph is not connected so we have to find
- // another partition.
- work = false;
- for (unsigned int j=start_up; j<task_info.n_blocks; ++j)
- if (cell_partition[j] == size_info.n_macro_cells)
- {
- start_up = j;
- work = true;
- break;
- }
- }
- AssertDimension (partition_blocks[partition], task_info.n_blocks);
-
-
- // Color the cells within each partition
- task_info.partition_color_blocks_row_index.resize(partition+1);
- unsigned int color_counter = 0, index_counter = 0;
- for (unsigned int part=0; part<partition; part++)
- {
- task_info.partition_color_blocks_row_index[part] = index_counter;
- unsigned int max_color = 0;
- for (unsigned int k=partition_blocks[part]; k<partition_blocks[part+1];
- k++)
- {
- unsigned int cell = partition_list[k];
- unsigned int n_neighbors = connectivity.row_length(cell);
-
- // In the worst case, each neighbor has a different color. So we
- // find at least one available color between 0 and n_neighbors.
- color_finder.resize(n_neighbors+1);
- for (unsigned int j=0; j<=n_neighbors; ++j)
- color_finder[j]=true;
- DynamicSparsityPattern::iterator
- neighbor = connectivity.begin(cell),
- end = connectivity.end(cell);
- for (; neighbor!=end ; ++neighbor)
- {
- // Mark the color that a neighbor within the partition has
- // as taken
- if (cell_partition[neighbor->column()] == part &&
- cell_color[neighbor->column()] <= n_neighbors)
- color_finder[cell_color[neighbor->column()]] = false;
- }
- // Choose the smallest color that is not taken for the block
- cell_color[cell]=0;
- while (color_finder[cell_color[cell]] == false)
- cell_color[cell]++;
- if (cell_color[cell] > max_color)
- max_color = cell_color[cell];
- }
- // Reorder within the partition: first all blocks with color 0, then
- // color 1, and so on up to max_color (note that the smaller the
- // number, the larger the partition)
- for (unsigned int color=0; color<=max_color; color++)
- {
- task_info.partition_color_blocks_data.push_back(color_counter);
- index_counter++;
- for (unsigned int k=partition_blocks[part];
- k<partition_blocks[part+1]; k++)
- {
- unsigned int cell=partition_list[k];
- if (cell_color[cell] == color)
- {
- partition_color_list[color_counter++] = cell;
- }
- }
- }
- }
- task_info.partition_color_blocks_data.push_back(task_info.n_blocks);
- task_info.partition_color_blocks_row_index[partition] = index_counter;
- AssertDimension (color_counter, task_info.n_blocks);
-
- partition_list = renumbering;
-
- // in debug mode, check that the partition color list is one-to-one
-#ifdef DEBUG
- {
- std::vector<unsigned int> sorted_pc_list (partition_color_list);
- std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
- for (unsigned int i=0; i<sorted_pc_list.size(); ++i)
- Assert(sorted_pc_list[i] == i, ExcInternalError());
- }
-#endif
-
- // set the start list for each block and compute the renumbering of
- // cells
- std::vector<unsigned int> block_start(size_info.n_macro_cells+1);
- std::vector<unsigned int> irregular(size_info.n_macro_cells);
-
- unsigned int mcell_start=0;
- block_start[0] = 0;
- for (unsigned int block=0; block<task_info.n_blocks; block++)
- {
- block_start[block+1] = block_start[block];
- for (unsigned int mcell=mcell_start; mcell<
- std::min(mcell_start+task_info.block_size,
- size_info.n_macro_cells);
- ++mcell)
- {
- unsigned int n_comp = (irregular_cells[mcell]>0)
- ?irregular_cells[mcell]:size_info.vectorization_length;
- block_start[block+1] += n_comp;
- ++counter;
- }
- mcell_start += task_info.block_size;
- }
- counter = 0;
- unsigned int counter_macro = 0;
- for (unsigned int block=0; block<task_info.n_blocks; block++)
- {
- unsigned int present_block = partition_color_list[block];
- for (unsigned int cell = block_start[present_block];
- cell<block_start[present_block+1]; ++cell)
- renumbering[counter++] = partition_list[cell];
- unsigned int this_block_size = (present_block == task_info.n_blocks-1)?
- task_info.block_size_last:task_info.block_size;
- for (unsigned int j=0; j<this_block_size; j++)
- irregular[counter_macro++] =
- irregular_cells[present_block*task_info.block_size+j];
- if (present_block == task_info.n_blocks-1)
- task_info.position_short_block = block;
- }
- irregular_cells.swap(irregular);
- AssertDimension (counter, size_info.n_active_cells);
- AssertDimension (counter_macro, size_info.n_macro_cells);
-
- // check that the renumbering is one-to-one
-#ifdef DEBUG
- {
- std::vector<unsigned int> sorted_renumbering (renumbering);
- std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
- for (unsigned int i=0; i<sorted_renumbering.size(); ++i)
- Assert(sorted_renumbering[i] == i, ExcInternalError());
- }
-#endif
- AssertDimension(counter,size_info.n_active_cells);
- task_info.evens = (partition+1)/2;
- task_info.odds = (partition)/2;
- task_info.n_blocked_workers = task_info.odds-
- (task_info.odds+task_info.evens+1)%2;
- task_info.n_workers = task_info.partition_color_blocks_data.size()-1-
- task_info.n_blocked_workers;
- }
-
-
-
- void
- DoFInfo::make_thread_graph_partition_partition
- (SizeInfo &size_info,
- TaskInfo &task_info,
- std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &irregular_cells,
- const bool hp_bool)
- {
- if (size_info.n_macro_cells == 0)
- return;
-
- const std::size_t vectorization_length = size_info.vectorization_length;
- Assert (vectorization_length > 0, ExcInternalError());
-
- guess_block_size (size_info, task_info);
-
- // assume that all FEs have the same connectivity graph, so take the
- // zeroth FE
- task_info.n_blocks = (size_info.n_macro_cells+task_info.block_size-1)/
- task_info.block_size;
- task_info.block_size_last = size_info.n_macro_cells-
- (task_info.block_size*(task_info.n_blocks-1));
- task_info.position_short_block = task_info.n_blocks-1;
- unsigned int cluster_size = task_info.block_size*vectorization_length;
-
- // create the connectivity graph without internal blocking
- DynamicSparsityPattern connectivity;
- make_connectivity_graph (size_info, task_info, renumbering,irregular_cells,
- false, connectivity);
-
- // Create cell-block partitioning.
-
- // For each block of cells, this variable saves to which partitions the
- // block belongs. Initialize all to n_macro_cells to mark them as not
- // yet assigned a partition.
- std::vector<unsigned int> cell_partition (size_info.n_active_cells,
- size_info.n_active_cells);
- std::vector<unsigned int> neighbor_list;
- std::vector<unsigned int> neighbor_neighbor_list;
-
- // In element j of this variable, one puts the old number of the block
- // that should be the jth block in the new numeration.
- std::vector<unsigned int> partition_list(size_info.n_active_cells,0);
- std::vector<unsigned int> partition_partition_list(size_info.n_active_cells,0);
-
- // This vector points to the start of each partition.
- std::vector<unsigned int> partition_size(2,0);
-
- unsigned int partition = 0,start_up=0,counter=0;
- unsigned int start_nonboundary = vectorization_length * size_info.boundary_cells_end;
- if (start_nonboundary > size_info.n_active_cells)
- start_nonboundary = size_info.n_active_cells;
- bool work = true;
- unsigned int remainder = cluster_size;
-
- // this performs a classical breadth-first search in the connectivity
- // graph of the cells under the restriction that the size of the
- // partitions should be a multiple of the given block size
- while (work)
- {
- // put the cells with neighbors on remote MPI processes up front
- if (start_nonboundary>0)
- {
- for (unsigned int cell=0; cell<start_nonboundary; ++cell)
- {
- const unsigned int cell_nn = renumbering[cell];
- cell_partition[cell_nn] = partition;
- neighbor_list.push_back(cell_nn);
- partition_list[counter++] = cell_nn;
- partition_size.back()++;
- }
- remainder -= (start_nonboundary%cluster_size);
- if (remainder == cluster_size)
- remainder = 0;
-
- // adjust end of boundary cells to the remainder
- size_info.boundary_cells_end += (remainder+vectorization_length-1)/vectorization_length;
- start_nonboundary = 0;
- }
- else
- {
- // To start up, set the start_up cell to partition and list all
- // its neighbors.
- cell_partition[start_up] = partition;
- neighbor_list.push_back(start_up);
- partition_list[counter++] = start_up;
- partition_size.back()++;
- start_up++;
- remainder--;
- if (remainder == cluster_size)
- remainder = 0;
- }
- int index_before = neighbor_list.size(), index = index_before,
- index_stop = 0;
- while (remainder>0)
- {
- if (index==index_stop)
- {
- index = neighbor_list.size();
- if (index == index_before)
- {
- neighbor_list.resize(0);
- goto not_connect;
- }
- index_stop = index_before;
- index_before = index;
- }
- index--;
- unsigned int additional = neighbor_list[index];
- DynamicSparsityPattern::iterator neighbor =
- connectivity.begin(additional),
- end = connectivity.end(additional);
- for (; neighbor!=end ; ++neighbor)
- {
- if (cell_partition[neighbor->column()]==size_info.n_active_cells)
- {
- partition_size.back()++;
- cell_partition[neighbor->column()] = partition;
- neighbor_list.push_back(neighbor->column());
- partition_list[counter++] = neighbor->column();
- remainder--;
- if (remainder == 0)
- break;
- }
- }
- }
-
- while (neighbor_list.size()>0)
- {
- partition++;
- unsigned int partition_counter = 0;
- partition_size.push_back(partition_size.back());
-
- for (unsigned int j=0; j<neighbor_list.size(); ++j)
- {
- Assert(cell_partition[neighbor_list[j]]==partition-1,
- ExcInternalError());
- DynamicSparsityPattern::iterator neighbor =
- connectivity.begin(neighbor_list[j]),
- end = connectivity.end(neighbor_list[j]);
- for (; neighbor!=end ; ++neighbor)
- {
- if (cell_partition[neighbor->column()]==size_info.n_active_cells)
- {
- partition_size.back()++;
- cell_partition[neighbor->column()] = partition;
- neighbor_neighbor_list.push_back(neighbor->column());
- partition_list[counter++] = neighbor->column();
- partition_counter++;
- }
- }
- }
- remainder = cluster_size-(partition_counter%cluster_size);
- if (remainder == cluster_size)
- remainder = 0;
- int index_stop = 0;
- int index_before = neighbor_neighbor_list.size(), index = index_before;
- while (remainder>0)
- {
- if (index==index_stop)
- {
- index = neighbor_neighbor_list.size();
- if (index == index_before)
- {
- neighbor_neighbor_list.resize(0);
- break;
- }
- index_stop = index_before;
- index_before = index;
- }
- index--;
- unsigned int additional = neighbor_neighbor_list[index];
- DynamicSparsityPattern::iterator neighbor =
- connectivity.begin(additional),
- end = connectivity.end(additional);
- for (; neighbor!=end ; ++neighbor)
- {
- if (cell_partition[neighbor->column()]==size_info.n_active_cells)
- {
- partition_size.back()++;
- cell_partition[neighbor->column()] = partition;
- neighbor_neighbor_list.push_back(neighbor->column());
- partition_list[counter++] = neighbor->column();
- remainder--;
- if (remainder == 0)
- break;
- }
- }
- }
-
- neighbor_list = neighbor_neighbor_list;
- neighbor_neighbor_list.resize(0);
- }
-not_connect:
- // One has to check if the graph is not connected so we have to find
- // another partition.
- work = false;
- for (unsigned int j=start_up; j<size_info.n_active_cells; ++j)
- if (cell_partition[j] == size_info.n_active_cells)
- {
- start_up = j;
- work = true;
- if (remainder == 0)
- remainder = cluster_size;
- break;
- }
- }
- if (remainder != 0)
- partition++;
-
- for (unsigned int j=0; j<renumbering.size(); j++)
- renumbering[j] = 0;
- irregular_cells.back() = 0;
- irregular_cells.resize(size_info.n_active_cells);
- unsigned int n_macro_cells_before = 0;
- {
- // Create partitioning within partitions.
-
- // For each block of cells, this variable saves to which partitions
- // the block belongs. Initialize all to n_macro_cells to mark them as
- // not yet assigned a partition.
- std::vector<unsigned int> cell_partition_l2(size_info.n_active_cells,
- size_info.n_active_cells);
- task_info.partition_color_blocks_row_index.resize(partition+1,0);
- task_info.partition_color_blocks_data.resize(1,0);
-
- counter = 0;
- unsigned int missing_macros;
- for (unsigned int part=0; part<partition; ++part)
- {
- neighbor_neighbor_list.resize(0);
- neighbor_list.resize(0);
- bool work = true;
- unsigned int partition_l2 = 0;
- start_up = partition_size[part];
- unsigned int partition_counter = 0;
- while (work)
- {
- if (neighbor_list.size()==0)
- {
- work = false;
- partition_counter = 0;
- for (unsigned int j=start_up; j<partition_size[part+1]; ++j)
- if (cell_partition[partition_list[j]] == part &&
- cell_partition_l2[partition_list[j]] == size_info.n_active_cells)
- {
- start_up = j;
- work = true;
- partition_counter = 1;
- // To start up, set the start_up cell to partition
- // and list all its neighbors.
- AssertIndexRange (start_up, partition_size[part+1]);
- cell_partition_l2[partition_list[start_up]] =
- partition_l2;
- neighbor_neighbor_list.push_back
- (partition_list[start_up]);
- partition_partition_list[counter++] =
- partition_list[start_up];
- start_up++;
- break;
- }
- }
- else
- {
- partition_counter = 0;
- for (unsigned int j=0; j<neighbor_list.size(); ++j)
- {
- Assert(cell_partition[neighbor_list[j]]==part,
- ExcInternalError());
- Assert(cell_partition_l2[neighbor_list[j]]==partition_l2-1,
- ExcInternalError());
- DynamicSparsityPattern::iterator neighbor =
- connectivity.begin(neighbor_list[j]),
- end = connectivity.end(neighbor_list[j]);
- for (; neighbor!=end ; ++neighbor)
- {
- if (cell_partition[neighbor->column()] == part &&
- cell_partition_l2[neighbor->column()]==
- size_info.n_active_cells)
- {
- cell_partition_l2[neighbor->column()] = partition_l2;
- neighbor_neighbor_list.push_back(neighbor->column());
- partition_partition_list[counter++] = neighbor->column();
- partition_counter++;
- }
- }
- }
- }
- if (partition_counter>0)
- {
- int index_before = neighbor_neighbor_list.size(),
- index = index_before;
- {
- // put the cells into separate lists for each FE index
- // within one partition-partition
- missing_macros = 0;
- std::vector<unsigned int> remaining_per_macro_cell
- (max_fe_index);
- std::vector<std::vector<unsigned int> >
- renumbering_fe_index;
- unsigned int cell;
- bool filled = true;
- if (hp_bool == true)
- {
- renumbering_fe_index.resize(max_fe_index);
- for (cell=counter-partition_counter; cell<counter; ++cell)
- {
- renumbering_fe_index
- [cell_active_fe_index.empty() ? 0 :
- cell_active_fe_index[partition_partition_list
- [cell]]].
- push_back(partition_partition_list[cell]);
- }
- // check how many more cells are needed in the lists
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- remaining_per_macro_cell[j] =
- renumbering_fe_index[j].size()%vectorization_length;
- if (remaining_per_macro_cell[j] != 0)
- filled = false;
- missing_macros += ((renumbering_fe_index[j].size()+
- vectorization_length-1)/vectorization_length);
- }
- }
- else
- {
- remaining_per_macro_cell.resize(1);
- remaining_per_macro_cell[0] = partition_counter%
- vectorization_length;
- missing_macros = partition_counter/vectorization_length;
- if (remaining_per_macro_cell[0] != 0)
- {
- filled = false;
- missing_macros++;
- }
- }
- missing_macros = task_info.block_size -
- (missing_macros%task_info.block_size);
-
- // now we realized that there are some cells missing.
- while (missing_macros>0 || filled == false)
- {
- if (index==0)
- {
- index = neighbor_neighbor_list.size();
- if (index == index_before)
- {
- if (missing_macros != 0)
- {
- neighbor_neighbor_list.resize(0);
- }
- start_up--;
- break;// not connected - start again
- }
- index_before = index;
- }
- index--;
- unsigned int additional = neighbor_neighbor_list
- [index];
-
- // go through the neighbors of the last cell in the
- // current partition and check if we find some to
- // fill up with.
- DynamicSparsityPattern::iterator
- neighbor = connectivity.begin(additional),
- end = connectivity.end(additional);
- for (; neighbor!=end ; ++neighbor)
- {
- if (cell_partition[neighbor->column()] == part &&
- cell_partition_l2[neighbor->column()] ==
- size_info.n_active_cells)
- {
- unsigned int this_index = 0;
- if (hp_bool == true)
- this_index = cell_active_fe_index.empty() ? 0 :
- cell_active_fe_index[neighbor->column()];
-
- // Only add this cell if we need more macro
- // cells in the current block or if there is
- // a macro cell with the FE index that is
- // not yet fully populated
- if (missing_macros > 0 ||
- remaining_per_macro_cell[this_index] > 0)
- {
- cell_partition_l2[neighbor->column()] = partition_l2;
- neighbor_neighbor_list.push_back(neighbor->column());
- if (hp_bool == true)
- renumbering_fe_index[this_index].
- push_back(neighbor->column());
- partition_partition_list[counter] =
- neighbor->column();
- counter++;
- partition_counter++;
- if (remaining_per_macro_cell[this_index]
- == 0 && missing_macros > 0)
- missing_macros--;
- remaining_per_macro_cell[this_index]++;
- if (remaining_per_macro_cell[this_index]
- == vectorization_length)
- {
- remaining_per_macro_cell[this_index] = 0;
- }
- if (missing_macros == 0)
- {
- filled = true;
- for (unsigned int fe_ind=0;
- fe_ind<max_fe_index; ++fe_ind)
- if (remaining_per_macro_cell[fe_ind]!=0)
- filled = false;
- }
- if (filled == true)
- break;
- }
- }
- }
- }
- if (hp_bool == true)
- {
- // set the renumbering according to their active FE
- // index within one partition-partition which was
- // implicitly assumed above
- cell = counter - partition_counter;
- for (unsigned int j=0; j<max_fe_index; j++)
- {
- for (unsigned int jj=0; jj<renumbering_fe_index[j].
- size(); jj++)
- renumbering[cell++] =
- renumbering_fe_index[j][jj];
- if (renumbering_fe_index[j].size()%vectorization_length != 0)
- irregular_cells[renumbering_fe_index[j].size()/
- vectorization_length+
- n_macro_cells_before] =
- renumbering_fe_index[j].size()%vectorization_length;
- n_macro_cells_before += (renumbering_fe_index[j].
- size()+vectorization_length-1)/
- vectorization_length;
- renumbering_fe_index[j].resize(0);
- }
- }
- else
- {
- n_macro_cells_before += partition_counter/vectorization_length;
- if (partition_counter%vectorization_length != 0)
- {
- irregular_cells[n_macro_cells_before] =
- partition_counter%vectorization_length;
- n_macro_cells_before++;
- }
- }
- }
- task_info.partition_color_blocks_data.
- push_back(n_macro_cells_before);
- partition_l2++;
- }
- neighbor_list = neighbor_neighbor_list;
- neighbor_neighbor_list.resize(0);
- }
- task_info.partition_color_blocks_row_index[part+1] =
- task_info.partition_color_blocks_row_index[part] + partition_l2;
- }
- }
-
- if (size_info.boundary_cells_end>0)
- size_info.boundary_cells_end = task_info.partition_color_blocks_data
- [task_info.partition_color_blocks_row_index[1]];
-
- if (hp_bool == false)
- renumbering.swap(partition_partition_list);
- irregular_cells.resize(n_macro_cells_before);
- size_info.n_macro_cells = n_macro_cells_before;
-
- task_info.evens = (partition+1)/2;
- task_info.odds = partition/2;
- task_info.n_blocked_workers =
- task_info.odds-(task_info.odds+task_info.evens+1)%2;
- task_info.n_workers = task_info.evens+task_info.odds-
- task_info.n_blocked_workers;
- task_info.partition_evens.resize(partition);
- task_info.partition_odds.resize(partition);
- task_info.partition_n_blocked_workers.resize(partition);
- task_info.partition_n_workers.resize(partition);
- for (unsigned int part=0; part<partition; part++)
- {
- task_info.partition_evens[part] =
- (task_info.partition_color_blocks_row_index[part+1]-
- task_info.partition_color_blocks_row_index[part]+1)/2;
- task_info.partition_odds[part] =
- (task_info.partition_color_blocks_row_index[part+1]-
- task_info.partition_color_blocks_row_index[part])/2;
- task_info.partition_n_blocked_workers[part] =
- task_info.partition_odds[part]-(task_info.partition_odds[part]+
- task_info.partition_evens[part]+1)%2;
- task_info.partition_n_workers[part] =
- task_info.partition_evens[part]+task_info.partition_odds[part]-
- task_info.partition_n_blocked_workers[part];
- }
- }
-
-
- namespace internal
+ namespace
{
// rudimentary version of a vector that keeps entries always ordered
class ordered_vector : public std::vector<types::global_dof_index>
++dat;
}
};
+
+ // We construct the connectivity graph in parallel. We use one lock per
+ // 256 degrees of freedom to keep the number of locks down to a
+ // reasonable level and to reduce the locking overhead to some extent.
+ static constexpr unsigned int bucket_size_threading = 256;
+
+ void compute_row_lengths(const unsigned int begin,
+ const unsigned int end,
+ const DoFInfo &dof_info,
+ std::vector<Threads::Mutex> &mutexes,
+ std::vector<unsigned int> &row_lengths)
+ {
+ std::vector<unsigned int> scratch;
+ constexpr unsigned int n_components = 1;
+ for (unsigned int block=begin; block<end; ++block)
+ {
+ scratch.clear();
+ scratch.insert(scratch.end(),
+ &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]],
+ &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]]);
+ std::sort(scratch.begin(), scratch.end());
+ std::vector<unsigned int>::const_iterator end_unique =
+ std::unique(scratch.begin(), scratch.end());
+ std::vector<unsigned int>::const_iterator it = scratch.begin();
+ while (it != end_unique)
+ {
+ // We insert all elements that fall within the range of one
+ // lock in a single pass while holding that lock
+ const unsigned int next_bucket = (*it/bucket_size_threading+1)*
+ bucket_size_threading;
+ Threads::Mutex::ScopedLock lock(mutexes[*it/bucket_size_threading]);
+ for ( ; it != end_unique && *it < next_bucket; ++it)
+ {
+ AssertIndexRange(*it, row_lengths.size());
+ row_lengths[*it]++;
+ }
+ }
+ }
+ }
+
+ void fill_connectivity_dofs(const unsigned int begin,
+ const unsigned int end,
+ const DoFInfo &dof_info,
+ const std::vector<unsigned int> &row_lengths,
+ std::vector<Threads::Mutex> &mutexes,
+ dealii::SparsityPattern &connectivity_dof)
+ {
+ std::vector<unsigned int> scratch;
+ const unsigned int n_components = 1;
+ for (unsigned int block=begin; block<end; ++block)
+ {
+ scratch.clear();
+ scratch.insert(scratch.end(),
+ &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]],
+ &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]]);
+ std::sort(scratch.begin(), scratch.end());
+ std::vector<unsigned int>::const_iterator end_unique =
+ std::unique(scratch.begin(), scratch.end());
+ std::vector<unsigned int>::const_iterator it = scratch.begin();
+ while (it != end_unique)
+ {
+ const unsigned int next_bucket = (*it/bucket_size_threading+1)*
+ bucket_size_threading;
+ Threads::Mutex::ScopedLock lock(mutexes[*it/bucket_size_threading]);
+ for ( ; it != end_unique && *it < next_bucket; ++it)
+ if (row_lengths[*it]>0)
+ connectivity_dof.add(*it, block);
+ }
+ }
+ }
+
+ void fill_connectivity(const unsigned int begin,
+ const unsigned int end,
+ const DoFInfo &dof_info,
+ const std::vector<unsigned int> &renumbering,
+ const dealii::SparsityPattern &connectivity_dof,
+ DynamicSparsityPattern &connectivity)
+ {
+ ordered_vector row_entries;
+ const unsigned int n_components = 1;
+ for (unsigned int block=begin; block < end; ++block)
+ {
+ row_entries.clear();
+
+ const unsigned int
+ *it = &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]],
+ *end_cell = &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]];
+ for ( ; it != end_cell; ++it)
+ {
+ SparsityPattern::iterator sp = connectivity_dof.begin(*it);
+ std::vector<types::global_dof_index>::iterator insert_pos = row_entries.begin();
+ for ( ; sp != connectivity_dof.end(*it); ++sp)
+ if (sp->column() != block)
+ row_entries.insert (renumbering[sp->column()], insert_pos);
+ }
+ connectivity.add_entries (renumbering[block], row_entries.begin(), row_entries.end());
+ }
+ }
}
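  // Illustrative sketch (not part of this patch): the bucketed-locking pattern
  // used by compute_row_lengths() and fill_connectivity_dofs() above, reduced
  // to its core. Each consecutive group of 'bucket_size' rows shares one
  // mutex, so a thread that walks a sorted list of row indices acquires each
  // lock only once per bucket. 'sorted_rows' and 'counts' are assumed inputs.
  inline void
  accumulate_row_counts (const std::vector<unsigned int> &sorted_rows,
                         const unsigned int bucket_size,
                         std::vector<Threads::Mutex> &mutexes,
                         std::vector<unsigned int> &counts)
  {
    std::vector<unsigned int>::const_iterator it = sorted_rows.begin();
    while (it != sorted_rows.end())
      {
        // all indices before next_bucket are protected by the same mutex
        const unsigned int next_bucket = (*it/bucket_size+1)*bucket_size;
        Threads::Mutex::ScopedLock lock (mutexes[*it/bucket_size]);
        for ( ; it != sorted_rows.end() && *it < next_bucket; ++it)
          ++counts[*it];
      }
  }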
void
DoFInfo::make_connectivity_graph
- (const SizeInfo &size_info,
- const TaskInfo &task_info,
+ (const TaskInfo &task_info,
const std::vector<unsigned int> &renumbering,
- const std::vector<unsigned int> &irregular_cells,
- const bool do_blocking,
- DynamicSparsityPattern &connectivity) const
+ DynamicSparsityPattern &connectivity) const
{
- AssertDimension (row_starts.size()-1, size_info.n_active_cells);
- const unsigned int n_rows =
+ unsigned int n_rows =
(vector_partitioner->local_range().second-
vector_partitioner->local_range().first)
+ vector_partitioner->ghost_indices().n_elements();
- const unsigned int n_blocks = (do_blocking == true) ?
- task_info.n_blocks : size_info.n_active_cells;
+
+ // Avoid a square sparsity pattern, for which SparsityPattern would
+ // allocate the diagonal entry of each row first
+ if (n_rows == task_info.n_active_cells)
+ ++n_rows;
// first determine row lengths
std::vector<unsigned int> row_lengths(n_rows);
- unsigned int cell_start = 0, mcell_start = 0;
- std::vector<unsigned int> scratch;
- for (unsigned int block = 0; block < n_blocks; ++block)
- {
- // if we have the blocking variant (used in the coloring scheme), we
- // want to build a graph with the blocks with interaction with
- // remote MPI processes up front. in the non-blocking variant, we do
- // not do this here. TODO: unify this approach!!!
- if (do_blocking == true)
- {
- scratch.clear();
- for (unsigned int mcell=mcell_start; mcell<
- std::min(mcell_start+task_info.block_size,
- size_info.n_macro_cells);
- ++mcell)
- {
- unsigned int n_comp = (irregular_cells[mcell]>0)
- ?irregular_cells[mcell]:size_info.vectorization_length;
- for (unsigned int cell = cell_start; cell < cell_start+n_comp;
- ++cell)
- scratch.insert(scratch.end(),
- begin_indices(renumbering[cell]),
- end_indices(renumbering[cell]));
- cell_start += n_comp;
- }
- std::sort(scratch.begin(), scratch.end());
- const unsigned int n_unique =
- std::unique(scratch.begin(), scratch.end())-scratch.begin();
- for (unsigned int i=0; i<n_unique; ++i)
- row_lengths[scratch[i]]++;
- mcell_start += task_info.block_size;
- }
- else
- {
- scratch.clear();
- scratch.insert(scratch.end(),
- begin_indices(block), end_indices(block));
- std::sort(scratch.begin(), scratch.end());
- const unsigned int n_unique =
- std::unique(scratch.begin(), scratch.end())-scratch.begin();
- for (unsigned int i=0; i<n_unique; ++i)
- row_lengths[scratch[i]]++;
- }
- }
-
- // disregard dofs that only sit on one cell
+ std::vector<Threads::Mutex> mutexes(n_rows/bucket_size_threading+1);
+ parallel::apply_to_subranges(0, task_info.n_active_cells,
+ std::bind(&compute_row_lengths,
+ std::placeholders::_1,
+ std::placeholders::_2,
+ std::cref(*this),
+ std::ref(mutexes),
+ std::ref(row_lengths)), 20);
+
+ // disregard dofs that only sit on a single cell because they cannot
+ // couple
for (unsigned int row=0; row<n_rows; ++row)
- if (row_lengths[row] == 1)
+ if (row_lengths[row] <= 1)
row_lengths[row] = 0;
- SparsityPattern connectivity_dof (n_rows, n_blocks, row_lengths);
- cell_start = 0, mcell_start = 0;
- for (unsigned int block = 0; block < n_blocks; ++block)
- {
- // if we have the blocking variant (used in the coloring scheme), we
- // want to build a graph with the blocks with interaction with
- // remote MPI processes up front. in the non-blocking variant, we do
- // not do this here. TODO: unify this approach!!!
- if (do_blocking == true)
- {
- for (unsigned int mcell=mcell_start; mcell<
- std::min(mcell_start+task_info.block_size,
- size_info.n_macro_cells);
- ++mcell)
- {
- unsigned int n_comp = (irregular_cells[mcell]>0)
- ?irregular_cells[mcell]:size_info.vectorization_length;
- for (unsigned int cell = cell_start; cell < cell_start+n_comp;
- ++cell)
- {
- const unsigned int
- *it = begin_indices (renumbering[cell]),
- *end_cell = end_indices (renumbering[cell]);
- for ( ; it != end_cell; ++it)
- if (row_lengths[*it]>0)
- connectivity_dof.add(*it, block);
- }
- cell_start += n_comp;
- }
- mcell_start += task_info.block_size;
- }
- else
- {
- const unsigned int
- *it = begin_indices (block),
- *end_cell = end_indices (block);
- for ( ; it != end_cell; ++it)
- if (row_lengths[*it]>0)
- connectivity_dof.add(*it, block);
- }
- }
+ // Create a temporary sparsity pattern that stores, for each degree of
+ // freedom, the cells on which it appears, i.e., the connectivity
+ // between cells and dofs
+ SparsityPattern connectivity_dof (n_rows, task_info.n_active_cells,
+ row_lengths);
+ parallel::apply_to_subranges(0, task_info.n_active_cells,
+ std::bind(&fill_connectivity_dofs,
+ std::placeholders::_1,
+ std::placeholders::_2,
+ std::cref(*this),
+ std::cref(row_lengths),
+ std::ref(mutexes),
+ std::ref(connectivity_dof)), 20);
connectivity_dof.compress();
- connectivity.reinit (n_blocks, n_blocks);
- internal::ordered_vector row_entries;
- cell_start = 0;
- mcell_start = 0;
- for (unsigned int block=0; block < n_blocks; ++block)
- {
- row_entries.clear();
- if (do_blocking==true)
- {
- for (unsigned int mcell=mcell_start; mcell<
- std::min(mcell_start+task_info.block_size,
- size_info.n_macro_cells);
- ++mcell)
- {
- unsigned int n_comp = (irregular_cells[mcell]>0)
- ?irregular_cells[mcell]:size_info.vectorization_length;
- for (unsigned int cell = cell_start; cell < cell_start+n_comp;
- ++cell)
- {
- // apply renumbering when we do blocking
- const unsigned int
- *it = begin_indices (renumbering[cell]),
- *end_cell = end_indices (renumbering[cell]);
- for ( ; it != end_cell; ++it)
- if (row_lengths[*it] > 0)
- {
- SparsityPattern::iterator sp = connectivity_dof.begin(*it);
- // jump over diagonal for square patterns
- if (connectivity_dof.n_rows()==connectivity_dof.n_cols())
- ++sp;
- row_entries.reserve (row_entries.size() + end_cell - it);
- std::vector<types::global_dof_index>::iterator insert_pos = row_entries.begin();
- for ( ; sp != connectivity_dof.end(*it); ++sp)
- if (sp->column() >= block)
- break;
- else
- row_entries.insert (sp->column(), insert_pos);
- }
- }
- cell_start +=n_comp;
- }
- mcell_start += task_info.block_size;
- }
- else
- {
- const unsigned int *it = begin_indices (block),
- * end_cell = end_indices (block);
- for ( ; it != end_cell; ++it)
- if (row_lengths[*it] > 0)
- {
- SparsityPattern::iterator sp = connectivity_dof.begin(*it);
- // jump over diagonal for square patterns
- if (connectivity_dof.n_rows()==connectivity_dof.n_cols())
- ++sp;
- row_entries.reserve (row_entries.size() + end_cell - it);
- std::vector<types::global_dof_index>::iterator insert_pos = row_entries.begin();
- for ( ; sp != connectivity_dof.end(*it); ++sp)
- if (sp->column() >= block)
- break;
- else
- row_entries.insert (sp->column(), insert_pos);
- }
- }
- connectivity.add_entries (block, row_entries.begin(), row_entries.end());
- }
- connectivity.symmetrize ();
+ // Invert renumbering for use in fill_connectivity.
+ std::vector<unsigned int> reverse_numbering(task_info.n_active_cells);
+ reverse_numbering = Utilities::invert_permutation(renumbering);
+
+ // From the above connectivity between dofs and cells, we can finally
+ // create a connectivity list between cells. The connectivity graph
+ // should apply the renumbering, i.e., the entry for cell j is the entry
+ // for cell renumbering[j] in the original ordering.
+ parallel::apply_to_subranges(0, task_info.n_active_cells,
+ std::bind(&fill_connectivity,
+ std::placeholders::_1,
+ std::placeholders::_2,
+ std::cref(*this),
+ std::cref(reverse_numbering),
+ std::cref(connectivity_dof),
+ std::ref(connectivity)), 20);
}
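  // Illustrative note (not part of this patch): 'renumbering' maps the new
  // cell number to the old one, so Utilities::invert_permutation() yields the
  // map from old to new that fill_connectivity() needs. For example,
  //   renumbering                                = {2, 0, 1}  (new -> old)
  //   Utilities::invert_permutation(renumbering) = {1, 2, 0}  (old -> new)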
template <typename StreamType>
void
DoFInfo::print_memory_consumption (StreamType &out,
- const SizeInfo &size_info) const
+ const TaskInfo &task_info) const
{
out << " Memory row starts indices: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, (row_starts.capacity()*sizeof(std::array<unsigned int, 3>)));
out << " Memory dof indices: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (dof_indices));
out << " Memory constraint indicators: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (constraint_indicator));
out << " Memory plain indices: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (row_starts_plain_indices)+
MemoryConsumption::memory_consumption (plain_dof_indices));
out << " Memory vector partitioner: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (*vector_partitioner));
}
// ---------------------------------------------------------------------
//
-// Copyright (C) 2011 - 2017 by the deal.II authors
+// Copyright (C) 2011 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
#include <deal.II/base/tensor.h>
#include <deal.II/base/vectorization.h>
-DEAL_II_NAMESPACE_OPEN
+#include <deal.II/matrix_free/task_info.h>
-
-namespace internal
-{
- namespace MatrixFreeFunctions
- {
- /**
- * A struct that collects all information related to parallelization with
- * threads: The work is subdivided into tasks that can be done
- * independently.
- */
- struct TaskInfo
- {
- /**
- * Constructor.
- */
- TaskInfo ();
-
- /**
- * Clear all the data fields and resets them to zero.
- */
- void clear ();
-
- /**
- * Return the memory consumption of the class.
- */
- std::size_t memory_consumption () const;
-
- unsigned int block_size;
- unsigned int n_blocks;
- unsigned int block_size_last;
- unsigned int position_short_block;
- bool use_multithreading;
- bool use_partition_partition;
- bool use_coloring_only;
-
- std::vector<unsigned int> partition_color_blocks_row_index;
- std::vector<unsigned int> partition_color_blocks_data;
- unsigned int evens;
- unsigned int odds;
- unsigned int n_blocked_workers;
- unsigned int n_workers;
-
- std::vector<unsigned int> partition_evens;
- std::vector<unsigned int> partition_odds;
- std::vector<unsigned int> partition_n_blocked_workers;
- std::vector<unsigned int> partition_n_workers;
- };
-
-
-
- /**
- * A struct that collects all information related to the size of the
- * problem and MPI parallelization.
- */
- struct SizeInfo
- {
- /**
- * Constructor.
- */
- SizeInfo ();
-
- /**
- * Clear all data fields and resets the sizes to zero.
- */
- void clear();
-
- /**
- * Prints minimum, average, and maximal memory consumption over the MPI
- * processes.
- */
- template <typename StreamType>
- void print_memory_statistics (StreamType &out,
- std::size_t data_length) const;
-
- /**
- * Determines the position of cells with ghosts for distributed-memory
- * calculations.
- */
- void make_layout (const unsigned int n_active_cells_in,
- const unsigned int vectorization_length_in,
- std::vector<unsigned int> &boundary_cells,
- std::vector<unsigned int> &irregular_cells);
-
- unsigned int n_active_cells;
- unsigned int n_macro_cells;
- unsigned int boundary_cells_start;
- unsigned int boundary_cells_end;
- unsigned int vectorization_length;
-
- /**
- * index sets to describe the layout of cells: locally owned cells and
- * locally active cells
- */
- IndexSet locally_owned_cells;
- IndexSet ghost_cells;
-
- /**
- * MPI communicator
- */
- MPI_Comm communicator;
- unsigned int my_pid;
- unsigned int n_procs;
- };
-
- } // end of namespace MatrixFreeFunctions
-} // end of namespace internal
-
-DEAL_II_NAMESPACE_CLOSE
-
#endif
*/
template <typename StreamType>
void print_memory_consumption(StreamType &out,
- const SizeInfo &task_info) const;
+ const TaskInfo &task_info) const;
/**
* Stores whether a cell is Cartesian (cell type 0), has constant
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/q_collection.h>
-#include <deal.II/matrix_free/helper_functions.h>
+#include <deal.II/matrix_free/task_info.h>
#include <deal.II/matrix_free/shape_info.h>
#include <deal.II/matrix_free/dof_info.h>
#include <deal.II/matrix_free/mapping_info.h>
-#ifdef DEAL_II_WITH_THREADS
-#include <tbb/task.h>
-#include <tbb/task_scheduler_init.h>
-#include <tbb/parallel_for.h>
-#include <tbb/blocked_range.h>
-#endif
-
#include <stdlib.h>
#include <memory>
#include <limits>
/**
* Return information on system size.
*/
- const internal::MatrixFreeFunctions::SizeInfo &
+ DEAL_II_DEPRECATED
+ const internal::MatrixFreeFunctions::TaskInfo &
get_size_info () const;
/*
std::vector<std::pair<unsigned int,unsigned int> > cell_level_index;
/**
- * Stores how many cells we have, how many cells that we see after applying
- * vectorization (i.e., the number of macro cells), and MPI-related stuff.
- */
- internal::MatrixFreeFunctions::SizeInfo size_info;
+ * For discontinuous Galerkin, the cell_level_index includes cells that are
+ * not on the local processor but that are needed to evaluate the cell
+ * integrals. In cell_level_index_end_local, we store the number of local
+ * cells.
+   */
+ unsigned int cell_level_index_end_local;
/**
- * Information regarding the shared memory parallelization.
+ * Stores how many cells we have, how many cells that we see after applying
+ * vectorization (i.e., the number of macro cells), MPI-related stuff, and,
+ * if threads are enabled, information regarding the shared memory
+ * parallelization.
*/
internal::MatrixFreeFunctions::TaskInfo task_info;
template <int dim, typename Number>
inline
-const internal::MatrixFreeFunctions::SizeInfo &
+const internal::MatrixFreeFunctions::TaskInfo &
MatrixFree<dim,Number>::get_size_info () const
{
- return size_info;
+ return task_info;
}
unsigned int
MatrixFree<dim,Number>::n_macro_cells () const
{
- return size_info.n_macro_cells;
+ return *(task_info.cell_partition_data.end()-2);
}
unsigned int
MatrixFree<dim,Number>::n_physical_cells () const
{
- return size_info.n_active_cells;
+ return task_info.n_active_cells;
}
const unsigned int vectorization_length=VectorizedArray<Number>::n_array_elements;
#ifdef DEBUG
AssertIndexRange (dof_index, dof_handlers.n_dof_handlers);
- AssertIndexRange (macro_cell_number, size_info.n_macro_cells);
+ AssertIndexRange (macro_cell_number, n_macro_cells());
AssertIndexRange (vector_number, vectorization_length);
const unsigned int irreg_filled = dof_info[dof_index].row_starts[macro_cell_number][2];
if (irreg_filled > 0)
const unsigned int vectorization_length=VectorizedArray<Number>::n_array_elements;
#ifdef DEBUG
AssertIndexRange (dof_index, dof_handlers.n_dof_handlers);
- AssertIndexRange (macro_cell_number, size_info.n_macro_cells);
+ AssertIndexRange (macro_cell_number, n_macro_cells());
AssertIndexRange (vector_number, vectorization_length);
const unsigned int irreg_filled = dof_info[dof_index].row_starts[macro_cell_number][2];
if (irreg_filled > 0)
bool
MatrixFree<dim,Number>::at_irregular_cell (const unsigned int macro_cell) const
{
- AssertIndexRange (macro_cell, size_info.n_macro_cells);
+ AssertIndexRange (macro_cell, n_macro_cells());
return dof_info[0].row_starts[macro_cell][2] > 0;
}
unsigned int
MatrixFree<dim,Number>::n_components_filled (const unsigned int macro_cell) const
{
- AssertIndexRange (macro_cell, size_info.n_macro_cells);
+ AssertIndexRange (macro_cell, n_macro_cells());
const unsigned int n_filled = dof_info[0].row_starts[macro_cell][2];
if (n_filled == 0)
return VectorizedArray<Number>::n_array_elements;
// functions: for generic vectors, do nothing at all. For distributed vectors,
// call update_ghost_values_start function and so on. If we have collections
// of vectors, just do the individual functions of the components, in order to
-// keep ghost values consistent (whether we are in read or write mode). the whole situation is a bit complicated by the fact
-// that we need to treat block vectors differently, which use some additional
-// helper functions to select the blocks and template magic.
+// keep ghost values consistent (whether we are in read or write mode). The
+// whole situation is a bit complicated by the fact that we need to treat
+// block vectors differently, which use some additional helper functions to
+// select the blocks and template magic.
namespace internal
{
template <typename VectorStruct>
-#ifdef DEAL_II_WITH_THREADS
- // This defines the TBB data structures that are needed to schedule the
- // partition-partition variant
-
- namespace partition
+ namespace MatrixFreeFunctions
{
- template <typename Worker>
- class CellWork : public tbb::task
- {
- public:
- CellWork (const Worker &worker_in,
- const unsigned int partition_in,
- const internal::MatrixFreeFunctions::TaskInfo &task_info_in,
- const bool is_blocked_in)
- :
- dummy (nullptr),
- worker (worker_in),
- partition (partition_in),
- task_info (task_info_in),
- is_blocked (is_blocked_in)
- {};
- tbb::task *execute ()
- {
- std::pair<unsigned int, unsigned int> cell_range
- (task_info.partition_color_blocks_data[partition],
- task_info.partition_color_blocks_data[partition+1]);
- worker(cell_range);
- if (is_blocked==true)
- dummy->spawn (*dummy);
- return (nullptr);
- }
-
- tbb::empty_task *dummy;
+ // struct to select between a const interface and a non-const interface
+ // for MFWorker
+ template <typename, typename, typename, typename, bool>
+ struct InterfaceSelector
+ {};
- private:
- const Worker &worker;
- const unsigned int partition;
- const internal::MatrixFreeFunctions::TaskInfo &task_info;
- const bool is_blocked;
+    // Version for constant functions
+ template <typename MF, typename InVector, typename OutVector, typename Container>
+ struct InterfaceSelector<MF, InVector, OutVector, Container, true>
+ {
+ typedef void (Container::*function_type)
+ (const MF &, OutVector &, const InVector &,
+ const std::pair<unsigned int, unsigned int> &)const;
};
-
-
- template <typename Worker>
- class PartitionWork : public tbb::task
+ // Version for non-constant functions
+ template <typename MF, typename InVector, typename OutVector, typename Container>
+ struct InterfaceSelector<MF, InVector, OutVector, Container, false>
{
- public:
- PartitionWork (const Worker &function_in,
- const unsigned int partition_in,
- const internal::MatrixFreeFunctions::TaskInfo &task_info_in,
- const bool is_blocked_in = false)
- :
- dummy (nullptr),
- function (function_in),
- partition (partition_in),
- task_info (task_info_in),
- is_blocked (is_blocked_in)
- {};
- tbb::task *execute ()
- {
- tbb::empty_task *root = new ( tbb::task::allocate_root() )
- tbb::empty_task;
- unsigned int evens = task_info.partition_evens[partition];
- unsigned int odds = task_info.partition_odds[partition];
- unsigned int n_blocked_workers =
- task_info.partition_n_blocked_workers[partition];
- unsigned int n_workers = task_info.partition_n_workers[partition];
- std::vector<CellWork<Worker>*> worker(n_workers);
- std::vector<CellWork<Worker>*> blocked_worker(n_blocked_workers);
-
- root->set_ref_count(evens+1);
- for (unsigned int j=0; j<evens; j++)
- {
- worker[j] = new (root->allocate_child())
- CellWork<Worker>(function, task_info.
- partition_color_blocks_row_index[partition]+2*j,
- task_info, false);
- if (j>0)
- {
- worker[j]->set_ref_count(2);
- blocked_worker[j-1]->dummy = new (worker[j]->allocate_child())
- tbb::empty_task;
- worker[j-1]->spawn(*blocked_worker[j-1]);
- }
- else
- worker[j]->set_ref_count(1);
- if (j<evens-1)
- {
- blocked_worker[j] = new (worker[j]->allocate_child())
- CellWork<Worker>(function, task_info.
- partition_color_blocks_row_index
- [partition] + 2*j+1, task_info, true);
- }
- else
- {
- if (odds==evens)
- {
- worker[evens] = new (worker[j]->allocate_child())
- CellWork<Worker>(function, task_info.
- partition_color_blocks_row_index[partition]+2*j+1,
- task_info, false);
- worker[j]->spawn(*worker[evens]);
- }
- else
- {
- tbb::empty_task *child = new (worker[j]->allocate_child())
- tbb::empty_task();
- worker[j]->spawn(*child);
- }
- }
- }
-
- root->wait_for_all();
- root->destroy(*root);
- if (is_blocked==true)
- dummy->spawn (*dummy);
- return (nullptr);
- }
-
- tbb::empty_task *dummy;
-
- private:
- const Worker &function;
- const unsigned int partition;
- const internal::MatrixFreeFunctions::TaskInfo &task_info;
- const bool is_blocked;
+ typedef void (Container::*function_type)
+ (const MF &, OutVector &, const InVector &,
+ const std::pair<unsigned int, unsigned int> &);
};
-
- } // end of namespace partition
+ }
- namespace color
+  // An implementation class for the worker object that runs the various
+ // operations we want to perform during the matrix-free loop
+ template <typename MF, typename InVector, typename OutVector,
+ typename Container, bool is_constant>
+ class MFWorker : public MFWorkerInterface
{
- template <typename Worker>
- class CellWork
- {
- public:
- CellWork (const Worker &worker_in,
- const internal::MatrixFreeFunctions::TaskInfo &task_info_in)
- :
- worker (worker_in),
- task_info (task_info_in)
- {};
- void operator()(const tbb::blocked_range<unsigned int> &r) const
- {
- for (unsigned int block=r.begin(); block<r.end(); block++)
- {
- std::pair<unsigned int,unsigned int> cell_range;
- if (task_info.position_short_block<block)
- {
- cell_range.first = (block-1)*task_info.block_size+
- task_info.block_size_last;
- cell_range.second = cell_range.first + task_info.block_size;
- }
- else
- {
- cell_range.first = block*task_info.block_size;
- cell_range.second = cell_range.first +
- ((block == task_info.position_short_block)?
- (task_info.block_size_last):(task_info.block_size));
- }
- worker (cell_range);
- }
- }
- private:
- const Worker &worker;
- const internal::MatrixFreeFunctions::TaskInfo &task_info;
- };
+ public:
+ // A typedef to make the arguments further down more readable
+ typedef typename MatrixFreeFunctions::InterfaceSelector
+ <MF,InVector,OutVector,Container,is_constant>::function_type function_type;
+
+ // constructor, binds all the arguments to this class
+ MFWorker (const MF &matrix_free,
+ const InVector &src,
+ OutVector &dst,
+ const bool zero_dst_vector_setting,
+ const Container &container,
+ function_type cell_function,
+ function_type face_function,
+ function_type boundary_function)
+ :
+ matrix_free (matrix_free),
+ container (const_cast<Container &>(container)),
+ cell_function (cell_function),
+ face_function (face_function),
+ boundary_function (boundary_function),
+ src (src),
+ dst (dst),
+ ghosts_were_set(false),
+ src_and_dst_are_same (PointerComparison::equal(&src, &dst)),
+ zero_dst_vector_setting(zero_dst_vector_setting &&!src_and_dst_are_same)
+ {}
+ // Runs the cell work. If no function is given, nothing is done
+ virtual void cell(const std::pair<unsigned int,unsigned int> &cell_range) override
+ {
+ if (cell_function != nullptr && cell_range.second > cell_range.first)
+ (container.*cell_function)(matrix_free, this->dst, this->src, cell_range);
+ }
- template <typename Worker>
- class PartitionWork : public tbb::task
+ // Runs the assembler on interior faces. If no function is given, nothing
+ // is done
+ virtual void face(const std::pair<unsigned int,unsigned int> &face_range) override
{
- public:
- PartitionWork (const Worker &worker_in,
- const unsigned int partition_in,
- const internal::MatrixFreeFunctions::TaskInfo &task_info_in,
- const bool is_blocked_in)
- :
- dummy (nullptr),
- worker (worker_in),
- partition (partition_in),
- task_info (task_info_in),
- is_blocked (is_blocked_in)
- {};
- tbb::task *execute ()
- {
- unsigned int lower = task_info.partition_color_blocks_data[partition],
- upper = task_info.partition_color_blocks_data[partition+1];
- parallel_for(tbb::blocked_range<unsigned int>(lower,upper,1),
- CellWork<Worker> (worker,task_info));
- if (is_blocked==true)
- dummy->spawn (*dummy);
- return (nullptr);
- }
+ if (face_function != nullptr && face_range.second > face_range.first)
+ (container.*face_function)(matrix_free, this->dst, this->src, face_range);
+ }
- tbb::empty_task *dummy;
+ // Runs the assembler on boundary faces. If no function is given, nothing
+ // is done
+ virtual void boundary(const std::pair<unsigned int,unsigned int> &face_range) override
+ {
+ if (boundary_function != nullptr && face_range.second > face_range.first)
+ (container.*boundary_function)(matrix_free, this->dst, this->src, face_range);
+ }
- private:
- const Worker &worker;
- const unsigned int partition;
- const internal::MatrixFreeFunctions::TaskInfo &task_info;
- const bool is_blocked;
- };
+ // Starts the communication for the update ghost values operation. We
+    // cannot call this update if source and destination are the same because
+ // that would introduce spurious entries in the destination (there is also
+ // the problem that reading from a vector that we also write to is usually
+ // not intended in case there is overlap, but this is up to the
+ // application code to decide and we cannot catch this case here).
+ virtual void vector_update_ghosts_start() override
+ {
+ if (!src_and_dst_are_same)
+ ghosts_were_set = internal::update_ghost_values_start(src);
+ }
- } // end of namespace color
+ // Finishes the communication for the update ghost values operation
+ virtual void vector_update_ghosts_finish() override
+ {
+ if (!src_and_dst_are_same)
+ internal::update_ghost_values_finish(src);
+ }
+ // Starts the communication for the vector compress operation
+ virtual void vector_compress_start() override
+ {
+ internal::compress_start(dst);
+ }
- template <typename VectorStruct>
- class MPIComDistribute : public tbb::task
- {
- public:
- MPIComDistribute (const VectorStruct &src_in)
- :
- src(src_in)
- {};
+ // Finishes the communication for the vector compress operation
+ virtual void vector_compress_finish() override
+ {
+ internal::compress_finish(dst);
+ if (!src_and_dst_are_same)
+ internal::reset_ghost_values(src, !ghosts_were_set);
+ }
- tbb::task *execute ()
+    // Zeroes the given range of the destination vector
+ virtual void zero_dst_vector_range(const unsigned int /*range_index*/) override
{
- internal::update_ghost_values_finish(src);
- return nullptr;
+ // currently not implemented
+ (void)zero_dst_vector_setting;
}
private:
- const VectorStruct &src;
+ const MF &matrix_free;
+ Container &container;
+ function_type cell_function;
+ function_type face_function;
+ function_type boundary_function;
+
+ const InVector &src;
+ OutVector &dst;
+ bool ghosts_were_set;
+ const bool src_and_dst_are_same;
+ const bool zero_dst_vector_setting;
};
- template <typename VectorStruct>
- class MPIComCompress : public tbb::task
+ /**
+   * An internal class to convert three std::function objects to the
+   * scheme with virtual functions above.
+ */
+ template <class MF, typename InVector, typename OutVector>
+ struct MFClassWrapper
{
- public:
- MPIComCompress (VectorStruct &dst_in)
+ typedef std::function<void (const MF &, OutVector &, const InVector &,
+ const std::pair<unsigned int, unsigned int> &)> function_type;
+
+ MFClassWrapper (const function_type cell,
+ const function_type face,
+ const function_type boundary)
:
- dst(dst_in)
- {};
+ cell (cell),
+ face (face),
+ boundary (boundary)
+ {}
- tbb::task *execute ()
+ void cell_integrator (const MF &mf, OutVector &dst, const InVector &src,
+ const std::pair<unsigned int, unsigned int> &range) const
{
- internal::compress_start(dst);
- return nullptr;
+ if (cell)
+ cell(mf, dst, src, range);
}
- private:
- VectorStruct &dst;
- };
+ void face_integrator (const MF &mf, OutVector &dst, const InVector &src,
+ const std::pair<unsigned int, unsigned int> &range) const
+ {
+ if (face)
+ face(mf, dst, src, range);
+ }
+
+ void boundary_integrator (const MF &mf, OutVector &dst, const InVector &src,
+ const std::pair<unsigned int, unsigned int> &range) const
+ {
+ if (boundary)
+ boundary(mf, dst, src, range);
+ }
-#endif // DEAL_II_WITH_THREADS
+ const function_type cell;
+ const function_type face;
+ const function_type boundary;
+ };
} // end of namespace internal
OutVector &dst,
const InVector &src) const
{
- // in any case, need to start the ghost import at the beginning
- bool ghosts_were_not_set = internal::update_ghost_values_start (src);
-
-#ifdef DEAL_II_WITH_THREADS
-
- // Use multithreading if so requested and if there is enough work to do in
- // parallel (the code might hang if there are less than two chunks!)
- if (task_info.use_multithreading == true && task_info.n_blocks > 3)
- {
- // to simplify the function calls, bind away all arguments except the
- // cell range
- typedef
- std::function<void (const std::pair<unsigned int,unsigned int> &range)>
- Worker;
-
- const Worker func = std::bind (std::ref(cell_operation),
- std::cref(*this),
- std::ref(dst),
- std::cref(src),
- std::placeholders::_1);
-
- if (task_info.use_partition_partition == true)
- {
- tbb::empty_task *root = new ( tbb::task::allocate_root() )
- tbb::empty_task;
- unsigned int evens = task_info.evens;
- unsigned int odds = task_info.odds;
- root->set_ref_count(evens+1);
- unsigned int n_blocked_workers = task_info.n_blocked_workers;
- unsigned int n_workers = task_info.n_workers;
- std::vector<internal::partition::PartitionWork<Worker>*>
- worker(n_workers);
- std::vector<internal::partition::PartitionWork<Worker>*>
- blocked_worker(n_blocked_workers);
- internal::MPIComCompress<OutVector> *worker_compr =
- new (root->allocate_child())
- internal::MPIComCompress<OutVector>(dst);
- worker_compr->set_ref_count(1);
- for (unsigned int j=0; j<evens; j++)
- {
- if (j>0)
- {
- worker[j] = new (root->allocate_child())
- internal::partition::PartitionWork<Worker>
- (func,2*j,task_info,false);
- worker[j]->set_ref_count(2);
- blocked_worker[j-1]->dummy = new (worker[j]->allocate_child())
- tbb::empty_task;
- if (j>1)
- worker[j-1]->spawn(*blocked_worker[j-1]);
- else
- worker_compr->spawn(*blocked_worker[j-1]);
- }
- else
- {
- worker[j] = new (worker_compr->allocate_child())
- internal::partition::PartitionWork<Worker>
- (func,2*j,task_info,false);
- worker[j]->set_ref_count(2);
- internal::MPIComDistribute<InVector> *worker_dist =
- new (worker[j]->allocate_child())
- internal::MPIComDistribute<InVector>(src);
- worker_dist->spawn(*worker_dist);
- }
- if (j<evens-1)
- {
- blocked_worker[j] = new (worker[j]->allocate_child())
- internal::partition::PartitionWork<Worker>
- (func,2*j+1,task_info,true);
- }
- else
- {
- if (odds==evens)
- {
- worker[evens] = new (worker[j]->allocate_child())
- internal::partition::PartitionWork<Worker>
- (func,2*j+1,task_info,false);
- worker[j]->spawn(*worker[evens]);
- }
- else
- {
- tbb::empty_task *child = new (worker[j]->allocate_child())
- tbb::empty_task();
- worker[j]->spawn(*child);
- }
- }
- }
-
- root->wait_for_all();
- root->destroy(*root);
- }
- else // end of partition-partition, start of partition-color
- {
- unsigned int evens = task_info.evens;
- unsigned int odds = task_info.odds;
-
- // check whether there is only one partition. if not, build up the
- // tree of partitions
- if (odds > 0)
- {
- tbb::empty_task *root = new ( tbb::task::allocate_root() ) tbb::empty_task;
- root->set_ref_count(evens+1);
- unsigned int n_blocked_workers = odds-(odds+evens+1)%2;
- unsigned int n_workers = task_info.partition_color_blocks_data.size()-1-
- n_blocked_workers;
- std::vector<internal::color::PartitionWork<Worker>*> worker(n_workers);
- std::vector<internal::color::PartitionWork<Worker>*> blocked_worker(n_blocked_workers);
- unsigned int worker_index = 0, slice_index = 0;
- unsigned int spawn_index = 0;
- int spawn_index_child = -2;
- internal::MPIComCompress<OutVector> *worker_compr = new (root->allocate_child())
- internal::MPIComCompress<OutVector>(dst);
- worker_compr->set_ref_count(1);
- for (unsigned int part=0;
- part<task_info.partition_color_blocks_row_index.size()-1; part++)
- {
- const unsigned int spawn_index_new = worker_index;
- if (part == 0)
- worker[worker_index] = new (worker_compr->allocate_child())
- internal::color::PartitionWork<Worker>(func,slice_index,task_info,false);
- else
- worker[worker_index] = new (root->allocate_child())
- internal::color::PartitionWork<Worker>(func,slice_index,task_info,false);
- slice_index++;
- for (; slice_index<task_info.partition_color_blocks_row_index[part+1];
- slice_index++)
- {
- worker[worker_index]->set_ref_count(1);
- worker_index++;
- worker[worker_index] = new (worker[worker_index-1]->allocate_child())
- internal::color::PartitionWork<Worker>(func,slice_index,task_info,false);
- }
- worker[worker_index]->set_ref_count(2);
- if (part>0)
- {
- blocked_worker[(part-1)/2]->dummy =
- new (worker[worker_index]->allocate_child()) tbb::empty_task;
- worker_index++;
- if (spawn_index_child == -1)
- worker[spawn_index]->spawn(*blocked_worker[(part-1)/2]);
- else
- {
- Assert(spawn_index_child>=0, ExcInternalError());
- worker[spawn_index]->spawn(*worker[spawn_index_child]);
- }
- spawn_index = spawn_index_new;
- }
- else
- {
- internal::MPIComDistribute<InVector> *worker_dist =
- new (worker[worker_index]->allocate_child())
- internal::MPIComDistribute<InVector>(src);
- worker_dist->spawn(*worker_dist);
- worker_index++;
- }
- part += 1;
- if (part<task_info.partition_color_blocks_row_index.size()-1)
- {
- if (part<task_info.partition_color_blocks_row_index.size()-2)
- {
- blocked_worker[part/2] = new (worker[worker_index-1]->allocate_child())
- internal::color::PartitionWork<Worker>(func,slice_index,task_info,true);
- slice_index++;
- if (slice_index<
- task_info.partition_color_blocks_row_index[part+1])
- {
- blocked_worker[part/2]->set_ref_count(1);
- worker[worker_index] = new (blocked_worker[part/2]->allocate_child())
- internal::color::PartitionWork<Worker>(func,slice_index,task_info,false);
- slice_index++;
- }
- else
- {
- spawn_index_child = -1;
- continue;
- }
- }
- for (; slice_index<task_info.partition_color_blocks_row_index[part+1];
- slice_index++)
- {
- if (slice_index>
- task_info.partition_color_blocks_row_index[part])
- {
- worker[worker_index]->set_ref_count(1);
- worker_index++;
- }
- worker[worker_index] = new (worker[worker_index-1]->allocate_child())
- internal::color::PartitionWork<Worker>(func,slice_index,task_info,false);
- }
- spawn_index_child = worker_index;
- worker_index++;
- }
- else
- {
- tbb::empty_task *final = new (worker[worker_index-1]->allocate_child())
- tbb::empty_task;
- worker[spawn_index]->spawn(*final);
- spawn_index_child = worker_index-1;
- }
- }
- if (evens==odds)
- {
- Assert(spawn_index_child>=0, ExcInternalError());
- worker[spawn_index]->spawn(*worker[spawn_index_child]);
- }
- root->wait_for_all();
- root->destroy(*root);
- }
- // case when we only have one partition: this is the usual coloring
- // scheme, and we just schedule a parallel for loop for each color
- else
- {
- Assert(evens==1,ExcInternalError());
- internal::update_ghost_values_finish(src);
-
- for (unsigned int color=0;
- color < task_info.partition_color_blocks_row_index[1];
- ++color)
- {
- unsigned int lower = task_info.partition_color_blocks_data[color],
- upper = task_info.partition_color_blocks_data[color+1];
- parallel_for(tbb::blocked_range<unsigned int>(lower,upper,1),
- internal::color::CellWork<Worker>
- (func,task_info));
- }
-
- internal::compress_start(dst);
- }
- }
- }
- else
-#endif
- // serial loop
- {
- std::pair<unsigned int,unsigned int> cell_range;
-
- // First operate on cells where no ghost data is needed (inner cells)
- {
- cell_range.first = 0;
- cell_range.second = size_info.boundary_cells_start;
- cell_operation (*this, dst, src, cell_range);
- }
-
- // before starting operations on cells that contain ghost nodes (outer
- // cells), wait for the MPI commands to finish
- internal::update_ghost_values_finish(src);
-
- // For the outer cells, do the same procedure as for inner cells.
- if (size_info.boundary_cells_end > size_info.boundary_cells_start)
- {
- cell_range.first = size_info.boundary_cells_start;
- cell_range.second = size_info.boundary_cells_end;
- cell_operation (*this, dst, src, cell_range);
- }
-
- internal::compress_start(dst);
-
- // Finally operate on cells where no ghost data is needed (inner cells)
- if (size_info.n_macro_cells > size_info.boundary_cells_end)
- {
- cell_range.first = size_info.boundary_cells_end;
- cell_range.second = size_info.n_macro_cells;
- cell_operation (*this, dst, src, cell_range);
- }
- }
+ typedef internal::MFClassWrapper<MatrixFree<dim, Number>, InVector, OutVector> Wrapper;
+ Wrapper wrap (cell_operation, nullptr, nullptr);
+ internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, Wrapper, true>
+ worker(*this, src, dst, false, wrap, &Wrapper::cell_integrator,
+ &Wrapper::face_integrator, &Wrapper::boundary_integrator);
- // In every case, we need to finish transfers at the very end
- internal::compress_finish(dst);
- internal::reset_ghost_values(src, ghosts_were_not_set);
+ task_info.loop (worker);
}
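+// A minimal usage sketch of the public cell_loop() entry point that ends up
+// in the MFWorker / TaskInfo::loop path above. This is hypothetical user code
+// for illustration only: the operator class MyOperator, its member function
+// local_apply, and the chosen vector type are assumptions, not part of this
+// patch.
+//
+//   LinearAlgebra::distributed::Vector<double> dst, src;
+//   MatrixFree<dim,double>                     matrix_free;
+//   MyOperator                                 op;
+//   // ... after matrix_free.reinit(...) and vector initialization ...
+//   matrix_free.cell_loop (&MyOperator::local_apply, &op, dst, src);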
OutVector &dst,
const InVector &src) const
{
- // here, use std::bind to hand a function handler with the appropriate
- // argument to the other loop function
- std::function<void (const MatrixFree<dim,Number> &,
- OutVector &,
- const InVector &,
- const std::pair<unsigned int,
- unsigned int> &)>
- function = std::bind<void>(function_pointer,
- owning_class,
- std::placeholders::_1,
- std::placeholders::_2,
- std::placeholders::_3,
- std::placeholders::_4);
- cell_loop (function, dst, src);
+ internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, CLASS, true>
+ worker(*this, src, dst, false, *owning_class, function_pointer, nullptr, nullptr);
+ task_info.loop(worker);
}
OutVector &dst,
const InVector &src) const
{
- // here, use std::bind to hand a function handler with the appropriate
- // argument to the other loop function
- std::function<void (const MatrixFree<dim,Number> &,
- OutVector &,
- const InVector &,
- const std::pair<unsigned int,
- unsigned int> &)>
- function = std::bind<void>(function_pointer,
- owning_class,
- std::placeholders::_1,
- std::placeholders::_2,
- std::placeholders::_3,
- std::placeholders::_4);
- cell_loop (function, dst, src);
+ internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, CLASS, false>
+ worker(*this, src, dst, false, *owning_class, function_pointer, nullptr, nullptr);
+ task_info.loop(worker);
}
shape_info = v.shape_info;
cell_level_index = v.cell_level_index;
task_info = v.task_info;
- size_info = v.size_info;
indices_are_initialized = v.indices_are_initialized;
mapping_is_initialized = v.mapping_is_initialized;
}
const parallel::Triangulation<dim> *dist_tria =
dynamic_cast<const parallel::Triangulation<dim>*>
(&(dof_handler[0]->get_triangulation()));
- size_info.communicator = dist_tria != nullptr ?
+ task_info.communicator = dist_tria != nullptr ?
dist_tria->get_communicator() :
MPI_COMM_SELF;
- size_info.my_pid =
- Utilities::MPI::this_mpi_process(size_info.communicator);
- size_info.n_procs =
- Utilities::MPI::n_mpi_processes(size_info.communicator);
+ task_info.my_pid =
+ Utilities::MPI::this_mpi_process(task_info.communicator);
+ task_info.n_procs =
+ Utilities::MPI::n_mpi_processes(task_info.communicator);
}
else
{
- size_info.communicator = MPI_COMM_SELF;
- size_info.my_pid = 0;
- size_info.n_procs = 1;
+ task_info.communicator = MPI_COMM_SELF;
+ task_info.my_pid = 0;
+ task_info.n_procs = 1;
}
initialize_dof_handlers (dof_handler, additional_data.level_mg_handler);
if (additional_data.tasks_parallel_scheme != AdditionalData::none &&
MultithreadInfo::n_threads() > 1)
{
- task_info.use_multithreading = true;
+ task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::TasksParallelScheme(static_cast<int>(additional_data.tasks_parallel_scheme));
task_info.block_size = additional_data.tasks_block_size;
- task_info.use_partition_partition =
- (additional_data.tasks_parallel_scheme ==
- AdditionalData::partition_partition ? true : false);
- task_info.use_coloring_only =
- (additional_data.tasks_parallel_scheme ==
- AdditionalData::color ? true : false);
}
else
#endif
- task_info.use_multithreading = false;
+ task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::none;
// set dof_indices together with constraint_indicator and
// constraint_pool_data. It also reorders the way cells are gone through
{
initialize_dof_handlers(dof_handler, additional_data.level_mg_handler);
std::vector<unsigned int> dummy;
- size_info.make_layout (cell_level_index.size(),
- VectorizedArray<Number>::n_array_elements,
- dummy, dummy);
+ std::vector<unsigned char> dummy2;
+ task_info.collect_boundary_cells (cell_level_index.size(), cell_level_index.size(),
+ VectorizedArray<Number>::n_array_elements, dummy);
+ task_info.create_blocks_serial(dummy, dummy, 1, dummy, false, dummy, dummy2);
for (unsigned int i=0; i<dof_info.size(); ++i)
{
dof_info[i].dimension = dim;
dof_info[i].n_components = dof_handler[i]->get_fe().element_multiplicity(0);
dof_info[i].dofs_per_cell.push_back(dof_handler[i]->get_fe().dofs_per_cell);
- dof_info[i].row_starts.resize(size_info.n_macro_cells+1);
+ dof_info[i].row_starts.resize(task_info.cell_partition_data.back()+1);
dof_info[i].row_starts.back()[2] =
cell_level_index.size() % VectorizedArray<Number>::n_array_elements;
const parallel::Triangulation<dim> *dist_tria =
dynamic_cast<const parallel::Triangulation<dim>*>
(&(dof_handler[0]->get_triangulation()));
- size_info.communicator = dist_tria != nullptr ?
+ task_info.communicator = dist_tria != nullptr ?
dist_tria->get_communicator() :
MPI_COMM_SELF;
- size_info.my_pid =
- Utilities::MPI::this_mpi_process(size_info.communicator);
- size_info.n_procs =
- Utilities::MPI::n_mpi_processes(size_info.communicator);
+ task_info.my_pid =
+ Utilities::MPI::this_mpi_process(task_info.communicator);
+ task_info.n_procs =
+ Utilities::MPI::n_mpi_processes(task_info.communicator);
}
else
{
- size_info.communicator = MPI_COMM_SELF;
- size_info.my_pid = 0;
- size_info.n_procs = 1;
+ task_info.communicator = MPI_COMM_SELF;
+ task_info.my_pid = 0;
+ task_info.n_procs = 1;
}
initialize_dof_handlers (dof_handler, additional_data.level_mg_handler);
if (additional_data.tasks_parallel_scheme != AdditionalData::none &&
MultithreadInfo::n_threads() > 1)
{
- task_info.use_multithreading = true;
+ task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::TasksParallelScheme(static_cast<int>(additional_data.tasks_parallel_scheme));
task_info.block_size = additional_data.tasks_block_size;
- task_info.use_partition_partition =
- (additional_data.tasks_parallel_scheme ==
- AdditionalData::partition_partition ? true : false);
- task_info.use_coloring_only =
- (additional_data.tasks_parallel_scheme ==
- AdditionalData::color ? true : false);
}
else
#endif
- task_info.use_multithreading = false;
+ task_info.scheme = internal::MatrixFreeFunctions::TaskInfo::none;
// set dof_indices together with constraint_indicator and
// constraint_pool_data. It also reorders the way cells are gone through
{
initialize_dof_handlers(dof_handler, additional_data.level_mg_handler);
std::vector<unsigned int> dummy;
- size_info.make_layout (cell_level_index.size(),
- VectorizedArray<Number>::n_array_elements,
- dummy, dummy);
+ std::vector<unsigned char> dummy2;
+ task_info.collect_boundary_cells (cell_level_index.size(), cell_level_index.size(),
+ VectorizedArray<Number>::n_array_elements, dummy);
+ task_info.create_blocks_serial(dummy, dummy, 1, dummy, false, dummy, dummy2);
for (unsigned int i=0; i<dof_info.size(); ++i)
{
Assert(dof_handler[i]->get_fe_collection().size() == 1, ExcNotImplemented());
dof_info[i].dimension = dim;
dof_info[i].n_components = dof_handler[i]->get_fe(0).element_multiplicity(0);
dof_info[i].dofs_per_cell.push_back(dof_handler[i]->get_fe(0).dofs_per_cell);
- dof_info[i].row_starts.resize(size_info.n_macro_cells+1);
+ dof_info[i].row_starts.resize(task_info.cell_partition_data.back()+1);
dof_info[i].row_starts.back()[2] =
cell_level_index.size() % VectorizedArray<Number>::n_array_elements;
// Go through cells on zeroth level and then successively step down into
// children. This gives a z-ordering of the cells, which is beneficial when
// setting up neighboring relations between cells for thread parallelization
- const unsigned int n_mpi_procs = size_info.n_procs;
- const unsigned int my_pid = size_info.my_pid;
+ const unsigned int n_mpi_procs = task_info.n_procs;
+ const unsigned int my_pid = task_info.my_pid;
const Triangulation<dim> &tria = dof_handlers.dof_handler[0]->get_triangulation();
if (level == numbers::invalid_unsigned_int)
cell_level_index.emplace_back (cell->level(), cell->index());
}
}
+
+ // All these are cells local to this processor. Therefore, set
+ // cell_level_index_end_local to the size of cell_level_index.
+ cell_level_index_end_local = cell_level_index.size();
}
// go through cells on zeroth level and then successively step down into
// children. This gives a z-ordering of the cells, which is beneficial when
// setting up neighboring relations between cells for thread parallelization
- const unsigned int n_mpi_procs = size_info.n_procs;
- const unsigned int my_pid = size_info.my_pid;
+ const unsigned int n_mpi_procs = task_info.n_procs;
+ const unsigned int my_pid = task_info.my_pid;
// if we have no level given, use the same as for the standard DoFHandler,
// otherwise we must loop through the respective level
Assert(n_mpi_procs>1 || cell_level_index.size()==tria.n_active_cells(),
ExcInternalError());
+
+ // All these are cells local to this processor. Therefore, set
+ // cell_level_index_end_local to the size of cell_level_index.
+ cell_level_index_end_local = cell_level_index.size();
}
// set locally owned range for each component
Assert (locally_owned_set[no].is_contiguous(), ExcNotImplemented());
dof_info[no].vector_partitioner.reset
- (new Utilities::MPI::Partitioner(locally_owned_set[no], size_info.communicator));
+ (new Utilities::MPI::Partitioner(locally_owned_set[no], task_info.communicator));
// initialize the arrays for indices
dof_info[no].row_starts.resize (n_active_cells+1);
// if we found dofs on some FE component that belong to other
// processors, the cell is added to the boundary cells.
- if (cell_at_boundary == true)
+ if (cell_at_boundary == true && counter < cell_level_index_end_local)
boundary_cells.push_back(counter);
}
const unsigned int vectorization_length =
VectorizedArray<Number>::n_array_elements;
- std::vector<unsigned int> irregular_cells;
- size_info.make_layout (n_active_cells, vectorization_length, boundary_cells,
- irregular_cells);
+ task_info.collect_boundary_cells (cell_level_index_end_local,
+ n_active_cells, vectorization_length,
+ boundary_cells);
+ // finalize the creation of ghosts
for (unsigned int no=0; no<n_fe; ++no)
dof_info[no].assign_ghosts (boundary_cells);
- // reorganize the indices in order to overlap communication in MPI with
- // computations: Place all cells with ghost indices into one chunk. Also
- // reorder cells so that we can parallelize by threads
std::vector<unsigned int> renumbering;
- if (task_info.use_multithreading == true)
+ std::vector<unsigned char> irregular_cells;
+ if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::none)
{
- dof_info[0].compute_renumber_parallel (boundary_cells, size_info,
- renumbering);
- if (task_info.use_partition_partition == true)
- dof_info[0].make_thread_graph_partition_partition
- (size_info, task_info, renumbering, irregular_cells,
- dof_handlers.active_dof_handler == DoFHandlers::hp);
- else
- dof_info[0].make_thread_graph_partition_color
- (size_info, task_info, renumbering, irregular_cells,
- dof_handlers.active_dof_handler == DoFHandlers::hp);
+ const bool strict_categories = dof_handlers.active_dof_handler == DoFHandlers::hp;
+ unsigned int dofs_per_cell = 0;
+ for (unsigned int no=0; no<dof_info.size(); ++no)
+ dofs_per_cell = std::max(dofs_per_cell, dof_info[no].dofs_per_cell[0]);
+ task_info.create_blocks_serial(boundary_cells, std::vector<unsigned int>(),
+ dofs_per_cell,
+ dof_info[0].cell_active_fe_index,
+ strict_categories,
+ renumbering, irregular_cells);
}
else
{
- // In case, we have an hp-dofhandler, we have to reorder the cell
- // according to the polynomial degree on the cell.
- dof_info[0].compute_renumber_serial (boundary_cells, size_info,
- renumbering);
- if (dof_handlers.active_dof_handler == DoFHandlers::hp)
- dof_info[0].compute_renumber_hp_serial (size_info, renumbering,
- irregular_cells);
+      // For the strategy with blocking before partitioning, reorganize the
+      // indices so that MPI communication can be overlapped with computations:
+      // place all cells with ghost indices into one chunk. Also reorder cells
+      // so that we can parallelize by threads.
+ task_info.initial_setup_blocks_tasks(boundary_cells, renumbering,
+ irregular_cells);
+ task_info.guess_block_size (dof_info[0].dofs_per_cell[0]);
+
+ unsigned int n_macro_cells_before = *(task_info.cell_partition_data.end()-2);
+ unsigned int n_ghost_slots = *(task_info.cell_partition_data.end()-1)-
+ n_macro_cells_before;
+
+ unsigned int start_nonboundary = numbers::invalid_unsigned_int;
+
+      if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::partition_color ||
+          task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color)
+ {
+ // set up partitions. if we just use coloring without partitions, do
+ // nothing here, assume all cells to belong to the zero partition (that
+ // we otherwise use for MPI boundary cells)
+ if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color)
+ {
+ start_nonboundary = task_info.n_procs > 1 ?
+ std::min(((task_info.cell_partition_data[2]-
+ task_info.cell_partition_data[1]+task_info.block_size-1)/
+ task_info.block_size)*task_info.block_size,
+ task_info.cell_partition_data[3]) : 0;
+ }
+ else
+ {
+ if (task_info.n_procs > 1)
+ {
+ task_info.cell_partition_data[1] = 0;
+ task_info.cell_partition_data[2] = task_info.cell_partition_data[3];
+ }
+ start_nonboundary = task_info.cell_partition_data.back();
+ }
+
+ if (dof_handlers.active_dof_handler == DoFHandlers::hp)
+ {
+ irregular_cells.resize (0);
+ irregular_cells.resize (task_info.cell_partition_data.back()+
+ 2*dof_info[0].max_fe_index);
+ std::vector<std::vector<unsigned int> > renumbering_fe_index;
+ renumbering_fe_index.resize(dof_info[0].max_fe_index);
+ unsigned int counter;
+ n_macro_cells_before = 0;
+ for (counter=0; counter<std::min(start_nonboundary*vectorization_length,
+ task_info.n_active_cells); counter++)
+ {
+ AssertIndexRange (counter, renumbering.size());
+ AssertIndexRange (renumbering[counter],
+ dof_info[0].cell_active_fe_index.size());
+ renumbering_fe_index[dof_info[0].cell_active_fe_index[renumbering[counter]]].
+ push_back(renumbering[counter]);
+ }
+ counter = 0;
+ for (unsigned int j=0; j<dof_info[0].max_fe_index; j++)
+ {
+ for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
+ renumbering[counter++] = renumbering_fe_index[j][jj];
+ irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
+ n_macro_cells_before] =
+ renumbering_fe_index[j].size()%vectorization_length;
+ n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
+ vectorization_length;
+ renumbering_fe_index[j].resize(0);
+ }
+
+ for (counter=start_nonboundary*vectorization_length;
+ counter<task_info.n_active_cells; counter++)
+ {
+ renumbering_fe_index[dof_info[0].cell_active_fe_index.empty() ? 0 :
+ dof_info[0].cell_active_fe_index[renumbering[counter]]].
+ push_back(renumbering[counter]);
+ }
+ counter = start_nonboundary * vectorization_length;
+ for (unsigned int j=0; j<dof_info[0].max_fe_index; j++)
+ {
+ for (unsigned int jj=0; jj<renumbering_fe_index[j].size(); jj++)
+ renumbering[counter++] = renumbering_fe_index[j][jj];
+ irregular_cells[renumbering_fe_index[j].size()/vectorization_length+
+ n_macro_cells_before] =
+ renumbering_fe_index[j].size()%vectorization_length;
+ n_macro_cells_before += (renumbering_fe_index[j].size()+vectorization_length-1)/
+ vectorization_length;
+ }
+ AssertIndexRange (n_macro_cells_before,
+ task_info.cell_partition_data.back() + 2*dof_info[0].max_fe_index+1);
+ irregular_cells.resize (n_macro_cells_before+n_ghost_slots);
+ *(task_info.cell_partition_data.end()-2) = n_macro_cells_before;
+ *(task_info.cell_partition_data.end()-1) = n_macro_cells_before+n_ghost_slots;
+ }
+ }
+
+ task_info.n_blocks = (n_macro_cells()+task_info.block_size-1)/
+ task_info.block_size;
+
+ DynamicSparsityPattern connectivity;
+ connectivity.reinit(task_info.n_active_cells, task_info.n_active_cells);
+ if (task_info.n_active_cells > 0)
+ dof_info[0].make_connectivity_graph(task_info, renumbering, connectivity);
+
+ task_info.make_thread_graph(dof_info[0].cell_active_fe_index,
+ connectivity, renumbering, irregular_cells,
+ dof_handlers.active_dof_handler == DoFHandlers::hp);
+
+ Assert(irregular_cells.size() >= task_info.cell_partition_data.back(),
+ ExcInternalError());
+
+ irregular_cells.resize(task_info.cell_partition_data.back()+n_ghost_slots);
+ if (n_ghost_slots > 0)
+ {
+ for (unsigned int i=task_info.cell_partition_data.back();
+ i<task_info.cell_partition_data.back()+n_ghost_slots-1; ++i)
+ irregular_cells[i] = 0;
+ irregular_cells.back() = task_info.n_ghost_cells%vectorization_length;
+ }
+
+ {
+ unsigned int n_cells = 0;
+ for (unsigned int i=0; i<task_info.cell_partition_data.back(); ++i)
+ n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : vectorization_length;
+ AssertDimension(n_cells, task_info.n_active_cells);
+ n_cells = 0;
+ for (unsigned int i=task_info.cell_partition_data.back();
+ i<n_ghost_slots+task_info.cell_partition_data.back(); ++i)
+ n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : vectorization_length;
+ AssertDimension(n_cells, task_info.n_ghost_cells);
+ }
+
+ task_info.cell_partition_data
+ .push_back(task_info.cell_partition_data.back()+n_ghost_slots);
}
- // Finally perform the renumbering. We also want to group several cells
- // together to one "macro-cell" for vectorization (where the arithmetic
- // operations will then be done simultaneously).
+ // Finally perform the renumbering of the degree of freedom number data. We
+ // also want to group several cells together to one "macro-cell" for
+ // vectorization (where the arithmetic operations will then be done
+ // simultaneously).
#ifdef DEBUG
{
std::vector<unsigned int> sorted_renumbering (renumbering);
std::vector<std::pair<unsigned int,unsigned int> >
cell_level_index_old;
cell_level_index.swap (cell_level_index_old);
- cell_level_index.reserve(size_info.n_macro_cells*vectorization_length);
+ cell_level_index.reserve(task_info.cell_partition_data.back()*vectorization_length);
unsigned int position_cell=0;
- for (unsigned int i=0; i<size_info.n_macro_cells; ++i)
+ for (unsigned int i=0; i<task_info.cell_partition_data.back(); ++i)
{
unsigned int n_comp = (irregular_cells[i]>0)?
irregular_cells[i] : vectorization_length;
(cell_level_index_old[renumbering[position_cell+n_comp-1]]);
position_cell += n_comp;
}
- AssertDimension (position_cell, size_info.n_active_cells);
- AssertDimension (cell_level_index.size(),size_info.n_macro_cells*vectorization_length);
+ AssertDimension (position_cell, task_info.n_active_cells + task_info.n_ghost_cells);
+ AssertDimension (cell_level_index.size(),task_info.cell_partition_data.back()*
+ vectorization_length);
}
// set constraint pool from the std::map and reorder the indices
}
AssertDimension(constraint_pool_data.size(), length);
for (unsigned int no=0; no<n_fe; ++no)
- dof_info[no].reorder_cells(size_info, renumbering,
+ dof_info[no].reorder_cells(task_info, renumbering,
constraint_pool_row_index,
irregular_cells, vectorization_length);
dof_info.clear();
mapping_info.clear();
cell_level_index.clear();
- size_info.clear();
task_info.clear();
dof_handlers.dof_handler.clear();
dof_handlers.hp_dof_handler.clear();
void MatrixFree<dim,Number>::print_memory_consumption (StreamType &out) const
{
out << " Memory cell FE operator total: --> ";
- size_info.print_memory_statistics (out, memory_consumption());
+ task_info.print_memory_statistics (out, memory_consumption());
out << " Memory cell index: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (cell_level_index));
for (unsigned int j=0; j<dof_info.size(); ++ j)
{
out << " Memory DoFInfo component "<< j << std::endl;
- dof_info[j].print_memory_consumption(out, size_info);
+ dof_info[j].print_memory_consumption(out, task_info);
}
out << " Memory mapping info" << std::endl;
- mapping_info.print_memory_consumption(out, size_info);
+ mapping_info.print_memory_consumption(out, task_info);
out << " Memory unit cell shape data: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (shape_info));
- if (task_info.use_multithreading == true)
+ if (task_info.scheme != internal::MatrixFreeFunctions::TaskInfo::none)
{
out << " Memory task partitioning info: ";
- size_info.print_memory_statistics
+ task_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (task_info));
}
}
-/*-------------------- Implementation of helper functions ------------------*/
-
-namespace internal
-{
- namespace MatrixFreeFunctions
- {
-
- TaskInfo::TaskInfo ()
- {
- clear();
- }
-
-
-
- void TaskInfo::clear ()
- {
- block_size = 0;
- n_blocks = 0;
- block_size_last = 0;
- position_short_block = 0;
- use_multithreading = false;
- use_partition_partition = false;
- use_coloring_only = false;
- partition_color_blocks_row_index.clear();
- partition_color_blocks_data.clear();
- evens = 0;
- odds = 0;
- n_blocked_workers = 0;
- n_workers = 0;
- partition_evens.clear();
- partition_odds.clear();
- partition_n_blocked_workers.clear();
- partition_n_workers.clear();
- }
-
-
-
- std::size_t
- TaskInfo::memory_consumption () const
- {
- return (sizeof(*this)+
- MemoryConsumption::memory_consumption (partition_color_blocks_row_index) +
- MemoryConsumption::memory_consumption (partition_color_blocks_data)+
- MemoryConsumption::memory_consumption (partition_evens) +
- MemoryConsumption::memory_consumption (partition_odds) +
- MemoryConsumption::memory_consumption (partition_n_blocked_workers) +
- MemoryConsumption::memory_consumption (partition_n_workers));
- }
-
-
-
- SizeInfo::SizeInfo ()
- {
- clear();
- }
-
-
-
- void SizeInfo::clear()
- {
- n_active_cells = 0;
- n_macro_cells = 0;
- boundary_cells_start = 0;
- boundary_cells_end = 0;
- vectorization_length = 0;
- locally_owned_cells = IndexSet();
- ghost_cells = IndexSet();
- communicator = MPI_COMM_SELF;
- my_pid = 0;
- n_procs = 0;
- }
-
-
-
- template <typename StreamType>
- void SizeInfo::print_memory_statistics (StreamType &out,
- std::size_t data_length) const
- {
- Utilities::MPI::MinMaxAvg memory_c
- = Utilities::MPI::min_max_avg (1e-6*data_length, communicator);
- if (n_procs < 2)
- out << memory_c.min;
- else
- out << memory_c.min << "/" << memory_c.avg << "/" << memory_c.max;
- out << " MB" << std::endl;
- }
-
-
-
- inline
- void SizeInfo::make_layout (const unsigned int n_active_cells_in,
- const unsigned int vectorization_length_in,
- std::vector<unsigned int> &boundary_cells,
- std::vector<unsigned int> &irregular_cells)
- {
- vectorization_length = vectorization_length_in;
- n_active_cells = n_active_cells_in;
-
- unsigned int n_max_boundary_cells = boundary_cells.size();
- unsigned int n_boundary_cells = n_max_boundary_cells;
-
- // try to make the number of boundary cells divisible by the number of
- // vectors in vectorization
-
- /*
- // try to balance the number of cells before and after the boundary part
- // on each processor. probably not worth it!
- #ifdef DEAL_II_WITH_MPI
- MPI_Allreduce (&n_boundary_cells, &n_max_boundary_cells, 1, MPI_UNSIGNED,
- MPI_MAX, size_info.communicator);
- #endif
- if (n_max_boundary_cells > n_active_cells)
- n_max_boundary_cells = n_active_cells;
- */
-
- unsigned int fillup_needed =
- (vectorization_length - n_boundary_cells%vectorization_length)%vectorization_length;
- if (fillup_needed > 0 && n_boundary_cells < n_active_cells)
- {
- // fill additional cells into the list of boundary cells to get a
- // balanced number. Go through the indices successively until we
- // found enough indices
- std::vector<unsigned int> new_boundary_cells;
- new_boundary_cells.reserve (n_max_boundary_cells);
-
- unsigned int next_free_slot = 0, bound_index = 0;
- while (fillup_needed > 0 && bound_index < boundary_cells.size())
- {
- if (next_free_slot < boundary_cells[bound_index])
- {
- // check if there are enough cells to fill with in the
- // current slot
- if (next_free_slot + fillup_needed <= boundary_cells[bound_index])
- {
- for (unsigned int j=boundary_cells[bound_index]-fillup_needed;
- j < boundary_cells[bound_index]; ++j)
- new_boundary_cells.push_back(j);
- fillup_needed = 0;
- }
- // ok, not enough indices, so just take them all up to the
- // next boundary cell
- else
- {
- for (unsigned int j=next_free_slot;
- j<boundary_cells[bound_index]; ++j)
- new_boundary_cells.push_back(j);
- fillup_needed -= boundary_cells[bound_index]-next_free_slot;
- }
- }
- new_boundary_cells.push_back(boundary_cells[bound_index]);
- next_free_slot = boundary_cells[bound_index]+1;
- ++bound_index;
- }
- while (fillup_needed > 0 && (new_boundary_cells.size()==0 ||
- new_boundary_cells.back()<n_active_cells-1))
- new_boundary_cells.push_back(new_boundary_cells.back()+1);
- while (bound_index<boundary_cells.size())
- new_boundary_cells.push_back(boundary_cells[bound_index++]);
-
- boundary_cells.swap(new_boundary_cells);
- }
-
- // set the number of cells
- std::sort (boundary_cells.begin(), boundary_cells.end());
- n_boundary_cells = boundary_cells.size();
-
- // check that number of boundary cells is divisible by
- // vectorization_length or that it contains all cells
- Assert (n_boundary_cells % vectorization_length == 0 ||
- n_boundary_cells == n_active_cells, ExcInternalError());
- n_macro_cells = (n_active_cells+vectorization_length-1)/vectorization_length;
- irregular_cells.resize (n_macro_cells);
- if (n_macro_cells*vectorization_length > n_active_cells)
- {
- irregular_cells[n_macro_cells-1] =
- vectorization_length - (n_macro_cells*vectorization_length - n_active_cells);
- }
- if (n_procs > 1)
- {
- const unsigned int n_macro_boundary_cells =
- (n_boundary_cells+vectorization_length-1)/vectorization_length;
- boundary_cells_start = (n_macro_cells-n_macro_boundary_cells)/2;
- boundary_cells_end = boundary_cells_start + n_macro_boundary_cells;
- }
- else
- boundary_cells_start = boundary_cells_end = n_macro_cells;
- }
-
- }
-}
-
-
DEAL_II_NAMESPACE_CLOSE
#endif
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef __deal2__matrix_free_task_info_h
+#define __deal2__matrix_free_task_info_h
+
+
+#include <deal.II/base/exceptions.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/thread_management.h>
+#include <deal.II/base/vectorization.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+namespace internal
+{
+ /**
+ * An interface for the worker object that runs the various operations we
+ * want to perform during the matrix-free loop.
+ *
+ * @author Katharina Kormann, Martin Kronbichler, 2018
+ */
+ struct MFWorkerInterface
+ {
+ public:
+ virtual ~MFWorkerInterface() {}
+
+ /// Starts the communication for the update ghost values operation
+ virtual void vector_update_ghosts_start() = 0;
+
+ /// Finishes the communication for the update ghost values operation
+ virtual void vector_update_ghosts_finish() = 0;
+
+ /// Starts the communication for the vector compress operation
+ virtual void vector_compress_start() = 0;
+
+ /// Finishes the communication for the vector compress operation
+ virtual void vector_compress_finish() = 0;
+
+    /// Zeros part of the vector according to a given range as stored in
+ /// DoFInfo
+ virtual void zero_dst_vector_range(const unsigned int range_index) = 0;
+
+ /// Runs the cell work specified by MatrixFree::loop or
+ /// MatrixFree::cell_loop
+ virtual void cell(const std::pair<unsigned int,unsigned int> &cell_range) = 0;
+
+ /// Runs the body of the work on interior faces specified by
+ /// MatrixFree::loop
+ virtual void face(const std::pair<unsigned int,unsigned int> &face_range) = 0;
+
+ /// Runs the body of the work on boundary faces specified by
+ /// MatrixFree::loop
+ virtual void boundary(const std::pair<unsigned int,unsigned int> &face_range) = 0;
+ };
+
+
+
+ namespace MatrixFreeFunctions
+ {
+ // forward declaration of internal data structure
+ template <typename Number> struct ConstraintValues;
+
+ /**
+ * A struct that collects all information related to parallelization with
+ * threads: The work is subdivided into tasks that can be done
+ * independently.
+ *
+ * @author Katharina Kormann, Martin Kronbichler, 2011, 2018
+ */
+ struct TaskInfo
+ {
+      // enum for the choice of how to build the task graph. Odd values denote
+      // versions with pre-blocking and even values versions with post-blocking.
+      // partition_partition and partition_color are deprecated but kept for
+      // backward compatibility.
+ enum TasksParallelScheme {none,
+ partition_partition,
+ partition_color,
+ color
+ };
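+
+      // A short sketch (not part of this header) of how the scheme is normally
+      // selected: MatrixFree::internal_reinit in this patch casts the value of
+      // MatrixFree::AdditionalData::tasks_parallel_scheme onto this enum and
+      // copies the block size, roughly as
+      //
+      //   task_info.scheme = TaskInfo::TasksParallelScheme
+      //     (static_cast<int>(additional_data.tasks_parallel_scheme));
+      //   task_info.block_size = additional_data.tasks_block_size;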
+
+ /**
+ * Constructor.
+ */
+ TaskInfo ();
+
+ /**
+ * Clears all the data fields and resets them
+ * to zero.
+ */
+ void clear ();
+
+ /**
+ * Runs the matrix-free loop.
+ */
+ void loop(MFWorkerInterface &worker) const;
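+
+      // Usage sketch: an object derived from internal::MFWorkerInterface (in
+      // this patch the concrete worker is the MFWorker class in matrix_free.h)
+      // is handed to this function, which then calls back into cell() and the
+      // vector exchange hooks in the order dictated by the chosen scheme,
+      // roughly as
+      //
+      //   internal::MFWorker<MF,InVector,OutVector,Container,true>
+      //     worker (matrix_free, src, dst, false, container,
+      //             cell_function, face_function, boundary_function);
+      //   task_info.loop (worker);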
+
+ /**
+ * Determines the position of cells with ghosts for distributed-memory
+ * calculations.
+ */
+ void collect_boundary_cells (const unsigned int n_active_cells,
+ const unsigned int n_active_and_ghost_cells,
+ const unsigned int vectorization_length,
+ std::vector<unsigned int> &boundary_cells);
+
+ /**
+ * Sets up the blocks for running the cell loop based on the options
+ * controlled by the input arguments.
+ *
+ * @param boundary_cells A list of cells that need to exchange data prior
+ * to performing computations. These will be given a certain id in the
+ * partitioning.
+ *
+ * @param dofs_per_cell Gives an expected value for the number of degrees
+ * of freedom on a cell, which is used to determine the block size for
+ * interleaving cell and face integrals.
+ *
+ * @param cell_vectorization_categories This set of categories defines
+ * the cells that should be grouped together inside the lanes of a
+ * vectorized array. This can be the polynomial degree in an hp-element
+ * or a user-provided grouping.
+ *
+       * @param cell_vectorization_categories_strict Defines whether the
+       * categories defined by the previous variable should be separated
+       * strictly or whether it is allowed to insert lower categories into the
+       * next higher one(s).
+ *
+ * @param renumbering When leaving this function, the vector contains a
+ * new numbering of the cells that aligns with the grouping stored in
+ * this class.
+ *
+ * @param incompletely_filled_vectorization Given the vectorized layout
+ * of this class, some cell batches might have components in the
+       * vectorized array (SIMD lanes) that are not used and do not carry
+ * valid data. This array indicates the cell batches where this occurs
+ * according to the renumbering returned by this function.
+ */
+ void
+ create_blocks_serial (const std::vector<unsigned int> &boundary_cells,
+ const std::vector<unsigned int> &cells_close_to_boundary,
+ const unsigned int dofs_per_cell,
+ const std::vector<unsigned int> &cell_vectorization_categories,
+ const bool cell_vectorization_categories_strict,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &incompletely_filled_vectorization);
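+
+      // A call sketch following the serial (non-threaded) setup path of
+      // MatrixFree::internal_reinit in this patch; all names are as used
+      // there, with dofs_per_cell taken as the maximum over the DoFInfo
+      // objects and strict categories only for hp-DoFHandlers:
+      //
+      //   std::vector<unsigned int>  renumbering;
+      //   std::vector<unsigned char> irregular_cells;
+      //   task_info.create_blocks_serial (boundary_cells, std::vector<unsigned int>(),
+      //                                   dofs_per_cell, dof_info[0].cell_active_fe_index,
+      //                                   strict_categories, renumbering, irregular_cells);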
+
+ /**
+ * First step in the block creation for the task-parallel blocking setup.
+ *
+ * @param boundary_cells A list of cells that need to exchange data prior
+ * to performing computations. These will be given a certain id in the
+ * partitioning.
+ *
+ * @param renumbering When leaving this function, the vector contains a
+ * new numbering of the cells that aligns with the grouping stored in
+ * this class (before actually creating the tasks).
+ *
+ * @param incompletely_filled_vectorization Given the vectorized layout
+ * of this class, some cell batches might have components in the
+       * vectorized array (SIMD lanes) that are not used and do not carry
+ * valid data. This array indicates the cell batches where this occurs
+ * according to the renumbering returned by this function.
+ */
+ void
+ initial_setup_blocks_tasks (const std::vector<unsigned int> &boundary_cells,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &incompletely_filled_vectorization);
+
+ /**
+ * This helper function determines a block size if the user decided not
+ * to force a block size through MatrixFree::AdditionalData. This is
+ * computed based on the number of hardware threads on the system and
+ * the number of macro cells that we should work on.
+ */
+ void guess_block_size (const unsigned int dofs_per_cell);
+
+ /**
+ * This method goes through all cells that have been filled into @p
+ * dof_indices and finds out which cells can be worked on independently
+ * and which ones are neighboring and need to be done at different times
+ * when used in parallel.
+ *
+ * The strategy is based on a two-level approach. The outer level is
+ * subdivided into partitions similar to the type of neighbors in
+ * Cuthill-McKee, and the inner level is subdivided via colors (for
+ * chunks within the same color, can work independently). One task is
+ * represented by a chunk of cells. The cell chunks are formed before
+ * subdivision into partitions and colors.
+ *
+     * @param renumbering On output, element j of this variable gives the
+     * original number of the cell that is reordered to position j by the
+     * ordering due to the thread graph.
+ */
+ void
+ make_thread_graph_partition_color (DynamicSparsityPattern &connectivity,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &irregular_cells,
+ const bool hp_bool);
+
+ /**
+ * This function goes through all cells that have been filled into @p
+ * dof_indices and finds out which cells can be worked on independently
+ * and which ones are neighboring and need to be done at different times
+ * when used in parallel.
+ *
+ * The strategy is based on a two-level approach. The outer level is
+ * subdivided into partitions similar to the type of neighbors in
+ * Cuthill-McKee, and the inner level is again subdivided into Cuthill-
+ * McKee-like partitions (partitions whose level differs by more than 2
+ * can be worked on independently). One task is represented by a chunk
+ * of cells. The cell chunks are formed after subdivision into the two
+ * levels of partitions.
+ *
+     * @param renumbering On output, element j of this variable gives the
+     * original number of the cell that is reordered to position j by the
+     * ordering due to the thread graph.
+ */
+ void
+ make_thread_graph_partition_partition (const std::vector<unsigned int> &cell_active_fe_index,
+ DynamicSparsityPattern &connectivity,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &irregular_cells,
+ const bool hp_bool);
+
+ /**
+     * Either calls make_thread_graph_partition_color() or
+     * make_thread_graph_partition_partition(), depending on the setting in
+     * the data structure. This is the variant accessible from the outside.
+ *
+     * @param renumbering On output, element j of this variable gives the
+     * original number of the cell that is reordered to position j by the
+     * ordering due to the thread graph.
+ */
+ void
+ make_thread_graph (const std::vector<unsigned int> &cell_active_fe_index,
+ DynamicSparsityPattern &connectivity,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &irregular_cells,
+ const bool hp_bool);
+
+ /**
+ * This function computes the connectivity between blocks of cells from
+ * the connectivity between the individual cells.
+ */
+ void make_connectivity_cells_to_blocks
+ (const std::vector<unsigned char> &irregular_cells,
+ const DynamicSparsityPattern &connectivity_cells,
+ DynamicSparsityPattern &connectivity_blocks) const;
+
+ /**
+ * Function to create coloring on the second layer within each
+ * partition.
+ */
+ void make_coloring_within_partitions_pre_blocked
+ (const DynamicSparsityPattern &connectivity,
+ const unsigned int partition,
+ const std::vector<unsigned int> &cell_partition,
+ const std::vector<unsigned int> &partition_list,
+ const std::vector<unsigned int> &partition_size,
+ std::vector<unsigned int> &partition_color_list);
+
+ /**
+ * Function to create partitioning on the second layer within each
+ * partition.
+ */
+ void make_partitioning_within_partitions_post_blocked
+ (const DynamicSparsityPattern &connectivity,
+ const std::vector<unsigned int> &cell_active_fe_index,
+ const unsigned int partition,
+ const unsigned int cluster_size,
+ const bool hp_bool,
+ const std::vector<unsigned int> &cell_partition,
+ const std::vector<unsigned int> &partition_list,
+ const std::vector<unsigned int> &partition_size,
+ std::vector<unsigned int> &partition_partition_list,
+ std::vector<unsigned char> &irregular_cells);
+
+ /**
+ * This function creates partitions according to the provided connectivity graph.
+ *
+     * @param connectivity Connectivity between the (blocks of) cells
+     *
+     * @param cluster_size The number of cells in each partition should be a
+     * multiple of cluster_size (for blocking later on)
+     *
+     * @param cell_partition Stores for each (block of) cells to which
+     * partition it belongs
+     *
+     * @param partition_list partition_list[j] gives the old number of the
+     * block that should be renumbered to j due to the partitioning
+     *
+     * @param partition_size Vector pointing to the start of each partition
+     * (on output)
+     *
+     * @param partition The number of partitions created (on output)
+ */
+ void
+ make_partitioning (const DynamicSparsityPattern &connectivity,
+ const unsigned int cluster_size,
+ std::vector<unsigned int> &cell_partition,
+ std::vector<unsigned int> &partition_list,
+ std::vector<unsigned int> &partition_size,
+ unsigned int &partition) const;
+
+ /**
+ * Update fields of task info for task graph set up in make_thread_graph.
+ */
+ void
+ update_task_info (const unsigned int partition);
+
+ /**
+ * Creates a task graph from a connectivity structure.
+ */
+ void create_flow_graph();
+
+ /**
+ * Returns the memory consumption of the class.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Prints minimum, average, and maximal memory consumption over the MPI
+ * processes.
+ */
+ template <typename StreamType>
+ void print_memory_statistics (StreamType &out,
+ std::size_t data_length) const;
+
+ /**
+ * Number of physical cells in the mesh, not cell batches after
+ * vectorization
+ */
+ unsigned int n_active_cells;
+
+ /**
+ * Number of physical ghost cells in the mesh which are subject to
+ * special treatment and should not be included in loops
+ */
+ unsigned int n_ghost_cells;
+
+ /**
+ * Number of lanes in the SIMD array that are used for vectorization
+ */
+ unsigned int vectorization_length;
+
+ /**
+ * Block size information for multithreading
+ */
+ unsigned int block_size;
+
+ /**
+ * Number of blocks for multithreading
+ */
+ unsigned int n_blocks;
+
+ /**
+ * Parallel scheme applied by multithreading
+ */
+ TasksParallelScheme scheme;
+
+ /**
+     * The blocks are organized by a vector-of-vectors concept stored in
+     * linearized form: this data field @p partition_row_index stores the
+     * offsets from one 'vector' to the next within @p cell_partition_data,
+     * thereby realizing the two-level partitioning.
+ */
+ std::vector<unsigned int> partition_row_index;
+
+ /**
+ * This is a linear storage of all partitions, building a range of
+ * indices of the form cell_partition_data[idx] to
+ * cell_partition_data[idx+1] within the integer list of all cells in
+ * MatrixFree, subdivided into chunks by @p partition_row_index.
+ */
+ std::vector<unsigned int> cell_partition_data;
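+
+    // Illustration only (not part of the interface): the two fields above
+    // form a flattened vector-of-vectors. A sketch of how the chunks of one
+    // outer partition 'part' translate into ranges handed to a worker,
+    // using a hypothetical helper work_on_cell_range(first, last):
+    //
+    //   for (unsigned int i=partition_row_index[part];
+    //        i<partition_row_index[part+1]; ++i)
+    //     work_on_cell_range(cell_partition_data[i],
+    //                        cell_partition_data[i+1]);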
+
+ /**
+ * This is a linear storage of all partitions of inner faces, building a
+ * range of indices of the form face_partition_data[idx] to
+ * face_partition_data[idx+1] within the integer list of all interior
+ * faces in MatrixFree, subdivided into chunks by @p
+ * partition_row_index.
+ */
+ std::vector<unsigned int> face_partition_data;
+
+ /**
+ * This is a linear storage of all partitions of boundary faces,
+ * building a range of indices of the form boundary_partition_data[idx]
+ * to boundary_partition_data[idx+1] within the integer list of all
+ * boundary faces in MatrixFree, subdivided into chunks by @p
+ * partition_row_index.
+ */
+ std::vector<unsigned int> boundary_partition_data;
+
+ /**
+ * This is a linear storage of all partitions of interior faces on
+ * boundaries to other processors that are not locally used, building a
+ * range of indices of the form ghost_face_partition_data[idx] to
+ * ghost_face_partition_data[idx+1] within the integer list of all such
+ * faces in MatrixFree, subdivided into chunks by @p
+ * partition_row_index.
+ */
+ std::vector<unsigned int> ghost_face_partition_data;
+
+ /**
+ * This is a linear storage of all partitions of faces for multigrid
+ * levels that have a coarser neighbor and are only included in certain
+ * residual computations but not in smoothing, building a range of
+ * indices of the form refinement_edge_face_partition_data[idx] to
+ * refinement_edge_face_partition_data[idx+1] within the integer list of
+ * all such faces in MatrixFree, subdivided into chunks by @p
+ * partition_row_index.
+ */
+ std::vector<unsigned int> refinement_edge_face_partition_data;
+
+ /**
+ * Thread information (which chunk to start 'even' partitions from) to
+ * be handed to the dynamic task scheduler
+ */
+ std::vector<unsigned int> partition_evens;
+
+ /**
+ * Thread information (which chunk to start 'odd' partitions from) to be
+ * handed to the dynamic task scheduler
+ */
+ std::vector<unsigned int> partition_odds;
+
+ /**
+ * Thread information regarding the dependencies for partitions handed
+ * to the dynamic task scheduler
+ */
+ std::vector<unsigned int> partition_n_blocked_workers;
+
+ /**
+ * Thread information regarding the dependencies for partitions handed
+ * to the dynamic task scheduler
+ */
+ std::vector<unsigned int> partition_n_workers;
+
+ /**
+     * Number of even partitions accumulated over the field @p
+     * partition_evens
+ */
+ unsigned int evens;
+
+ /**
+     * Number of odd partitions accumulated over the field @p
+     * partition_odds
+ */
+ unsigned int odds;
+
+ /**
+ * Number of blocked workers accumulated over the field @p
+ * partition_n_blocked_workers
+ */
+ unsigned int n_blocked_workers;
+
+ /**
+ * Number of workers accumulated over the field @p partition_n_workers
+ */
+ unsigned int n_workers;
+
+ /**
+ * Stores whether a particular task is at an MPI boundary and needs data
+ * exchange
+ */
+ std::vector<unsigned char> task_at_mpi_boundary;
+
+ /**
+ * MPI communicator
+ */
+ MPI_Comm communicator;
+
+ /**
+ * Rank of MPI process
+ */
+ unsigned int my_pid;
+
+ /**
+     * Number of MPI ranks in the current communicator
+ */
+ unsigned int n_procs;
+ };
+
+ /**
+   * Typedef for the deprecated name of this class.
+ */
+ DEAL_II_DEPRECATED
+ typedef TaskInfo SizeInfo;
+
+ } // end of namespace MatrixFreeFunctions
+} // end of namespace internal
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
SET(_src
matrix_free.cc
evaluation_selector.cc
+ task_info.cc
)
SET(_inst
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/parallel.h>
+#include <deal.II/base/multithread_info.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/matrix_free/task_info.h>
+
+
+#ifdef DEAL_II_WITH_THREADS
+#include <tbb/task.h>
+#include <tbb/task_scheduler_init.h>
+#include <tbb/parallel_for.h>
+#include <tbb/blocked_range.h>
+#endif
+
+#include <iostream>
+#include <set>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+
+/*-------------------- Implementation of the matrix-free loop --------------*/
+namespace internal
+{
+ namespace MatrixFreeFunctions
+ {
+#ifdef DEAL_II_WITH_THREADS
+
+ // This defines the TBB data structures that are needed to schedule the
+ // partition-partition variant
+
+ namespace partition
+ {
+ class ActualCellWork
+ {
+ public:
+ ActualCellWork (MFWorkerInterface **worker_pointer,
+ const unsigned int partition,
+ const TaskInfo &task_info)
+ :
+ worker(nullptr),
+ worker_pointer(worker_pointer),
+ partition (partition),
+ task_info (task_info)
+ {}
+
+ ActualCellWork (MFWorkerInterface &worker,
+ const unsigned int partition,
+ const TaskInfo &task_info)
+ :
+ worker (&worker),
+ worker_pointer (nullptr),
+ partition (partition),
+ task_info (task_info)
+ {}
+
+ void operator() () const
+ {
+            MFWorkerInterface *used_worker = worker != nullptr ? worker : *worker_pointer;
+            Assert(used_worker != nullptr, ExcInternalError());
+ used_worker->cell(std::make_pair(task_info.cell_partition_data[partition],
+ task_info.cell_partition_data[partition+1]));
+
+ if (task_info.face_partition_data.empty() == false)
+ {
+ used_worker->face(std::make_pair(task_info.face_partition_data[partition],
+ task_info.face_partition_data[partition+1]));
+
+ used_worker->boundary(std::make_pair(task_info.boundary_partition_data[partition],
+ task_info.boundary_partition_data[partition+1]));
+ }
+ }
+
+ private:
+ MFWorkerInterface *worker;
+ MFWorkerInterface **worker_pointer;
+ const unsigned int partition;
+ const TaskInfo &task_info;
+ };
+
+ class CellWork : public tbb::task
+ {
+ public:
+ CellWork (MFWorkerInterface &worker,
+ const unsigned int partition,
+ const TaskInfo &task_info,
+ const bool is_blocked)
+ :
+ dummy (nullptr),
+ work (worker, partition, task_info),
+ is_blocked (is_blocked)
+ {}
+
+ tbb::task *execute ()
+ {
+ work();
+
+ if (is_blocked==true)
+ dummy->spawn (*dummy);
+ return nullptr;
+ }
+
+ tbb::empty_task *dummy;
+
+ private:
+ ActualCellWork work;
+ const bool is_blocked;
+ };
+
+
+
+ class PartitionWork : public tbb::task
+ {
+ public:
+ PartitionWork (MFWorkerInterface &function_in,
+ const unsigned int partition_in,
+ const TaskInfo &task_info_in,
+ const bool is_blocked_in = false)
+ :
+ dummy (nullptr),
+ function (function_in),
+ partition (partition_in),
+ task_info (task_info_in),
+ is_blocked (is_blocked_in)
+          {}
+ tbb::task *execute ()
+ {
+ tbb::empty_task *root = new( tbb::task::allocate_root() )tbb::empty_task;
+ const unsigned int evens = task_info.partition_evens[partition];
+ const unsigned int odds = task_info.partition_odds[partition];
+ const unsigned int n_blocked_workers =
+ task_info.partition_n_blocked_workers[partition];
+ const unsigned int n_workers = task_info.partition_n_workers[partition];
+ std::vector<CellWork *> worker(n_workers);
+ std::vector<CellWork *> blocked_worker(n_blocked_workers);
+
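+          // Build a small task tree for this partition row: tasks for the
+          // even-numbered chunks can run independently of each other, while
+          // the odd-numbered chunks in between act as 'blocked' workers
+          // whose dummy empty tasks encode the dependencies on the
+          // neighboring even chunks.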
+ root->set_ref_count(evens+1);
+ for (unsigned int j=0; j<evens; j++)
+ {
+ worker[j] = new(root->allocate_child())
+ CellWork(function, task_info.
+ partition_row_index[partition]+2*j,
+ task_info, false);
+ if (j>0)
+ {
+ worker[j]->set_ref_count(2);
+ blocked_worker[j-1]->dummy = new(worker[j]->allocate_child())
+ tbb::empty_task;
+ worker[j-1]->spawn(*blocked_worker[j-1]);
+ }
+ else
+ worker[j]->set_ref_count(1);
+ if (j<evens-1)
+ {
+ blocked_worker[j] = new(worker[j]->allocate_child())
+ CellWork(function, task_info.
+ partition_row_index
+ [partition] + 2*j+1, task_info, true);
+ }
+ else
+ {
+ if (odds==evens)
+ {
+ worker[evens] = new(worker[j]->allocate_child())
+ CellWork(function, task_info.
+ partition_row_index[partition]+2*j+1,
+ task_info, false);
+ worker[j]->spawn(*worker[evens]);
+ }
+ else
+ {
+ tbb::empty_task *child = new(worker[j]->allocate_child())
+ tbb::empty_task();
+ worker[j]->spawn(*child);
+ }
+ }
+ }
+
+ root->wait_for_all();
+ root->destroy(*root);
+ if (is_blocked==true)
+ dummy->spawn (*dummy);
+ return nullptr;
+ }
+
+ tbb::empty_task *dummy;
+
+ private:
+ MFWorkerInterface &function;
+ const unsigned int partition;
+ const TaskInfo &task_info;
+ const bool is_blocked;
+ };
+
+ } // end of namespace partition
+
+
+
+ namespace color
+ {
+ class CellWork
+ {
+ public:
+ CellWork (MFWorkerInterface &worker_in,
+ const TaskInfo &task_info_in,
+ const unsigned int partition_in)
+ :
+ worker (worker_in),
+ task_info (task_info_in),
+ partition (partition_in)
+          {}
+ void operator()(const tbb::blocked_range<unsigned int> &r) const
+ {
+ const unsigned int start_index = task_info.cell_partition_data[partition]
+ + task_info.block_size * r.begin();
+ const unsigned int end_index = std::min(start_index +
+ task_info.block_size*(r.end()-r.begin()),
+ task_info.cell_partition_data[partition+1]);
+ worker.cell(std::make_pair(start_index, end_index));
+
+ if (task_info.face_partition_data.empty() == false)
+ {
+ AssertThrow(false, ExcNotImplemented());
+ }
+ }
+ private:
+ MFWorkerInterface &worker;
+ const TaskInfo &task_info;
+ const unsigned int partition;
+ };
+
+
+
+ class PartitionWork : public tbb::task
+ {
+ public:
+ PartitionWork (MFWorkerInterface &worker_in,
+ const unsigned int partition_in,
+ const TaskInfo &task_info_in,
+ const bool is_blocked_in)
+ :
+ dummy (nullptr),
+ worker (worker_in),
+ partition (partition_in),
+ task_info (task_info_in),
+ is_blocked (is_blocked_in)
+          {}
+ tbb::task *execute ()
+ {
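+            // split this partition into chunks of block_size cell batches
+            // (rounding up so that a final, incompletely filled chunk is
+            // included) and process the chunks with a parallel for loop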
+ const unsigned int n_chunks = (task_info.cell_partition_data[partition+1]-
+ task_info.cell_partition_data[partition]+
+ task_info.block_size-1)/task_info.block_size;
+ parallel_for(tbb::blocked_range<unsigned int>(0,n_chunks,1),
+ CellWork (worker,task_info,partition));
+ if (is_blocked==true)
+ dummy->spawn (*dummy);
+ return nullptr;
+ }
+
+ tbb::empty_task *dummy;
+
+ private:
+ MFWorkerInterface &worker;
+ const unsigned int partition;
+ const TaskInfo &task_info;
+ const bool is_blocked;
+ };
+
+ } // end of namespace color
+
+
+
+ class MPICommunication : public tbb::task
+ {
+ public:
+ MPICommunication (MFWorkerInterface &worker_in,
+ const bool do_compress)
+ :
+ worker(worker_in),
+ do_compress(do_compress)
+        {}
+
+ tbb::task *execute ()
+ {
+ if (do_compress == false)
+ worker.vector_update_ghosts_finish();
+ else
+ worker.vector_compress_start();
+          return nullptr;
+ }
+
+ private:
+ MFWorkerInterface &worker;
+ const bool do_compress;
+ };
+
+#endif // DEAL_II_WITH_THREADS
+
+
+
+ void
+ TaskInfo::loop(MFWorkerInterface &funct) const
+ {
+ funct.vector_update_ghosts_start();
+
+#ifdef DEAL_II_WITH_THREADS
+
+ if (scheme != none)
+ {
+ funct.zero_dst_vector_range(numbers::invalid_unsigned_int);
+ if (scheme == partition_partition)
+ {
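+              // Build the TBB task tree for the partition-partition
+              // strategy: the partitions of the outer level become tasks
+              // whose parent-child relations and 'blocked' workers encode
+              // the dependencies between neighboring partitions, and two
+              // MPICommunication tasks are woven in to finish the ghost
+              // value exchange and to start the vector compression.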
+ tbb::empty_task *root = new( tbb::task::allocate_root() )
+ tbb::empty_task;
+ root->set_ref_count(evens+1);
+ std::vector<partition::PartitionWork *> worker(n_workers);
+ std::vector<partition::PartitionWork *>
+ blocked_worker(n_blocked_workers);
+ MPICommunication *worker_compr =
+ new(root->allocate_child())MPICommunication(funct, true);
+ worker_compr->set_ref_count(1);
+ for (unsigned int j=0; j<evens; j++)
+ {
+ if (j>0)
+ {
+ worker[j] = new(root->allocate_child())
+ partition::PartitionWork (funct,2*j,*this,false);
+ worker[j]->set_ref_count(2);
+ blocked_worker[j-1]->dummy = new(worker[j]->allocate_child())
+ tbb::empty_task;
+ if (j>1)
+ worker[j-1]->spawn(*blocked_worker[j-1]);
+ else
+ worker_compr->spawn(*blocked_worker[j-1]);
+ }
+ else
+ {
+ worker[j] = new(worker_compr->allocate_child())
+ partition::PartitionWork (funct,2*j,*this,false);
+ worker[j]->set_ref_count(2);
+ MPICommunication *worker_dist =
+ new (worker[j]->allocate_child())MPICommunication(funct, false);
+ worker_dist->spawn(*worker_dist);
+ }
+ if (j<evens-1)
+ {
+ blocked_worker[j] = new(worker[j]->allocate_child())
+ partition::PartitionWork(funct,2*j+1,*this,true);
+ }
+ else
+ {
+ if (odds==evens)
+ {
+ worker[evens] = new(worker[j]->allocate_child())
+ partition::PartitionWork(funct,2*j+1,*this,false);
+ worker[j]->spawn(*worker[evens]);
+ }
+ else
+ {
+ tbb::empty_task *child = new(worker[j]->allocate_child())
+ tbb::empty_task();
+ worker[j]->spawn(*child);
+ }
+ }
+ }
+
+ root->wait_for_all();
+ root->destroy(*root);
+ }
+ else // end of partition-partition, start of partition-color
+ {
+ // check whether there is only one partition. if not, build up the
+ // tree of partitions
+ if (odds > 0)
+ {
+ tbb::empty_task *root = new( tbb::task::allocate_root() ) tbb::empty_task;
+ root->set_ref_count(evens+1);
+ const unsigned int n_blocked_workers = odds-(odds+evens+1)%2;
+ const unsigned int n_workers = cell_partition_data.size()-1-
+ n_blocked_workers;
+ std::vector<color::PartitionWork *> worker(n_workers);
+ std::vector<color::PartitionWork *> blocked_worker(n_blocked_workers);
+ unsigned int worker_index = 0, slice_index = 0;
+ unsigned int spawn_index = 0;
+ int spawn_index_child = -2;
+ MPICommunication *worker_compr =
+ new (root->allocate_child())MPICommunication(funct, true);
+ worker_compr->set_ref_count(1);
+ for (unsigned int part=0; part<partition_row_index.size()-1; part++)
+ {
+ const unsigned int spawn_index_new = worker_index;
+ if (part == 0)
+ worker[worker_index] = new(worker_compr->allocate_child())
+ color::PartitionWork(funct,slice_index,*this,false);
+ else
+ worker[worker_index] = new(root->allocate_child())
+ color::PartitionWork(funct,slice_index,*this,false);
+ slice_index++;
+ for (; slice_index<partition_row_index[part+1]; slice_index++)
+ {
+ worker[worker_index]->set_ref_count(1);
+ worker_index++;
+ worker[worker_index] = new (worker[worker_index-1]->allocate_child())
+ color::PartitionWork(funct,slice_index,*this,false);
+ }
+ worker[worker_index]->set_ref_count(2);
+ if (part>0)
+ {
+ blocked_worker[(part-1)/2]->dummy =
+ new (worker[worker_index]->allocate_child()) tbb::empty_task;
+ worker_index++;
+ if (spawn_index_child == -1)
+ worker[spawn_index]->spawn(*blocked_worker[(part-1)/2]);
+ else
+ {
+ Assert(spawn_index_child>=0, ExcInternalError());
+ worker[spawn_index]->spawn(*worker[spawn_index_child]);
+ }
+ spawn_index = spawn_index_new;
+ spawn_index_child = -2;
+ }
+ else
+ {
+ MPICommunication *worker_dist =
+ new (worker[worker_index]->allocate_child())MPICommunication(funct,false);
+ worker_dist->spawn(*worker_dist);
+ worker_index++;
+ }
+ part += 1;
+ if (part<partition_row_index.size()-1)
+ {
+ if (part<partition_row_index.size()-2)
+ {
+ blocked_worker[part/2] = new(worker[worker_index-1]->allocate_child())
+ color::PartitionWork(funct,slice_index,*this,true);
+ slice_index++;
+ if (slice_index<partition_row_index[part+1])
+ {
+ blocked_worker[part/2]->set_ref_count(1);
+ worker[worker_index] = new(blocked_worker[part/2]->allocate_child())
+ color::PartitionWork(funct,slice_index,*this,false);
+ slice_index++;
+ }
+ else
+ {
+ spawn_index_child = -1;
+ continue;
+ }
+ }
+ for (; slice_index<partition_row_index[part+1];
+ slice_index++)
+ {
+ if (slice_index>partition_row_index[part])
+ {
+ worker[worker_index]->set_ref_count(1);
+ worker_index++;
+ }
+ worker[worker_index] = new (worker[worker_index-1]->allocate_child())
+ color::PartitionWork(funct,slice_index,*this,false);
+ }
+ spawn_index_child = worker_index;
+ worker_index++;
+ }
+ else
+ {
+ tbb::empty_task *final = new (worker[worker_index-1]->allocate_child())
+ tbb::empty_task;
+ worker[spawn_index]->spawn(*final);
+ spawn_index_child = worker_index-1;
+ }
+ }
+ if (evens==odds)
+ {
+ Assert(spawn_index_child>=0, ExcInternalError());
+ worker[spawn_index]->spawn(*worker[spawn_index_child]);
+ }
+ root->wait_for_all();
+ root->destroy(*root);
+ }
+ // case when we only have one partition: this is the usual coloring
+ // scheme, and we just schedule a parallel for loop for each color
+ else
+ {
+ Assert(evens<=1,ExcInternalError());
+ funct.vector_update_ghosts_finish();
+
+ for (unsigned int color=0; color < partition_row_index[1]; ++color)
+ {
+ tbb::empty_task *root = new( tbb::task::allocate_root() ) tbb::empty_task;
+ root->set_ref_count(2);
+ color::PartitionWork *worker =
+ new (root->allocate_child())color::PartitionWork(funct,color,*this,false);
+ root->spawn(*worker);
+ root->wait_for_all();
+ root->destroy(*root);
+ }
+
+ funct.vector_compress_start();
+ }
+ }
+ }
+ else
+#endif
+ // serial loop, go through up to three times and do the MPI transfer at
+ // the beginning/end of the second part
+ {
+ for (unsigned int part = 0; part < partition_row_index.size()-2; ++part)
+ {
+ if (part == 1)
+ funct.vector_update_ghosts_finish();
+
+ for (unsigned int i=partition_row_index[part]; i<partition_row_index[part+1]; ++i)
+ {
+ AssertIndexRange(i+1, cell_partition_data.size());
+ if (cell_partition_data[i+1] > cell_partition_data[i])
+ {
+ funct.zero_dst_vector_range(i);
+ funct.cell(std::make_pair(cell_partition_data[i],
+ cell_partition_data[i+1]));
+ }
+
+ if (face_partition_data.empty() == false)
+ {
+ if (face_partition_data[i+1] > face_partition_data[i])
+ funct.face(std::make_pair(face_partition_data[i],
+ face_partition_data[i+1]));
+ if (boundary_partition_data[i+1] > boundary_partition_data[i])
+ funct.boundary(std::make_pair(boundary_partition_data[i],
+ boundary_partition_data[i+1]));
+ }
+ }
+
+ if (part == 1)
+ funct.vector_compress_start();
+ }
+ }
+ funct.vector_compress_finish();
+ }
+
+
+
+ TaskInfo::TaskInfo ()
+ {
+ clear();
+ }
+
+
+
+ void TaskInfo::clear ()
+ {
+ n_active_cells = 0;
+ n_ghost_cells = 0;
+ vectorization_length = 1;
+ block_size = 0;
+ n_blocks = 0;
+ scheme = none;
+ partition_row_index.clear();
+ cell_partition_data.clear();
+ face_partition_data.clear();
+ boundary_partition_data.clear();
+ evens = 0;
+ odds = 0;
+ n_blocked_workers = 0;
+ n_workers = 0;
+ partition_evens.clear();
+ partition_odds.clear();
+ partition_n_blocked_workers.clear();
+ partition_n_workers.clear();
+ communicator = MPI_COMM_SELF;
+ my_pid = 0;
+ n_procs = 1;
+ }
+
+
+
+ template <typename StreamType>
+ void TaskInfo::print_memory_statistics (StreamType &out,
+ const std::size_t data_length) const
+ {
+ Utilities::MPI::MinMaxAvg memory_c
+ = Utilities::MPI::min_max_avg (1e-6*data_length, communicator);
+ if (n_procs < 2)
+ out << memory_c.min;
+ else
+ out << memory_c.min << "/" << memory_c.avg << "/" << memory_c.max;
+ out << " MB" << std::endl;
+ }
+
+
+
+ std::size_t
+ TaskInfo::memory_consumption () const
+ {
+ return (sizeof(*this)+
+ MemoryConsumption::memory_consumption (partition_row_index) +
+ MemoryConsumption::memory_consumption (cell_partition_data) +
+ MemoryConsumption::memory_consumption (face_partition_data) +
+ MemoryConsumption::memory_consumption (boundary_partition_data) +
+ MemoryConsumption::memory_consumption (partition_evens) +
+ MemoryConsumption::memory_consumption (partition_odds) +
+ MemoryConsumption::memory_consumption (partition_n_blocked_workers) +
+ MemoryConsumption::memory_consumption (partition_n_workers));
+ }
+
+
+
+ void
+ TaskInfo::collect_boundary_cells (const unsigned int n_active_cells_in,
+ const unsigned int n_active_and_ghost_cells,
+ const unsigned int vectorization_length_in,
+ std::vector<unsigned int> &boundary_cells)
+ {
+ vectorization_length = vectorization_length_in;
+ n_active_cells = n_active_cells_in;
+ n_ghost_cells = n_active_and_ghost_cells - n_active_cells;
+
+ // try to make the number of boundary cells divisible by the number of
+ // vectors in vectorization
+ unsigned int fillup_needed =
+ (vectorization_length - boundary_cells.size()%vectorization_length)%vectorization_length;
+ if (fillup_needed > 0 && boundary_cells.size() < n_active_cells)
+ {
+          // fill additional cells into the list of boundary cells to get a
+          // balanced number. Go through the indices successively until we
+          // have found enough indices
+ std::vector<unsigned int> new_boundary_cells;
+ new_boundary_cells.reserve (boundary_cells.size());
+
+ unsigned int next_free_slot = 0, bound_index = 0;
+ while (fillup_needed > 0 && bound_index < boundary_cells.size())
+ {
+ if (next_free_slot < boundary_cells[bound_index])
+ {
+                  // check whether there are enough cells in the current gap
+                  // to fill up to the required number
+ if (next_free_slot + fillup_needed <= boundary_cells[bound_index])
+ {
+ for (unsigned int j=boundary_cells[bound_index]-fillup_needed;
+ j < boundary_cells[bound_index]; ++j)
+ new_boundary_cells.push_back(j);
+ fillup_needed = 0;
+ }
+ // ok, not enough indices, so just take them all up to the
+ // next boundary cell
+ else
+ {
+ for (unsigned int j=next_free_slot;
+ j<boundary_cells[bound_index]; ++j)
+ new_boundary_cells.push_back(j);
+ fillup_needed -= boundary_cells[bound_index]-next_free_slot;
+ }
+ }
+ new_boundary_cells.push_back(boundary_cells[bound_index]);
+ next_free_slot = boundary_cells[bound_index]+1;
+ ++bound_index;
+ }
+ while (fillup_needed > 0 && (new_boundary_cells.size()==0 ||
+ new_boundary_cells.back()<n_active_cells-1))
+ new_boundary_cells.push_back(new_boundary_cells.back()+1);
+ while (bound_index<boundary_cells.size())
+ new_boundary_cells.push_back(boundary_cells[bound_index++]);
+
+ boundary_cells.swap(new_boundary_cells);
+ }
+
+      // sort the list of boundary cells
+ std::sort (boundary_cells.begin(), boundary_cells.end());
+
+ // check that number of boundary cells is divisible by
+ // vectorization_length or that it contains all cells
+ Assert (boundary_cells.size() % vectorization_length == 0 ||
+ boundary_cells.size() == n_active_cells, ExcInternalError());
+ }
+
+
+
+ void
+ TaskInfo
+ ::create_blocks_serial (const std::vector<unsigned int> &boundary_cells,
+ const std::vector<unsigned int> &cells_close_to_boundary,
+ const unsigned int dofs_per_cell,
+ const std::vector<unsigned int> &cell_vectorization_categories,
+ const bool cell_vectorization_categories_strict,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &incompletely_filled_vectorization)
+ {
+ const unsigned int n_macro_cells =
+ (n_active_cells + vectorization_length - 1) / vectorization_length;
+ const unsigned int n_ghost_slots =
+ (n_ghost_cells + vectorization_length - 1) / vectorization_length;
+ const unsigned int n_boundary_cells = boundary_cells.size();
+
+ incompletely_filled_vectorization.resize (n_macro_cells+n_ghost_slots);
+ renumbering.resize(n_active_cells + n_ghost_cells,
+ numbers::invalid_unsigned_int);
+
+ // Define the outer number of partitions. In the MPI case, we have three
+ // partitions (part before comm, part with comm, part after comm)
+ if (n_procs == 1)
+ partition_row_index.resize(3);
+ else
+ partition_row_index.resize(5);
+
+ // Initially mark the cells according to the MPI ranking
+ std::vector<unsigned char> cell_marked(n_active_cells+n_ghost_cells, 0);
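+      // The marker values used below are: 1 for interior cells worked on
+      // before the data exchange, 2 for the cells at the processor boundary
+      // that overlap with the communication, 3 for the remaining interior
+      // cells worked on after the exchange, and 4 for ghost cells.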
+ if (n_procs > 1)
+ {
+ for (unsigned int i=0; i<n_boundary_cells; ++i)
+ cell_marked[boundary_cells[i]] = 2;
+
+ Assert(boundary_cells.size()%vectorization_length == 0 ||
+ boundary_cells.size() == n_active_cells,
+ ExcInternalError());
+
+ const unsigned int n_second_slot =
+ ((n_active_cells - n_boundary_cells)/2/vectorization_length)*vectorization_length;
+ unsigned int count = 0;
+ for (unsigned int i=0; i<cells_close_to_boundary.size(); ++i)
+ if (cell_marked[cells_close_to_boundary[i]] == 0)
+ {
+ cell_marked[cells_close_to_boundary[i]] = count<n_second_slot ? 1 : 3;
+ ++count;
+ }
+
+ unsigned int c=0;
+ for ( ; c<n_active_cells && count < n_second_slot; ++c)
+ if (cell_marked[c] == 0)
+ {
+ cell_marked[c] = 1;
+ ++count;
+ }
+ for ( ; c < n_active_cells; ++c)
+ if (cell_marked[c] == 0)
+ cell_marked[c] = 3;
+ for ( ; c < n_active_cells+n_ghost_cells; ++c)
+ if (cell_marked[c] == 0)
+ cell_marked[c] = 4;
+ }
+ else
+ std::fill(cell_marked.begin(), cell_marked.end(), 1);
+
+ for (unsigned int i=0; i<cell_marked.size(); ++i)
+ Assert(cell_marked[i] != 0, ExcInternalError());
+
+ unsigned int n_categories = 1;
+ std::vector<unsigned int> tight_category_map;
+ if (cell_vectorization_categories.empty() == false)
+ {
+ AssertDimension(cell_vectorization_categories.size(),
+ n_active_cells+n_ghost_cells);
+
+          // create a tight map of categories so as not to consume excessive
+          // amounts of memory below. Sort the new categories by the numbers
+          // in the old ones.
+          tight_category_map.resize(n_active_cells+n_ghost_cells);
+ std::set<unsigned int> used_categories;
+ for (unsigned int i=0; i<n_active_cells+n_ghost_cells; ++i)
+ used_categories.insert(cell_vectorization_categories[i]);
+ std::vector<unsigned int> used_categories_vector(used_categories.size());
+ n_categories = 0;
+ for (auto &it : used_categories)
+ used_categories_vector[n_categories++] = it;
+ for (unsigned int i=0; i<n_active_cells+n_ghost_cells; ++i)
+ {
+ const unsigned int index = std::lower_bound(used_categories_vector.begin(),
+ used_categories_vector.end(),
+ cell_vectorization_categories[i])
+ - used_categories_vector.begin();
+ AssertIndexRange(index, used_categories_vector.size());
+ tight_category_map[i] = index;
+ }
+
+ // leave some more space for empty lanes
+ incompletely_filled_vectorization.resize(incompletely_filled_vectorization.size()
+ +4*n_categories);
+ }
+ else if (cells_close_to_boundary.empty())
+ tight_category_map.resize(n_active_cells+n_ghost_cells, 0);
+ else
+ {
+ n_categories = 2;
+ tight_category_map.resize(n_active_cells+n_ghost_cells, 1);
+ for (unsigned int i=0; i<cells_close_to_boundary.size(); ++i)
+ tight_category_map[cells_close_to_boundary[i]] = 0;
+ }
+
+ cell_partition_data.clear();
+ cell_partition_data.resize(1, 0);
+ unsigned int counter = 0;
+ unsigned int n_cells = 0;
+ std::vector<std::vector<unsigned int> > renumbering_category(n_categories);
+ for (unsigned int block=1; block<(n_procs>1 ? 5 : 3); ++block)
+ {
+ // step 1: sort by category
+ for (unsigned int i=0; i<n_active_cells+n_ghost_cells; ++i)
+ if (cell_marked[i] == block)
+ renumbering_category[tight_category_map[i]].push_back(i);
+
+ // step 2: if we want to fill up the ranges in vectorization, promote
+ // some of the cells to a higher category
+ if (cell_vectorization_categories_strict == false && n_categories > 1)
+ for (unsigned int j=n_categories-1; j>0; --j)
+ {
+ unsigned int lower_index = j-1;
+ while (renumbering_category[j].size()%vectorization_length)
+ {
+ while (renumbering_category[j].size()%vectorization_length &&
+ !renumbering_category[lower_index].empty())
+ {
+ renumbering_category[j].push_back(renumbering_category[lower_index].back());
+ renumbering_category[lower_index].pop_back();
+ }
+ if (lower_index == 0)
+ break;
+ else
+ --lower_index;
+ }
+ }
+
+ // step 3: append cells according to categories
+ for (unsigned int j=0; j<n_categories; ++j)
+ {
+ for (unsigned int jj=0; jj<renumbering_category[j].size(); jj++)
+ renumbering[counter++] = renumbering_category[j][jj];
+ unsigned int remainder = renumbering_category[j].size()%vectorization_length;
+ if (remainder)
+ incompletely_filled_vectorization
+ [renumbering_category[j].size()/vectorization_length+n_cells]
+ = remainder;
+ const unsigned int n_my_macro_cells =
+ (renumbering_category[j].size()+vectorization_length-1)/vectorization_length;
+ renumbering_category[j].clear();
+
+ // step 4: create blocks for face integrals, make the number of
+ // cells divisible by 4 if possible
+ const unsigned int block_size = std::max((2048U/dofs_per_cell)/8*4, 2U);
+ if (block < 4)
+ for (unsigned int k=0; k<n_my_macro_cells; k+=block_size)
+ cell_partition_data.push_back(n_cells +
+ std::min(k+block_size,
+ n_my_macro_cells));
+ else
+ cell_partition_data.back() += n_my_macro_cells;
+ n_cells += n_my_macro_cells;
+ }
+ partition_row_index[block] = cell_partition_data.size()-1;
+ if (block == 3 || (block==1 && n_procs == 1))
+ cell_partition_data.push_back(n_cells);
+ }
+      if (cell_vectorization_categories_strict == true)
+        {
+          Assert(n_cells >= n_macro_cells+n_ghost_slots, ExcInternalError());
+        }
+      else
+        AssertDimension(n_cells, n_macro_cells+n_ghost_slots);
+ AssertDimension(cell_partition_data.back(), n_cells);
+ AssertDimension(counter, n_active_cells+n_ghost_cells);
+
+ incompletely_filled_vectorization.resize(cell_partition_data.back());
+ }
+
+
+
+ void
+ TaskInfo
+ ::initial_setup_blocks_tasks(const std::vector<unsigned int> &boundary_cells,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &incompletely_filled_vectorization)
+ {
+ const unsigned int n_macro_cells =
+ (n_active_cells + vectorization_length - 1) / vectorization_length;
+ const unsigned int n_ghost_slots =
+ (n_ghost_cells + vectorization_length - 1) / vectorization_length;
+ incompletely_filled_vectorization.resize (n_macro_cells+n_ghost_slots);
+ if (n_macro_cells*vectorization_length > n_active_cells)
+ incompletely_filled_vectorization[n_macro_cells-1] =
+ vectorization_length - (n_macro_cells*vectorization_length - n_active_cells);
+ if (n_ghost_slots*vectorization_length > n_ghost_cells)
+ incompletely_filled_vectorization[n_macro_cells+n_ghost_slots-1] =
+ vectorization_length - (n_ghost_slots*vectorization_length - n_ghost_cells);
+
+ std::vector<unsigned int> reverse_numbering (n_active_cells,
+ numbers::invalid_unsigned_int);
+ for (unsigned int j=0; j<boundary_cells.size(); ++j)
+ reverse_numbering[boundary_cells[j]] = j;
+ unsigned int counter = boundary_cells.size();
+ for (unsigned int j=0; j<n_active_cells; ++j)
+ if (reverse_numbering[j] == numbers::invalid_unsigned_int)
+ reverse_numbering[j] = counter++;
+
+ AssertDimension (counter, n_active_cells);
+ renumbering = Utilities::invert_permutation (reverse_numbering);
+
+ for (unsigned int j=n_active_cells; j<n_active_cells+n_ghost_cells; ++j)
+ renumbering.push_back(j);
+
+ // TODO: might be able to simplify this code by not relying on the cell
+ // partition data while computing the thread graph
+ cell_partition_data.clear();
+ cell_partition_data.push_back(0);
+ if (n_procs > 1)
+ {
+ const unsigned int n_macro_boundary_cells =
+ (boundary_cells.size()+vectorization_length-1)/vectorization_length;
+ cell_partition_data.push_back((n_macro_cells-n_macro_boundary_cells)/2);
+ cell_partition_data.push_back(cell_partition_data[1] + n_macro_boundary_cells);
+ }
+ else
+ AssertDimension(boundary_cells.size(), 0);
+ cell_partition_data.push_back(n_macro_cells);
+ cell_partition_data.push_back(cell_partition_data.back() + n_ghost_slots);
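+      // With MPI, cell_partition_data now subdivides the cell batches into
+      // a first interior part (worked on while the ghost exchange is in
+      // flight), the batches at the processor boundary, the remaining
+      // interior part, and finally the ghost cell batches.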
+ partition_row_index.resize(n_procs > 1 ? 4 : 2);
+ partition_row_index[0] = 0;
+ partition_row_index[1] = 1;
+ if (n_procs > 1)
+ {
+ partition_row_index[2] = 2;
+ partition_row_index[3] = 3;
+ }
+ }
+
+
+
+ void
+ TaskInfo::guess_block_size (const unsigned int dofs_per_cell)
+ {
+      // the user did not provide a positive block size, so we have to guess
+ if (block_size == 0)
+ {
+ // we would like to have enough work to do, so as first guess, try
+ // to get 16 times as many chunks as we have threads on the system.
+ block_size = n_active_cells / (MultithreadInfo::n_threads() *
+ 16 * vectorization_length);
+
+ // if there are too few degrees of freedom per cell, need to
+ // increase the block size
+ const unsigned int minimum_parallel_grain_size = 200;
+ if (dofs_per_cell * block_size <
+ minimum_parallel_grain_size)
+ block_size = (minimum_parallel_grain_size /
+ dofs_per_cell + 1);
+ if (dofs_per_cell * block_size > 10000)
+ block_size /= 4;
+
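+          // round the block size to a power of two to obtain chunks of
+          // regular size (the precise value is a heuristic, not critical)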
+ block_size = 1 << (unsigned int)(log2(block_size+1));
+ }
+ if (block_size > n_active_cells)
+ block_size = std::max(1U, n_active_cells);
+ }
+
+
+
+ void
+ TaskInfo::make_thread_graph_partition_color
+ (DynamicSparsityPattern &connectivity_large,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &irregular_cells,
+ const bool )
+ {
+ const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+ if (n_macro_cells == 0)
+ return;
+
+ Assert (vectorization_length > 0, ExcInternalError());
+
+ unsigned int partition = 0, counter = 0;
+
+ // Create connectivity graph for blocks based on connectivity graph for cells.
+ DynamicSparsityPattern connectivity(n_blocks,
+ n_blocks);
+ make_connectivity_cells_to_blocks(irregular_cells, connectivity_large, connectivity);
+
+ // Create cell-block partitioning.
+
+      // For each block of cells, this variable saves to which partition the
+      // block belongs. Initialize all to numbers::invalid_unsigned_int to
+      // mark them as not yet assigned a partition.
+ std::vector<unsigned int> cell_partition(n_blocks,
+ numbers::invalid_unsigned_int);
+
+ // In element j of this variable, one puts the old number of the block
+      // that should be the jth block in the new numbering.
+ std::vector<unsigned int> partition_list (n_blocks,0);
+ std::vector<unsigned int> partition_color_list(n_blocks,0);
+
+ // This vector points to the start of each partition.
+ std::vector<unsigned int> partition_size (2,0);
+
+ //blocking_connectivity = true;
+
+ // The cluster_size in make_partitioning defines that the no. of cells
+ // in each partition should be a multiple of cluster_size.
+ unsigned int cluster_size = 1;
+
+ // Make the partitioning of the first layer of the blocks of cells.
+ make_partitioning( connectivity, cluster_size, cell_partition,
+ partition_list, partition_size, partition );
+
+ // Color the cells within each partition
+ make_coloring_within_partitions_pre_blocked
+ (connectivity, partition, cell_partition,
+ partition_list, partition_size, partition_color_list);
+
+ partition_list = renumbering;
+
+#ifdef DEBUG
+ // in debug mode, check that the partition color list is one-to-one
+ {
+ std::vector<unsigned int> sorted_pc_list (partition_color_list);
+ std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
+ for (unsigned int i=0; i<sorted_pc_list.size(); ++i)
+ Assert(sorted_pc_list[i] == i, ExcInternalError());
+ }
+#endif
+
+ // set the start list for each block and compute the renumbering of
+ // cells
+ std::vector<unsigned int> block_start(n_macro_cells+1);
+ std::vector<unsigned char> irregular(n_macro_cells);
+
+ unsigned int mcell_start=0;
+ block_start[0] = 0;
+ for (unsigned int block=0; block<n_blocks; block++)
+ {
+ block_start[block+1] = block_start[block];
+ for (unsigned int mcell=mcell_start; mcell<
+ std::min(mcell_start+block_size, n_macro_cells);
+ ++mcell)
+ {
+ unsigned int n_comp = (irregular_cells[mcell]>0)
+ ?irregular_cells[mcell]:vectorization_length;
+ block_start[block+1] += n_comp;
+ ++counter;
+ }
+ mcell_start += block_size;
+ }
+ counter = 0;
+ unsigned int counter_macro = 0;
+ unsigned int block_size_last =
+ n_macro_cells - block_size * (n_blocks-1);
+ if (block_size_last == 0)
+ block_size_last = block_size;
+
+ unsigned int tick = 0;
+ for (unsigned int block=0; block<n_blocks; block++)
+ {
+ unsigned int present_block = partition_color_list[block];
+ for (unsigned int cell = block_start[present_block];
+ cell<block_start[present_block+1]; ++cell)
+ renumbering[counter++] = partition_list[cell];
+ unsigned int this_block_size = (present_block == n_blocks-1)?
+ block_size_last:block_size;
+
+ // Also re-compute the content of cell_partition_data to
+ // contain the numbers of cells, not blocks
+ if (cell_partition_data[tick] == block)
+ cell_partition_data[tick++] = counter_macro;
+
+ for (unsigned int j=0; j<this_block_size; j++)
+ irregular[counter_macro++] =
+ irregular_cells[present_block*block_size+j];
+ }
+ AssertDimension(tick+1, cell_partition_data.size());
+ cell_partition_data.back() = counter_macro;
+
+ irregular_cells.swap(irregular);
+ AssertDimension (counter, n_active_cells);
+ AssertDimension (counter_macro, n_macro_cells);
+
+ // check that the renumbering is one-to-one
+#ifdef DEBUG
+ {
+ std::vector<unsigned int> sorted_renumbering (renumbering);
+ std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+ for (unsigned int i=0; i<sorted_renumbering.size(); ++i)
+ Assert(sorted_renumbering[i] == i, ExcInternalError());
+ }
+#endif
+
+
+ update_task_info(partition); // Actually sets too much for partition color case
+
+ AssertDimension(cell_partition_data.back(), n_macro_cells);
+ }
+
+
+
+ void
+ TaskInfo::make_thread_graph
+ (const std::vector<unsigned int> &cell_active_fe_index,
+ DynamicSparsityPattern &connectivity,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &irregular_cells,
+ const bool hp_bool)
+ {
+ const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+ if (n_macro_cells == 0)
+ return;
+
+ Assert (vectorization_length > 0, ExcInternalError());
+
+ // if we want to block before partitioning, create connectivity graph
+ // for blocks based on connectivity graph for cells.
+ DynamicSparsityPattern connectivity_blocks(n_blocks,
+ n_blocks);
+ make_connectivity_cells_to_blocks(irregular_cells, connectivity, connectivity_blocks);
+
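+      // Depending on the scheme, the two-level graph is built either on
+      // blocks of cells (partition_color and color, where the blocking
+      // happens before the partitioning) or on the individual cells
+      // (partition_partition, where the blocking happens afterwards).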
+ unsigned int n_blocks = 0;
+ if ( scheme == partition_color || scheme == color )// blocking_connectivity == true
+ n_blocks = this->n_blocks;
+ else
+ n_blocks = n_active_cells;
+
+      // For each block of cells, this variable saves to which partition the
+      // block belongs. Initialize all to numbers::invalid_unsigned_int to
+      // mark them as not yet assigned a partition.
+ std::vector<unsigned int> cell_partition(n_blocks,
+ numbers::invalid_unsigned_int);
+
+ // In element j of this variable, one puts the old number (but after
+ // renumbering according to the input renumbering) of the block that
+      // should be the jth block in the new numbering.
+ std::vector<unsigned int> partition_list (n_blocks,0);
+ std::vector<unsigned int> partition_2layers_list(n_blocks,0);
+
+ // This vector points to the start of each partition.
+ std::vector<unsigned int> partition_size (2,0);
+
+ unsigned int partition = 0;
+
+ // Within the partitions we want to be able to block for the case that
+ // we do not block already in the connectivity. The cluster_size in
+ // make_partitioning defines that the no. of cells in each partition
+ // should be a multiple of cluster_size.
+ unsigned int cluster_size = 1;
+ if (scheme == partition_partition)
+ cluster_size = block_size*vectorization_length;
+
+ // Make the partitioning of the first layer of the blocks of cells.
+ if ( scheme == partition_color || scheme == color )
+ make_partitioning( connectivity_blocks, cluster_size, cell_partition,
+ partition_list, partition_size, partition);
+ else
+ make_partitioning( connectivity, cluster_size, cell_partition,
+ partition_list, partition_size, partition);
+
+ // Partition or color second layer
+ if ( scheme == partition_partition )
+
+ {
+ // Partition within partitions.
+ make_partitioning_within_partitions_post_blocked
+ (connectivity, cell_active_fe_index, partition, cluster_size, hp_bool,
+ cell_partition, partition_list, partition_size,
+ partition_2layers_list, irregular_cells);
+ }
+ else if ( scheme == partition_color || scheme == color )
+ {
+ make_coloring_within_partitions_pre_blocked
+ (connectivity_blocks, partition, cell_partition,
+ partition_list, partition_size, partition_2layers_list);
+ }
+
+ // in debug mode, check that the partition_2layers_list is one-to-one
+#ifdef DEBUG
+ {
+ std::vector<unsigned int> sorted_pc_list (partition_2layers_list);
+ std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
+ for (unsigned int i=0; i<sorted_pc_list.size(); ++i)
+ Assert(sorted_pc_list[i] == i, ExcInternalError());
+ }
+#endif
+
+ // Set the new renumbering
+ std::vector<unsigned int> renumbering_in (n_active_cells,0);
+ renumbering_in.swap(renumbering);
+ if (scheme == partition_partition) // blocking_connectivity == false
+ {
+ // This is the simple case. The renumbering is just a combination of
+ // the renumbering that we were given as an input and the
+ // renumbering of partition/coloring given in partition_2layers_list
+ for (unsigned int j=0; j<renumbering.size(); j++)
+ renumbering[j] = renumbering_in[partition_2layers_list[j]];
+ // Account for the ghost cells, finally.
+ for (unsigned int i=0; i<n_ghost_cells; ++i)
+ renumbering.push_back(i+n_active_cells);
+ }
+ else
+ {
+ // set the start list for each block and compute the renumbering of
+ // cells
+ std::vector<unsigned int> block_start(n_macro_cells+1);
+ std::vector<unsigned char> irregular(n_macro_cells);
+
+ unsigned int counter = 0;
+ unsigned int mcell_start=0;
+ block_start[0] = 0;
+ for (unsigned int block=0; block<n_blocks; block++)
+ {
+ block_start[block+1] = block_start[block];
+ for (unsigned int mcell=mcell_start; mcell<
+ std::min(mcell_start+block_size, n_macro_cells);
+ ++mcell)
+ {
+ unsigned int n_comp = (irregular_cells[mcell]>0)
+ ?irregular_cells[mcell]:vectorization_length;
+ block_start[block+1] += n_comp;
+ ++counter;
+ }
+ mcell_start += block_size;
+ }
+ counter = 0;
+ unsigned int counter_macro = 0;
+ unsigned int block_size_last =
+ n_macro_cells - block_size * (n_blocks-1);
+ if (block_size_last == 0)
+ block_size_last = block_size;
+
+ unsigned int tick = 0;
+ for (unsigned int block=0; block<n_blocks; block++)
+ {
+ unsigned int present_block = partition_2layers_list[block];
+ for (unsigned int cell = block_start[present_block];
+ cell<block_start[present_block+1]; ++cell)
+ renumbering[counter++] = renumbering_in[cell];
+ unsigned int this_block_size = (present_block == n_blocks-1)?
+ block_size_last:block_size;
+
+ // Also re-compute the content of cell_partition_data to
+ // contain the numbers of cells, not blocks
+ if (cell_partition_data[tick] == block)
+ cell_partition_data[tick++] = counter_macro;
+
+ for (unsigned int j=0; j<this_block_size; j++)
+ irregular[counter_macro++] =
+ irregular_cells[present_block*block_size+j];
+ }
+ AssertDimension(tick+1, cell_partition_data.size());
+ cell_partition_data.back() = counter_macro;
+
+ irregular_cells.swap(irregular);
+ AssertDimension (counter, n_active_cells);
+ AssertDimension (counter_macro, n_macro_cells);
+ // check that the renumbering is one-to-one
+#ifdef DEBUG
+ {
+ std::vector<unsigned int> sorted_renumbering (renumbering);
+ std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+ for (unsigned int i=0; i<sorted_renumbering.size(); ++i)
+ Assert(sorted_renumbering[i] == i, ExcInternalError());
+ }
+#endif
+ }
+
+      // Update the task info with the additional information for the thread graph.
+ update_task_info(partition);
+
+ }
+
+
+
+ void
+ TaskInfo::make_thread_graph_partition_partition
+ (const std::vector<unsigned int> &cell_active_fe_index,
+ DynamicSparsityPattern &connectivity,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned char> &irregular_cells,
+ const bool hp_bool)
+ {
+ const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+ if (n_macro_cells == 0)
+ return;
+
+ const unsigned int cluster_size = block_size*vectorization_length;
+
+ // Create cell-block partitioning.
+
+      // For each block of cells, this variable saves to which partition the
+      // block belongs. Initialize all to numbers::invalid_unsigned_int to
+      // mark them as not yet assigned a partition.
+ std::vector<unsigned int> cell_partition (n_active_cells,
+ numbers::invalid_unsigned_int);
+
+
+ // In element j of this variable, one puts the old number of the block
+      // that should be the jth block in the new numbering.
+ std::vector<unsigned int> partition_list(n_active_cells,0);
+ std::vector<unsigned int> partition_partition_list(n_active_cells,0);
+
+ // This vector points to the start of each partition.
+ std::vector<unsigned int> partition_size(2,0);
+
+ unsigned int partition = 0;
+ // Here, we do not block inside the connectivity graph
+ //blocking_connectivity = false;
+
+ // Make the partitioning of the first layer of the blocks of cells.
+ make_partitioning( connectivity, cluster_size, cell_partition,
+ partition_list, partition_size, partition);
+
+ // Partition within partitions.
+ make_partitioning_within_partitions_post_blocked
+ (connectivity, cell_active_fe_index, partition, cluster_size, hp_bool,
+ cell_partition,
+ partition_list, partition_size,
+ partition_partition_list,
+ irregular_cells);
+
+ partition_list.swap(renumbering);
+
+ for (unsigned int j=0; j<renumbering.size(); j++)
+ renumbering[j] = partition_list[partition_partition_list[j]];
+
+ for (unsigned int i=0; i<n_ghost_cells; ++i)
+ renumbering.push_back(i+n_active_cells);
+
+ update_task_info(partition);
+
+ }
+
+
+
+ void
+ TaskInfo::make_connectivity_cells_to_blocks
+ (const std::vector<unsigned char> &irregular_cells,
+ const DynamicSparsityPattern &connectivity_cells,
+ DynamicSparsityPattern &connectivity_blocks) const
+ {
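+      // First collect the cells contained in each block (taking
+      // incompletely filled cell batches into account) and record for each
+      // cell the block it belongs to; then insert an entry between two
+      // blocks whenever a cell connection crosses the block boundary.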
+ std::vector<std::vector<unsigned int> > cell_blocks(n_blocks);
+ std::vector<unsigned int> touched_cells(n_active_cells);
+ unsigned int cell = 0;
+ for (unsigned int i=0, mcell=0; i<n_blocks; ++i)
+ {
+ for (unsigned int c=0; c<block_size &&
+ mcell < *(cell_partition_data.end()-2); ++c, ++mcell)
+ {
+ unsigned int ncomp = (irregular_cells[mcell]>0)
+ ?irregular_cells[mcell]:vectorization_length;
+ for (unsigned int c=0; c<ncomp; ++c, ++cell)
+ {
+ cell_blocks[i].push_back(cell);
+ touched_cells[cell] = i;
+ }
+ }
+ }
+ AssertDimension(cell, n_active_cells);
+ for (unsigned int i=0; i<cell_blocks.size(); ++i)
+ for (unsigned int col=0; col<cell_blocks[i].size(); ++col)
+ {
+ for (DynamicSparsityPattern::iterator it
+ = connectivity_cells.begin(cell_blocks[i][col]);
+ it != connectivity_cells.end(cell_blocks[i][col]); ++it)
+ {
+ if (touched_cells[it->column()] != i)
+ connectivity_blocks.add(i,touched_cells[it->column()]);
+ }
+ }
+ }
+
+
+
+
+ // Function to create partitioning on the second layer within each
+ // partition. Version without preblocking.
+ void
+ TaskInfo::make_partitioning_within_partitions_post_blocked
+ (const DynamicSparsityPattern &connectivity,
+ const std::vector<unsigned int> &cell_active_fe_index,
+ const unsigned int partition,
+ const unsigned int cluster_size,
+ const bool hp_bool,
+ const std::vector<unsigned int> &cell_partition,
+ const std::vector<unsigned int> &partition_list,
+ const std::vector<unsigned int> &partition_size,
+ std::vector<unsigned int> &partition_partition_list,
+ std::vector<unsigned char> &irregular_cells)
+ {
+
+ const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+ const unsigned int n_ghost_slots =
+ *(cell_partition_data.end()-1)-n_macro_cells;
+
+ // List of cells in previous partition
+ std::vector<unsigned int> neighbor_list;
+ // List of cells in current partition for use as neighbors in next partition
+ std::vector<unsigned int> neighbor_neighbor_list;
+
+ std::vector<unsigned int> renumbering(n_active_cells);
+
+ irregular_cells.back() = 0;
+ irregular_cells.resize(n_active_cells+n_ghost_slots);
+
+ unsigned int max_fe_index = 0;
+ for (unsigned int i=0; i<cell_active_fe_index.size(); ++i)
+ max_fe_index = std::max(cell_active_fe_index[i], max_fe_index);
+ Assert(!hp_bool || cell_active_fe_index.size() == n_macro_cells,
+ ExcInternalError());
+
+ unsigned int n_macro_cells_before = 0;
+ {
+ // Create partitioning within partitions.
+
+        // For each block of cells, this variable saves to which partition
+        // the block belongs. Initialize all to
+        // numbers::invalid_unsigned_int to mark them as not yet assigned a
+        // partition.
+ std::vector<unsigned int> cell_partition_l2(n_active_cells,
+ numbers::invalid_unsigned_int);
+ partition_row_index.clear();
+ partition_row_index.resize(partition+1,0);
+ cell_partition_data.resize(1,0);
+
+ unsigned int start_up = 0;
+ unsigned int counter = 0;
+ unsigned int missing_macros;
+ for (unsigned int part=0; part<partition; ++part)
+ {
+ neighbor_neighbor_list.resize(0);
+ neighbor_list.resize(0);
+ bool work = true;
+ unsigned int partition_l2 = 0;
+ start_up = partition_size[part];
+ unsigned int partition_counter = 0;
+ while (work)
+ {
+ if (neighbor_list.size()==0)
+ {
+ work = false;
+ partition_counter = 0;
+ for (unsigned int j=start_up; j<partition_size[part+1]; ++j)
+ if (cell_partition[partition_list[j]] == part &&
+ cell_partition_l2[partition_list[j]] == numbers::invalid_unsigned_int)
+ {
+ start_up = j;
+ work = true;
+ partition_counter = 1;
+ // To start up, set the start_up cell to partition
+ // and list all its neighbors.
+ AssertIndexRange (start_up, partition_size[part+1]);
+ cell_partition_l2[partition_list[start_up]] =
+ partition_l2;
+ neighbor_neighbor_list.push_back
+ (partition_list[start_up]);
+ partition_partition_list[counter++] =
+ partition_list[start_up];
+ start_up++;
+ break;
+ }
+ }
+ else
+ {
+ partition_counter = 0;
+ for (unsigned int j=0; j<neighbor_list.size(); ++j)
+ {
+ Assert(cell_partition[neighbor_list[j]]==part,
+ ExcInternalError());
+ Assert(cell_partition_l2[neighbor_list[j]]==partition_l2-1,
+ ExcInternalError());
+ DynamicSparsityPattern::iterator neighbor =
+ connectivity.begin(neighbor_list[j]),
+ end = connectivity.end(neighbor_list[j]);
+ for (; neighbor!=end ; ++neighbor)
+ {
+ if (cell_partition[neighbor->column()] == part &&
+ cell_partition_l2[neighbor->column()]==
+ numbers::invalid_unsigned_int)
+ {
+ cell_partition_l2[neighbor->column()] = partition_l2;
+ neighbor_neighbor_list.push_back(neighbor->column());
+ partition_partition_list[counter++] = neighbor->column();
+ partition_counter++;
+ }
+ }
+ }
+ }
+ if (partition_counter>0)
+ {
+ int index_before = neighbor_neighbor_list.size(),
+ index = index_before;
+ {
+ // put the cells into separate lists for each FE index
+ // within one partition-partition
+ missing_macros = 0;
+ std::vector<unsigned int> remaining_per_macro_cell
+ (max_fe_index);
+ std::vector<std::vector<unsigned int> >
+ renumbering_fe_index;
+ unsigned int cell;
+ bool filled = true;
+ if (hp_bool == true)
+ {
+ renumbering_fe_index.resize(max_fe_index);
+ for (cell=counter-partition_counter; cell<counter; ++cell)
+ {
+ renumbering_fe_index
+ [cell_active_fe_index.empty() ? 0 :
+ cell_active_fe_index[partition_partition_list
+ [cell]]].
+ push_back(partition_partition_list[cell]);
+ }
+ // check how many more cells are needed in the lists
+ for (unsigned int j=0; j<max_fe_index; j++)
+ {
+ remaining_per_macro_cell[j] =
+ renumbering_fe_index[j].size()%vectorization_length;
+ if (remaining_per_macro_cell[j] != 0)
+ filled = false;
+ missing_macros += ((renumbering_fe_index[j].size()+
+ vectorization_length-1)/vectorization_length);
+ }
+ }
+ else
+ {
+ remaining_per_macro_cell.resize(1);
+ remaining_per_macro_cell[0] = partition_counter%
+ vectorization_length;
+ missing_macros = partition_counter/vectorization_length;
+ if (remaining_per_macro_cell[0] != 0)
+ {
+ filled = false;
+ missing_macros++;
+ }
+ }
+ missing_macros = cluster_size - (missing_macros%cluster_size);
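+ // missing_macros now holds the number of additional macro cells
+ // needed to reach the next multiple of cluster_size.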
+
+ // Fill up the current chunk with cells from the neighbor list
+ // until the number of macro cells is a multiple of cluster_size
+ // and all macro cells are completely filled.
+ while (missing_macros>0 || filled == false)
+ {
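+ // If a full pass over neighbor_neighbor_list does not add any new
+ // cell, the remaining cells of this partition are not connected to
+ // the current chunk; in that case we give up and start a new chunk.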
+ if (index==0)
+ {
+ index = neighbor_neighbor_list.size();
+ if (index == index_before)
+ {
+ if (missing_macros != 0)
+ {
+ neighbor_neighbor_list.resize(0);
+ }
+ start_up--;
+ break; // not connected - start again
+ }
+ index_before = index;
+ }
+ index--;
+ unsigned int additional = neighbor_neighbor_list
+ [index];
+
+ // Go through the neighbors of the most recently added
+ // cells in the current chunk and check whether some of
+ // them can be used to fill it up.
+ DynamicSparsityPattern::iterator
+ neighbor = connectivity.begin(additional),
+ end = connectivity.end(additional);
+ for (; neighbor!=end ; ++neighbor)
+ {
+ if (cell_partition[neighbor->column()] == part &&
+ cell_partition_l2[neighbor->column()] ==
+ numbers::invalid_unsigned_int)
+ {
+ unsigned int this_index = 0;
+ if (hp_bool == true)
+ this_index = cell_active_fe_index.empty() ? 0 :
+ cell_active_fe_index[neighbor->column()];
+
+ // Only add this cell if we need more macro
+ // cells in the current block or if there is a
+ // macro cell with this FE index that is not
+ // yet fully populated
+ if (missing_macros > 0 ||
+ remaining_per_macro_cell[this_index] > 0)
+ {
+ cell_partition_l2[neighbor->column()] = partition_l2;
+ neighbor_neighbor_list.push_back(neighbor->column());
+ if (hp_bool == true)
+ renumbering_fe_index[this_index].
+ push_back(neighbor->column());
+ partition_partition_list[counter] =
+ neighbor->column();
+ counter++;
+ partition_counter++;
+ if (remaining_per_macro_cell[this_index]
+ == 0 && missing_macros > 0)
+ missing_macros--;
+ remaining_per_macro_cell[this_index]++;
+ if (remaining_per_macro_cell[this_index]
+ == vectorization_length)
+ {
+ remaining_per_macro_cell[this_index] = 0;
+ }
+ if (missing_macros == 0)
+ {
+ filled = true;
+ for (unsigned int fe_ind=0;
+ fe_ind<max_fe_index; ++fe_ind)
+ if (remaining_per_macro_cell[fe_ind]!=0)
+ filled = false;
+ }
+ if (filled == true)
+ break;
+ }
+ }
+ }
+ }
+ if (hp_bool == true)
+ {
+ // Sort the cells of this chunk by their active FE
+ // index, which is the ordering that was implicitly
+ // assumed above.
+ cell = counter - partition_counter;
+ for (unsigned int j=0; j<max_fe_index; j++)
+ {
+ for (unsigned int jj=0; jj<renumbering_fe_index[j].
+ size(); jj++)
+ renumbering[cell++] =
+ renumbering_fe_index[j][jj];
+ if (renumbering_fe_index[j].size()%vectorization_length != 0)
+ irregular_cells[renumbering_fe_index[j].size()/
+ vectorization_length+
+ n_macro_cells_before] =
+ renumbering_fe_index[j].size()%vectorization_length;
+ n_macro_cells_before += (renumbering_fe_index[j].
+ size()+vectorization_length-1)/
+ vectorization_length;
+ renumbering_fe_index[j].resize(0);
+ }
+ }
+ else
+ {
+ n_macro_cells_before += partition_counter/vectorization_length;
+ if (partition_counter%vectorization_length != 0)
+ {
+ irregular_cells[n_macro_cells_before] =
+ partition_counter%vectorization_length;
+ n_macro_cells_before++;
+ }
+ }
+ }
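+ // Each entry of cell_partition_data marks the end of one chunk
+ // (partition-partition) in terms of macro cells.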
+ cell_partition_data.
+ push_back(n_macro_cells_before);
+ partition_l2++;
+ }
+ neighbor_list = neighbor_neighbor_list;
+ neighbor_neighbor_list.resize(0);
+ }
+ partition_row_index[part+1] =
+ partition_row_index[part] + partition_l2;
+ }
+ }
+ if (hp_bool == true)
+ {
+ partition_partition_list.swap(renumbering);
+ }
+
+ }
+
+
+
+ // Function to create the coloring on the second layer within each
+ // partition. This version assumes that the cells have already been
+ // grouped into blocks (pre-blocking).
+ void
+ TaskInfo::make_coloring_within_partitions_pre_blocked
+ (const DynamicSparsityPattern &connectivity,
+ const unsigned int partition,
+ const std::vector<unsigned int> &cell_partition,
+ const std::vector<unsigned int> &partition_list,
+ const std::vector<unsigned int> &partition_size,
+ std::vector<unsigned int> &partition_color_list)
+ {
+
+ const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+ std::vector<unsigned int> neighbor_list;
+ std::vector<unsigned int> neighbor_neighbor_list;
+ std::vector<unsigned int> cell_color(n_blocks, n_macro_cells);
+ std::vector<bool> color_finder;
+
+ partition_row_index.resize(partition+1);
+ cell_partition_data.clear();
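+ // cell_partition_data is rebuilt below: within each partition, one
+ // entry per color marks the first block of that color, and a final
+ // entry holds the total number of blocks.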
+ unsigned int color_counter = 0, index_counter = 0;
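+ // The loop below performs a greedy graph coloring of the blocks
+ // within each partition: for every block, mark the colors already
+ // taken by its neighbors in the same partition and assign the
+ // smallest color that is still free. Within one partition, blocks
+ // of the same color are not neighbors and can thus be worked on
+ // simultaneously.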
+ for (unsigned int part=0; part<partition; part++)
+ {
+ partition_row_index[part] = index_counter;
+ unsigned int max_color = 0;
+ for (unsigned int k=partition_size[part]; k<partition_size[part+1];
+ k++)
+ {
+ unsigned int cell = partition_list[k];
+ unsigned int n_neighbors = connectivity.row_length(cell);
+
+ // In the worst case, each neighbor has a different color. So we
+ // find at least one available color between 0 and n_neighbors.
+ color_finder.resize(n_neighbors+1);
+ for (unsigned int j=0; j<=n_neighbors; ++j)
+ color_finder[j]=true;
+ DynamicSparsityPattern::iterator
+ neighbor = connectivity.begin(cell),
+ end = connectivity.end(cell);
+ for (; neighbor!=end ; ++neighbor)
+ {
+ // Mark the color that a neighbor within the partition has
+ // as taken
+ if (cell_partition[neighbor->column()] == part &&
+ cell_color[neighbor->column()] <= n_neighbors)
+ color_finder[cell_color[neighbor->column()]] = false;
+ }
+ // Choose the smallest color that is not taken for the block
+ cell_color[cell]=0;
+ while (color_finder[cell_color[cell]] == false)
+ cell_color[cell]++;
+ if (cell_color[cell] > max_color)
+ max_color = cell_color[cell];
+ }
+ // Reorder the blocks within the partition: first all blocks with
+ // color 0, then those with color 1, and so on up to max_color.
+ // (Note that the smaller the color number, the larger the number of
+ // blocks with that color.)
+ for (unsigned int color=0; color<=max_color; color++)
+ {
+ cell_partition_data.push_back(color_counter);
+ index_counter++;
+ for (unsigned int k=partition_size[part];
+ k<partition_size[part+1]; k++)
+ {
+ unsigned int cell=partition_list[k];
+ if (cell_color[cell] == color)
+ {
+ partition_color_list[color_counter++] = cell;
+ }
+ }
+ }
+ }
+ cell_partition_data.push_back(n_blocks);
+ partition_row_index[partition] = index_counter;
+ AssertDimension (color_counter, n_blocks);
+ }
+
+
+ // Function to create partitioning on the first layer.
+ void
+ TaskInfo::make_partitioning
+ (const DynamicSparsityPattern &connectivity,
+ const unsigned int cluster_size,
+ std::vector<unsigned int> &cell_partition,
+ std::vector<unsigned int> &partition_list,
+ std::vector<unsigned int> &partition_size,
+ unsigned int &partition) const
+
+ {
+ // For each block of cells, this variable saves to which partition the
+ // block belongs. All entries are assumed to be initialized by the
+ // caller to numbers::invalid_unsigned_int to mark them as not yet
+ // assigned to a partition:
+ //std::vector<unsigned int> cell_partition (n_active_cells,
+ // numbers::invalid_unsigned_int);
+ // List of cells in previous partition
+ std::vector<unsigned int> neighbor_list;
+ // List of cells in current partition for use as neighbors in next partition
+ std::vector<unsigned int> neighbor_neighbor_list;
+
+ // Element j of this variable receives the old number of the block
+ // that should become the j-th block in the new numbering.
+ //std::vector<unsigned int> partition_list(n_active_cells,0);
+
+ // This vector points to the start of each partition.
+ //std::vector<unsigned int> partition_size(2,0);
+
+ partition = 0;
+ unsigned int counter=0;
+ unsigned int start_nonboundary = cell_partition_data.size() == 5 ?
+ vectorization_length * (cell_partition_data[2] -
+ cell_partition_data[1]) : 0;
+
+ const unsigned int n_macro_cells = *(cell_partition_data.end()-2);
+ if (n_macro_cells == 0)
+ return;
+ if (scheme == color)
+ start_nonboundary = n_macro_cells;
+ if (scheme == partition_color || scheme == color) // blocking_connectivity == true
+ start_nonboundary = ((start_nonboundary+block_size-1)
+ /block_size);
+ unsigned int n_blocks;
+ if (scheme == partition_color || scheme == color) // blocking_connectivity == true
+ n_blocks = this->n_blocks;
+ else
+ n_blocks = n_active_cells;
+
+ if (start_nonboundary > n_blocks)
+ start_nonboundary = n_blocks;
+
+
+ unsigned int start_up = 0;
+ bool work = true;
+ unsigned int remainder = cluster_size;
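+ // remainder counts how many more cells are needed before the size
+ // of the current partition reaches the next multiple of cluster_size.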
+
+ // This performs a classical breadth-first search in the connectivity
+ // graph of the cells, under the restriction that the size of each
+ // partition should be a multiple of the given cluster size.
+ while (work)
+ {
+ // put the cells with neighbors on remote MPI processes up front
+ if (start_nonboundary>0)
+ {
+ for (unsigned int cell=0; cell<start_nonboundary; ++cell)
+ {
+ const unsigned int cell_nn = cell;
+ cell_partition[cell_nn] = partition;
+ neighbor_list.push_back(cell_nn);
+ partition_list[counter++] = cell_nn;
+ partition_size.back()++;
+ }
+ start_nonboundary = 0;
+ remainder -= (start_nonboundary%cluster_size);
+ if (remainder == cluster_size)
+ remainder = 0;
+ }
+ else
+ {
+ // To start up, assign the start_up cell to the current partition
+ // and put it on the neighbor list so that its neighbors are
+ // visited next.
+ cell_partition[start_up] = partition;
+ neighbor_list.push_back(start_up);
+ partition_list[counter++] = start_up;
+ partition_size.back()++;
+ start_up++;
+ remainder--;
+ if (remainder == cluster_size)
+ remainder = 0;
+ }
+ int index_before = neighbor_list.size(), index = index_before,
+ index_stop = 0;
+ while (remainder>0)
+ {
+ if (index==index_stop)
+ {
+ index = neighbor_list.size();
+ if (index == index_before)
+ {
+ neighbor_list.resize(0);
+ goto not_connect;
+ }
+ index_stop = index_before;
+ index_before = index;
+ }
+ index--;
+ unsigned int additional = neighbor_list[index];
+ DynamicSparsityPattern::iterator neighbor =
+ connectivity.begin(additional),
+ end = connectivity.end(additional);
+ for (; neighbor!=end ; ++neighbor)
+ {
+ if (cell_partition[neighbor->column()]==numbers::invalid_unsigned_int)
+ {
+ partition_size.back()++;
+ cell_partition[neighbor->column()] = partition;
+ neighbor_list.push_back(neighbor->column());
+ partition_list[counter++] = neighbor->column();
+ remainder--;
+ if (remainder == 0)
+ break;
+ }
+ }
+ }
+
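+ // Grow further partitions layer by layer: each new partition collects
+ // the not yet assigned neighbors of the previous partition and is then
+ // filled up to a multiple of cluster_size.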
+ while (neighbor_list.size()>0)
+ {
+ partition++;
+
+ // counter for number of cells so far in current partition
+ unsigned int partition_counter = 0;
+
+ // Mark the start of the new partition
+ partition_size.push_back(partition_size.back());
+
+ // Loop through the list of cells in the previous partition and put
+ // all their not yet assigned neighbors into the current partition
+ for (unsigned int j=0; j<neighbor_list.size(); ++j)
+ {
+ Assert(cell_partition[neighbor_list[j]]==partition-1,
+ ExcInternalError());
+ DynamicSparsityPattern::iterator neighbor =
+ connectivity.begin(neighbor_list[j]),
+ end = connectivity.end(neighbor_list[j]);
+ for (; neighbor!=end ; ++neighbor)
+ {
+ if (cell_partition[neighbor->column()]==numbers::invalid_unsigned_int)
+ {
+ partition_size.back()++;
+ cell_partition[neighbor->column()] = partition;
+
+ // collect the cells of the current partition for
+ // use as neighbors in next partition
+ neighbor_neighbor_list.push_back(neighbor->column());
+ partition_list[counter++] = neighbor->column();
+ partition_counter++;
+ }
+ }
+ }
+ remainder = cluster_size-(partition_counter%cluster_size);
+ if (remainder == cluster_size)
+ remainder = 0;
+ int index_stop = 0;
+ int index_before = neighbor_neighbor_list.size(), index = index_before;
+ while (remainder>0)
+ {
+ if (index==index_stop)
+ {
+ index = neighbor_neighbor_list.size();
+ if (index == index_before)
+ {
+ neighbor_neighbor_list.resize(0);
+ break;
+ }
+ index_stop = index_before;
+ index_before = index;
+ }
+ index--;
+ unsigned int additional = neighbor_neighbor_list[index];
+ DynamicSparsityPattern::iterator neighbor =
+ connectivity.begin(additional),
+ end = connectivity.end(additional);
+ for (; neighbor!=end ; ++neighbor)
+ {
+ if (cell_partition[neighbor->column()]==numbers::invalid_unsigned_int)
+ {
+ partition_size.back()++;
+ cell_partition[neighbor->column()] = partition;
+ neighbor_neighbor_list.push_back(neighbor->column());
+ partition_list[counter++] = neighbor->column();
+ remainder--;
+ if (remainder == 0)
+ break;
+ }
+ }
+ }
+
+ neighbor_list = neighbor_neighbor_list;
+ neighbor_neighbor_list.resize(0);
+ }
+ not_connect:
+ // The graph might not be connected, so check whether there are
+ // cells that have not been reached yet; if so, start a new
+ // partition from one of them.
+ work = false;
+ for (unsigned int j=start_up; j<n_blocks; ++j)
+ if (cell_partition[j] == numbers::invalid_unsigned_int)
+ {
+ start_up = j;
+ work = true;
+ if (remainder == 0)
+ remainder = cluster_size;
+ break;
+ }
+ }
+ if (remainder != 0)
+ partition++;
+
+ AssertDimension (partition_size[partition], n_blocks);
+
+ }
+
+
+ void
+ TaskInfo::update_task_info
+ (const unsigned int partition)
+ {
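+ // Count the partitions with an even and with an odd index; these
+ // counts determine how many worker tasks and how many blocked
+ // workers the task scheduler will use.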
+ evens = (partition+1)/2;
+ odds = partition/2;
+ n_blocked_workers =
+ odds-(odds+evens+1)%2;
+ n_workers = evens+odds-
+ n_blocked_workers;
+ // The remaining fields are only used for the partition-partition scheme.
+ partition_evens.resize(partition);
+ partition_odds.resize(partition);
+ partition_n_blocked_workers.resize(partition);
+ partition_n_workers.resize(partition);
+ for (unsigned int part=0; part<partition; part++)
+ {
+ partition_evens[part] =
+ (partition_row_index[part+1]-
+ partition_row_index[part]+1)/2;
+ partition_odds[part] =
+ (partition_row_index[part+1]-
+ partition_row_index[part])/2;
+ partition_n_blocked_workers[part] =
+ partition_odds[part]-(partition_odds[part]+
+ partition_evens[part]+1)%2;
+ partition_n_workers[part] =
+ partition_evens[part]+partition_odds[part]-
+ partition_n_blocked_workers[part];
+ }
+ }
+ }
+}
+
+
+
+// explicit instantiations of template functions
+template void internal::MatrixFreeFunctions::TaskInfo::
+print_memory_statistics<std::ostream> (std::ostream &, const std::size_t) const;
+template void internal::MatrixFreeFunctions::TaskInfo::
+print_memory_statistics<ConditionalOStream> (ConditionalOStream &, const std::size_t) const;
+
+
+DEAL_II_NAMESPACE_CLOSE
FEEvaluation<dim,fe_degree> fe_eval(data);
const unsigned int n_q_points = fe_eval.n_q_points;
- for (unsigned int cell=0; cell<data.get_size_info().n_macro_cells; ++cell)
+ for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
{
fe_eval.reinit(cell);
for (unsigned int q=0; q<n_q_points; ++q)
}
}
}
-
FEEvaluation<dim,fe_degree> fe_eval(data);
const unsigned int n_q_points = fe_eval.n_q_points;
- for (unsigned int cell=0; cell<data.get_size_info().n_macro_cells; ++cell)
+ for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
{
fe_eval.reinit(cell);
for (unsigned int q=0; q<n_q_points; ++q)
Step48::SineGordonProblem<2> sg_problem;
sg_problem.run ();
}
-
FEEvaluation<dim,fe_degree> fe_eval(data);
const unsigned int n_q_points = fe_eval.n_q_points;
- for (unsigned int cell=0; cell<data.get_size_info().n_macro_cells; ++cell)
+ for (unsigned int cell=0; cell<data.n_macro_cells(); ++cell)
{
fe_eval.reinit(cell);
for (unsigned int q=0; q<n_q_points; ++q)
}
}
}
-