Improve performance of the nonlocal graph of a Trilinos matrix by not
setting dummy entries on each processor.

Iterating over a DynamicSparsityPattern row by row is way too slow in the
parallel case because one needs to go over all indices, not only the
locally owned ones.

Also fix several 64-bit bugs.
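
As a minimal sketch of the access pattern this change moves to (assuming a
DynamicSparsityPattern dsp and a locally stored row; row_length() and
column_number() are the interfaces used in the hunks below):

    // copy the column indices of one row without ever incrementing a
    // row iterator past the last entry -- that increment has to search
    // for the next nonempty row, which is slow in parallel
    const unsigned int row_length = dsp.row_length(row);
    std::vector<dealii::types::global_dof_index> row_indices(row_length);
    for (unsigned int col = 0; col < row_length; ++col)
      row_indices[col] = dsp.column_number(row, col);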
* Constructor.
*/
Accessor (const DynamicSparsityPattern *sparsity_pattern,
- const unsigned int row,
+ const size_type row,
const unsigned int index_within_row);
/**
* The row we currently point into.
*/
- unsigned int current_row;
+ size_type current_row;
/**
* A pointer to the element within the current row that we currently point
* to.
* the zeroth row).
*/
Iterator (const DynamicSparsityPattern *sp,
- const unsigned int row,
+ const size_type row,
const unsigned int index_within_row);
/**
inline
Accessor::
Accessor (const DynamicSparsityPattern *sparsity_pattern,
- const unsigned int row,
+ const size_type row,
const unsigned int index_within_row)
:
sparsity_pattern(sparsity_pattern),
:
sparsity_pattern->lines[sparsity_pattern->rowset.index_within_set(current_row)].entries.end())
{
- Assert (current_row < sparsity_pattern->n_rows(),
- ExcIndexRange (row, 0, sparsity_pattern->n_rows()));
+ AssertIndexRange(current_row, sparsity_pattern->n_rows());
Assert ((sparsity_pattern->rowset.size()==0)
||
sparsity_pattern->rowset.is_element(current_row),
"DynamicSparsityPattern's row that is not "
"actually stored by that sparsity pattern "
"based on the IndexSet argument to it."));
- Assert (index_within_row <
- ((sparsity_pattern->rowset.size()==0)
- ?
- sparsity_pattern->lines[current_row].entries.size()
- :
- sparsity_pattern->lines[sparsity_pattern->rowset.index_within_set(current_row)].entries.size()),
- ExcIndexRange (index_within_row, 0,
- ((sparsity_pattern->rowset.size()==0)
- ?
- sparsity_pattern->lines[current_row].entries.size()
- :
- sparsity_pattern->lines[sparsity_pattern->rowset.index_within_set(current_row)].entries.size())));
+ AssertIndexRange(index_within_row,
+ ((sparsity_pattern->rowset.size()==0)
+ ?
+ sparsity_pattern->lines[current_row].entries.size()
+ :
+ sparsity_pattern->lines[sparsity_pattern->rowset.index_within_set(current_row)].entries.size()));
}
Accessor (const DynamicSparsityPattern *sparsity_pattern)
:
sparsity_pattern(sparsity_pattern),
- current_row(numbers::invalid_unsigned_int),
+ current_row(numbers::invalid_size_type),
current_entry(),
end_of_row()
{}
// current_entry field may not point to a deterministic location
return (sparsity_pattern == other.sparsity_pattern &&
current_row == other.current_row &&
- ((current_row == numbers::invalid_unsigned_int)
+ ((current_row == numbers::invalid_size_type)
|| (current_entry == other.current_entry)));
}
ExcInternalError());
// if *this is past-the-end, then it is less than no one
- if (current_row == numbers::invalid_unsigned_int)
+ if (current_row == numbers::invalid_size_type)
return (false);
// now *this should be a valid value
Assert (current_row < sparsity_pattern->n_rows(),
ExcInternalError());
// if other is past-the-end
- if (other.current_row == numbers::invalid_unsigned_int)
+ if (other.current_row == numbers::invalid_size_type)
return (true);
// now other should be a valid value
Assert (other.current_row < sparsity_pattern->n_rows(),
inline
Iterator::Iterator (const DynamicSparsityPattern *sparsity_pattern,
- const unsigned int row,
+ const size_type row,
const unsigned int index_within_row)
:
accessor(sparsity_pattern, row, index_within_row)
// pattern
//
// note: row_length(row) returns zero if the row is not locally stored
- unsigned int row = r;
+ //
+ // TODO: this is way too slow when used in parallel, so do not use it on
+ // non-owned rows
+ size_type row = r;
while ((row<n_rows())
&&
(row_length(row)==0))
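
A hedged sketch of how a caller can avoid the scan over non-owned rows
that the TODO above warns about, by walking only the locally owned set
(locally_owned_rows is a hypothetical IndexSet name; the pattern's rowset
member referenced elsewhere in this patch plays this role):

    // restrict the search for nonempty rows to locally stored ones;
    // row_length() returns zero for non-owned rows, so visiting them
    // is wasted work
    for (IndexSet::ElementIterator it = locally_owned_rows.begin();
         it != locally_owned_rows.end(); ++it)
      if (dsp.row_length(*it) > 0)
        {
          // first locally stored nonempty row found
          break;
        }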
#ifdef DEBUG
if (!overlapping)
{
- const unsigned int n_global_elements
+ const size_type n_global_elements
= Utilities::MPI::sum (n_elements(), communicator);
Assert (n_global_elements == size(),
ExcMessage ("You are trying to create an Epetra_Map object "
+ // for the non-local graph, we need to circumvent the problem that some
+ // processors will not add into the non-local graph at all: We do not want
+ // to insert dummy elements on >5000 processors because that gets very
+ // slow. Thus, we set the IndicesAreGlobal flag on Epetra_CrsGraph
+ // directly. Since the respective setter is protected, we need to expose
+ // it by deriving a class from Epetra_CrsGraph for the purpose of creating
+ // the data structure
+ class Epetra_CrsGraphMod : public Epetra_CrsGraph
+ {
+ public:
+ Epetra_CrsGraphMod (const Epetra_Map &row_map,
+ const int *n_entries_per_row)
+ :
+ Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
+ {}
+
+ void SetIndicesAreGlobal()
+ {
+ this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
+ }
+ };
+
+
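
A hedged usage sketch of the helper class above (map and entries_per_row
are placeholder names; the patch itself does the equivalent further down):

    // construct the non-local graph; a processor that never calls
    // InsertGlobalIndices can still set the flag explicitly so that
    // FillComplete() behaves consistently across all processors
    Epetra_CrsGraphMod nonlocal_graph (map, entries_per_row);
    nonlocal_graph.SetIndicesAreGlobal();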
// specialization for DynamicSparsityPattern which can provide us with
// more information about the non-locally owned rows
template <>
}
}
- // make sure all processors create an off-processor matrix with at least
- // one entry
- if (have_ghost_rows == true && ghost_rows.empty() == true)
- {
- ghost_rows.push_back(0);
- n_entries_per_ghost_row.push_back(1);
- }
-
Epetra_Map off_processor_map(-1, ghost_rows.size(),
(ghost_rows.size()>0)?(&ghost_rows[0]):NULL,
0, input_row_map.Comm());
- std_cxx11::shared_ptr<Epetra_CrsGraph> graph, nonlocal_graph;
+ std_cxx11::shared_ptr<Epetra_CrsGraph> graph;
+ std_cxx11::shared_ptr<Epetra_CrsGraphMod> nonlocal_graph;
if (input_row_map.Comm().NumProc() > 1)
{
graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
(n_entries_per_row.size()>0)?(&n_entries_per_row[0]):NULL,
exchange_data ? false : true));
if (have_ghost_rows == true)
- nonlocal_graph.reset (new Epetra_CrsGraph (Copy, off_processor_map,
- &n_entries_per_ghost_row[0],
- true));
+ nonlocal_graph.reset (new Epetra_CrsGraphMod (off_processor_map,
+ &n_entries_per_ghost_row[0]));
}
else
graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
continue;
row_indices.resize (row_length, -1);
- {
- dealii::DynamicSparsityPattern::iterator p = sparsity_pattern.begin(global_row);
- for (size_type col=0; p != sparsity_pattern.end(global_row); ++p, ++col)
- row_indices[col] = p->column();
- }
+ for (int col=0; col < row_length; ++col)
+ row_indices[col] = sparsity_pattern.column_number(global_row, col);
if (input_row_map.MyGID(global_row))
graph->InsertGlobalIndices (global_row, row_length, &row_indices[0]);
// finalize nonlocal graph and create nonlocal matrix
if (nonlocal_graph.get() != 0)
{
- if (nonlocal_graph->IndicesAreGlobal() == false &&
- nonlocal_graph->RowMap().NumMyElements() > 0)
- {
- // insert dummy element
- TrilinosWrappers::types::int_type row =
- nonlocal_graph->RowMap().MyGID(TrilinosWrappers::types::int_type(0));
- nonlocal_graph->InsertGlobalIndices(row, 1, &row);
- }
+ // must make sure the IndicesAreGlobal flag is set on all processors
+ // because some processors might not call InsertGlobalIndices (and
+ // we do not want to insert dummy indices on all processors for
+ // large-scale simulations due to the bad impact on performance)
+ nonlocal_graph->SetIndicesAreGlobal();
Assert(nonlocal_graph->IndicesAreGlobal() == true,
ExcInternalError());
nonlocal_graph->FillComplete(input_col_map, input_row_map);
row_indices.resize (row_length, -1);
{
typename SparsityType::iterator p = sp.begin(row);
- for (size_type col=0; p != sp.end(row); ++p, ++col)
- row_indices[col] = p->column();
+ // avoid incrementing p over the end of the current row because
+ // it is slow for DynamicSparsityPattern in parallel
+ for (int col=0; col<row_length; )
+ {
+ row_indices[col++] = p->column();
+ if (col < row_length)
+ ++p;
+ }
}
graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
&row_indices[0]);
row_indices.resize (row_length, -1);
{
typename SparsityType::iterator p = sp.begin(row);
- for (size_type col=0; p != sp.end(row); ++p, ++col)
- row_indices[col] = p->column();
+ // avoid incrementing p over the end of the current row because
+ // it is slow for DynamicSparsityPattern in parallel
+ for (int col=0; col<row_length; )
+ {
+ row_indices[col++] = p->column();
+ if (col < row_length)
+ ++p;
+ }
}
graph->InsertGlobalIndices (1,
reinterpret_cast<TrilinosWrappers::types::int_type *>(&row),