// for the scratch array.
n_max_entries_per_row += n_local_dofs;
if (column_indices.size() < n_max_entries_per_row)
- {
- column_indices.resize(n_max_entries_per_row);
- column_values.resize(n_max_entries_per_row);
- }
+ column_indices.resize(n_max_entries_per_row);
+ if (column_values.size() < n_max_entries_per_row)
+ column_values.resize(n_max_entries_per_row);
// now distribute entries row by row
for (unsigned int i=0; i<n_local_dofs; ++i)
// Check whether we did remain within the
// arrays when adding elements into the
- // scratch arrays. Moreover, there should
- // be at least one element in the scratch
- // array (the element diagonal).
+ // scratch arrays.
Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
// Finally, write the scratch array into
- // the sparse matrix.
+ // the sparse matrix. Of course, do so
+ // only if there is anything to
+ // write...
if (col_counter > 0)
global_matrix.add(local_dof_indices[i], col_counter,
&column_indices[0], &column_values[0],
ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs));
}
+ // A lock that allows only one thread at
+ // a time to go on in this function.
+ Threads::ThreadMutex::ScopedLock lock(mutex);
if (lines.size() == 0)
{
- bool add_these_indices;
- for (unsigned int i=0; i<n_local_dofs; ++i)
- for (unsigned int j=0; j<n_local_dofs; ++j)
+ if (column_indices.size() < local_dof_indices.size())
+ column_indices.resize(local_dof_indices.size());
+
+ // if the dof_mask is not active, we can
+ // just submit the local_dof_indices
+ // array for one row after the other.
+ if (dof_mask_is_active == false)
+ for (unsigned int i=0; i<n_local_dofs; ++i)
+ sparsity_pattern.add_entries (local_dof_indices[i],
+ local_dof_indices.begin(),
+ local_dof_indices.end());
+ else
+ for (unsigned int i=0; i<n_local_dofs; ++i)
{
- // There is one complication when
- // we call this function with
- // dof_mask argument: When the
- // mask is empty, we cannot
- // access the respective
- // position, and it would even
- // be inefficient to create
- // such a mask. Hence, we need
- // to first check whether
- // there is some information
- // in the mask at all.
- if (dof_mask_is_active)
- {
- if (dof_mask[i][j] == true)
- add_these_indices = true;
- else
- add_these_indices = false;
- }
- else
- add_these_indices = true;
- if (add_these_indices == true)
- sparsity_pattern.add(local_dof_indices[i],
- local_dof_indices[j]);
+ unsigned int col_counter = 0;
+ for (unsigned int j=0; j<n_local_dofs; ++j)
+ if (dof_mask[i][j] == true)
+ column_indices[col_counter++] = local_dof_indices[j];
+ sparsity_pattern.add_entries (local_dof_indices[i],
+ column_indices.begin(),
+ column_indices.begin()+col_counter);
}
}
else
// constraints, as above)
Assert (sorted == true, ExcMatrixNotClosed());
+ unsigned int n_max_entries_per_row = 0;
std::vector<const ConstraintLine *>
constraint_lines (n_local_dofs,
static_cast<const ConstraintLine *>(0));
if ((position != lines.end()) &&
(position->line == local_dof_indices[i]))
- constraint_lines[i] = &*position;
+ {
+ constraint_lines[i] = &*position;
+ n_max_entries_per_row += position->entries.size();
+ }
}
+ n_max_entries_per_row += n_local_dofs;
+ if (column_indices.size() < n_max_entries_per_row)
+ column_indices.resize(n_max_entries_per_row);
bool add_these_indices;
for (unsigned int i=0; i<n_local_dofs; ++i)
const ConstraintLine *position_i = constraint_lines[i];
const bool is_constrained_i = (position_i != 0);
+ unsigned int col_counter = 0;
+
for (unsigned int j=0; j<n_local_dofs; ++j)
{
// Again, first check whether
const bool is_constrained_j = (position_j != 0);
// if so requested, add the
- // entry unconditionally, even
- // if it is going to be
- // constrained away
+ // entry unconditionally,
+ // even if it is going to be
+ // constrained away. note
+ // that we can use the array
+ // of column indices only for
+ // those elements that are
+ // going to get into the same
+ // row.
if (keep_constrained_entries == true)
- sparsity_pattern.add (local_dof_indices[i],
- local_dof_indices[j]);
+ column_indices[col_counter++] = local_dof_indices[j];
if ((is_constrained_i == false) &&
(is_constrained_j == false) &&
(keep_constrained_entries == false))
- {
- sparsity_pattern.add (local_dof_indices[i],
- local_dof_indices[j]);
- }
+ column_indices[col_counter++] = local_dof_indices[j];
else if ((is_constrained_i == true) &&
(is_constrained_j == false))
{
(is_constrained_j == true))
{
for (unsigned int q=0; q<position_j->entries.size(); ++q)
- sparsity_pattern.add (local_dof_indices[i],
- position_j->entries[q].first);
+ column_indices[col_counter++] = position_j->entries[q].first;
}
else if ((is_constrained_i == true) &&
(is_constrained_j == true))
sparsity_pattern.add (position_i->entries[p].first,
position_j->entries[q].first);
- if (i == j)
- sparsity_pattern.add (local_dof_indices[i],
- local_dof_indices[i]);
+ if (i == j && keep_constrained_entries == false)
+ column_indices[col_counter++] = local_dof_indices[i];
}
else
// the only case that can
ExcInternalError());
}
}
+
+ // Check that we stayed within the bounds
+ // of the scratch array when adding
+ // elements.
+ Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
+
+ // Finally, write the scratch array into
+ // the sparsity pattern. Of course, do so
+ // only if there is anything to
+ // write...
+ if (col_counter > 0)
+ sparsity_pattern.add_entries (local_dof_indices[i],
+ column_indices.begin(),
+ column_indices.begin()+col_counter);
}
}
}
// only have to do the work if the
// current cell is owned by the calling
// processor. Otherwise, just continue.
+ //
+ // TODO: here, we should actually check
+ // the communicator of the sparsity
+ // pattern, not MPI_COMM_WORLD as the
+ // Utilities function does!
for (; cell!=endc; ++cell)
#ifdef DEAL_II_USE_TRILINOS
if ((types_are_equal<SparsityPattern,TrilinosWrappers::SparsityPattern>::value
||
types_are_equal<SparsityPattern,TrilinosWrappers::BlockSparsityPattern>::value)
&&
- cell->subdomain_id() !=
+ cell->subdomain_id() !=
Utilities::Trilinos::get_this_mpi_process(Utilities::Trilinos::comm_world()))
continue;
else
cell_row->get_dof_indices (local_dof_indices_row);
cell_col->get_dof_indices (local_dof_indices_col);
for (unsigned int i=0; i<dofs_per_cell_row; ++i)
- for (unsigned int j=0; j<dofs_per_cell_col; ++j)
- sparsity.add (local_dof_indices_row[i],
- local_dof_indices_col[j]);
+ sparsity.add_entries (local_dof_indices_row[i],
+ local_dof_indices_col.begin(),
+ local_dof_indices_col.end());
}
else if (cell_row->has_children())
{
cell_row_child->get_dof_indices (local_dof_indices_row);
cell_col->get_dof_indices (local_dof_indices_col);
for (unsigned int i=0; i<dofs_per_cell_row; ++i)
- for (unsigned int j=0; j<dofs_per_cell_col; ++j)
- sparsity.add (local_dof_indices_row[i],
- local_dof_indices_col[j]);
+ sparsity.add_entries (local_dof_indices_row[i],
+ local_dof_indices_col.begin(),
+ local_dof_indices_col.end());
}
}
else
cell_row->get_dof_indices (local_dof_indices_row);
cell_col_child->get_dof_indices (local_dof_indices_col);
for (unsigned int i=0; i<dofs_per_cell_row; ++i)
- for (unsigned int j=0; j<dofs_per_cell_col; ++j)
- sparsity.add (local_dof_indices_row[i],
- local_dof_indices_col[j]);
+ sparsity.add_entries (local_dof_indices_row[i],
+ local_dof_indices_col.begin(),
+ local_dof_indices_col.end());
}
}
}
= dof_to_boundary_mapping[cell->vertex_dof_index(direction,i)];
for (unsigned int i=0; i<dofs_per_vertex; ++i)
- for (unsigned int j=0; j<dofs_per_vertex; ++j)
- sparsity.add (boundary_dof_boundary_indices[i],
- boundary_dof_boundary_indices[j]);
+ sparsity.add_entries (boundary_dof_boundary_indices[i],
+ boundary_dof_boundary_indices.begin(),
+ boundary_dof_boundary_indices.end());
};
}
sub_neighbor->get_dof_indices (dofs_on_other_cell);
for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
- for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
- {
- sparsity.add (dofs_on_this_cell[i],
- dofs_on_other_cell[j]);
- sparsity.add (dofs_on_other_cell[j],
- dofs_on_this_cell[i]);
- }
+ sparsity.add_entries (dofs_on_this_cell[i],
+ dofs_on_other_cell.begin(),
+ dofs_on_other_cell.end());
+ for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
+ sparsity.add_entries (dofs_on_other_cell[j],
+ dofs_on_this_cell.begin(),
+ dofs_on_this_cell.end());
+
sub_neighbor->face(neighbor_face)->set_user_flag ();
}
}
dofs_on_other_cell.resize (n_dofs_on_neighbor);
neighbor->get_dof_indices (dofs_on_other_cell);
+
for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
- for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
- {
- sparsity.add (dofs_on_this_cell[i],
- dofs_on_other_cell[j]);
- sparsity.add (dofs_on_other_cell[j],
- dofs_on_this_cell[i]);
- }
+ sparsity.add_entries (dofs_on_this_cell[i],
+ dofs_on_other_cell.begin(),
+ dofs_on_other_cell.end());
+ for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
+ sparsity.add_entries (dofs_on_other_cell[j],
+ dofs_on_this_cell.begin(),
+ dofs_on_this_cell.end());
neighbor->face(neighbor_face)->set_user_flag ();
}
}
local_dof_indices.resize (n_dofs_on_this_cell);
cell->get_dof_indices (local_dof_indices);
for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
- for (unsigned int j=0; j<n_dofs_on_this_cell; ++j)
- sparsity.add (local_dof_indices[i], local_dof_indices[j]);
+ sparsity.add_entries (local_dof_indices[i],
+ local_dof_indices.begin(),
+ local_dof_indices.end());
// then do the same for the up
// to 2 neighbors
// compute couplings
for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
- for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
- sparsity.add (local_dof_indices[i], neighbor_dof_indices[j]);
+ sparsity.add_entries (local_dof_indices[i],
+ neighbor_dof_indices.begin(),
+ neighbor_dof_indices.end());
};
};
}
+// TODO: see whether one can employ the collective add operations of the
+// sparsity pattern in this function.
template <class DH, class SparsityPattern>
void
DoFTools::make_flux_sparsity_pattern (
void Tvmult_nonblock_nonblock (VectorType &dst,
const VectorType &src) const;
-
- /**
- * Make the iterator class a
- * friend. We have to work around
- * a compiler bug here again.
- */
-#ifndef DEAL_II_NAMESP_TEMPL_FRIEND_BUG
- template <typename, bool>
- friend class BlockMatrixIterators::Accessor;
-
- template <typename>
- friend class MatrixIterator;
-#else
- typedef BlockMatrixIterators::Accessor<BlockMatrixBase, true> ConstAccessor;
- typedef BlockMatrixIterators::Accessor<BlockMatrixBase, false> Accessor;
- friend class ConstAccessor;
-
- friend class const_iterator;
-#endif
-
-
private:
/**
* Temporary vector for counting the
*/
std::vector<std::vector<double> > column_values;
+
+ /**
+ * Make the iterator class a
+ * friend. We have to work around
+ * a compiler bug here again.
+ */
+#ifndef DEAL_II_NAMESP_TEMPL_FRIEND_BUG
+ template <typename, bool>
+ friend class BlockMatrixIterators::Accessor;
+
+ template <typename>
+ friend class MatrixIterator;
+#else
+ typedef BlockMatrixIterators::Accessor<BlockMatrixBase, true> ConstAccessor;
+ typedef BlockMatrixIterators::Accessor<BlockMatrixBase, false> Accessor;
+ friend class ConstAccessor;
+
+ friend class const_iterator;
+#endif
+
};
* and then relays to that block.
*/
void add (const unsigned int i, const unsigned int j);
-
+
+ /**
+ * Add several nonzero entries to the
+ * specified matrix row. This function
+ * may only be called for
+ * non-compressed sparsity patterns.
+ *
+ * If some of the entries already
+ * exist, nothing bad happens.
+ *
+ * This function simply finds out to
+ * which blocks the entries
+ * <tt>(row,col)</tt>, for <tt>col</tt>
+ * in the iterator range, belong and
+ * then relays to those blocks.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end);
+
/**
* Return number of rows of this
* matrix, which equals the
*/
BlockIndices column_indices;
+ private:
+ /**
+ * Temporary vector for counting the
+ * elements written into the
+ * individual blocks when doing a
+ * collective add or set.
+ */
+ std::vector<unsigned int> counter_within_block;
+
+ /**
+ * Temporary vector for the column
+ * indices in each block when doing a
+ * collective add on a row.
+ */
+ std::vector<std::vector<unsigned int> > block_column_indices;
+
/**
* Make the block sparse matrix a
* friend, so that it can use our
+template <class SparsityPatternBase>
+template <typename ForwardIterator>
+void
+BlockSparsityPatternBase<SparsityPatternBase>::add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end)
+{
+ // Resize scratch arrays
+ if (block_column_indices.size() < this->n_block_cols())
+ {
+ block_column_indices.resize (this->n_block_cols());
+ counter_within_block.resize (this->n_block_cols());
+ }
+
+ const unsigned int n_cols = static_cast<unsigned int>(end - begin);
+
+ // Resize sub-arrays to n_cols. This
+ // is a bit wasteful, but we resize
+ // only a few times (then the maximum
+ // row length won't increase that
+ // much any more). At least we know
+ // that all arrays are going to be of
+ // the same size, so we can check
+ // whether the size of one is large
+ // enough before actually going
+ // through all of them.
+ if (block_column_indices[0].size() < n_cols)
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ block_column_indices[i].resize(n_cols);
+
+ // Reset the number of added elements
+ // in each block to zero.
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ counter_within_block[i] = 0;
+
+ // Go through the column indices to
+ // find out which portions of the
+ // values should be set in which
+ // block of the matrix. We need to
+ // touch all the data, since we can't
+ // be sure that the data of one block
+ // is stored contiguously (in fact,
+ // indices will be intermixed when it
+ // comes from an element matrix).
+ for (ForwardIterator it = begin; it != end; ++it)
+ {
+ const unsigned int col = *it;
+
+ const std::pair<unsigned int, unsigned int>
+ col_index = this->column_indices.global_to_local(col);
+
+ const unsigned int local_index = counter_within_block[col_index.first]++;
+
+ block_column_indices[col_index.first][local_index] = col_index.second;
+ }
+
+#ifdef DEBUG
+ // If in debug mode, check that the
+ // per-block counts add up to the number
+ // of input columns.
+ unsigned int length = 0;
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ length += counter_within_block[i];
+ Assert (length == n_cols, ExcInternalError());
+#endif
+
+ // Now that we know which block each
+ // column index belongs to, write the
+ // indices into the individual blocks.
+ const std::pair<unsigned int,unsigned int>
+ row_index = this->row_indices.global_to_local (row);
+ for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+ {
+ if (counter_within_block[block_col] == 0)
+ continue;
+ sub_objects[row_index.first][block_col]->
+ add_entries (row_index.second,
+ block_column_indices[block_col].begin(),
+ block_column_indices[block_col].begin()+counter_within_block[block_col]);
+ }
+}
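// Illustrative sketch, not part of this patch: the splitting performed by
// BlockIndices::global_to_local() above turns a global column index into a
// (block number, index within block) pair. A minimal standalone version of
// that computation, assuming the block sizes are known, could look like:
#include <cassert>
#include <utility>
#include <vector>

std::pair<unsigned int, unsigned int>
global_to_local_sketch (const std::vector<unsigned int> &block_sizes,
                        unsigned int                     global_index)
{
  for (unsigned int b = 0; b < block_sizes.size(); ++b)
    {
      if (global_index < block_sizes[b])
        return std::make_pair (b, global_index);
      global_index -= block_sizes[b];
    }
  assert (false && "global index out of range");
  return std::make_pair (0u, 0u);
}

int main ()
{
  // Two blocks of sizes 3 and 4: global column 5 lives in block 1 at local
  // position 2, so add_entries() above would buffer it in
  // block_column_indices[1].
  std::vector<unsigned int> sizes;
  sizes.push_back (3);
  sizes.push_back (4);
  assert (global_to_local_sketch (sizes, 5) == std::make_pair (1u, 2u));
  return 0;
}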
+
+
+
template <class SparsityPatternBase>
inline
bool
*/
void add (const unsigned int i,
const unsigned int j);
-
+
+ /**
+ * Add several nonzero entries to the
+ * specified row of the matrix. If the
+ * entries already exist, nothing bad
+ * happens.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end);
+
/**
* Check if a value at a certain
* position may be non-zero.
* this line.
*/
void add (const unsigned int col_num);
+
+ /**
+ * Add the columns specified by the
+ * iterator range to this line.
+ */
+ template <typename ForwardIterator>
+ void add_entries (ForwardIterator begin,
+ ForwardIterator end);
};
+template <typename ForwardIterator>
+inline
+void
+CompressedSetSparsityPattern::Line::add_entries (ForwardIterator begin,
+ ForwardIterator end)
+{
+ // right now, just forward to the single-entry add() function.
+ for (ForwardIterator it = begin; it != end; ++it)
+ add (*it);
+}
+
+
+
inline
unsigned int
CompressedSetSparsityPattern::n_rows () const
+template <typename ForwardIterator>
+inline
+void
+CompressedSetSparsityPattern::add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end)
+{
+ Assert (row < rows, ExcIndexRange (row, 0, rows));
+
+ lines[row].add_entries (begin, end);
+}
+
+
+
inline
unsigned int
CompressedSetSparsityPattern::row_length (const unsigned int row) const
void add (const unsigned int i,
const unsigned int j);
+ /**
+ * Add several nonzero entries to the
+ * specified row of the matrix. If the
+ * entries already exist, nothing bad
+ * happens.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end);
+
/**
* Check if a value at a certain
* position may be non-zero.
* this line.
*/
void add (const unsigned int col_num);
+
+ /**
+ * Add the columns specified by the
+ * iterator range to this line.
+ */
+ template <typename ForwardIterator>
+ void add_entries (ForwardIterator begin,
+ ForwardIterator end);
};
+template <typename ForwardIterator>
+inline
+void
+CompressedSimpleSparsityPattern::Line::add_entries (ForwardIterator begin,
+ ForwardIterator end)
+{
+ if (end - begin <= 0)
+ return;
+
+ ForwardIterator my_it = begin;
+ const unsigned int n_cols = static_cast<unsigned int>(end - begin);
+
+ // If necessary, increase the capacity of the array. To avoid reallocating
+ // for just a few new entries every time, reserve room for at least five
+ // elements at a time.
+ const unsigned int stop_size = entries.size() + (n_cols > 5 ? n_cols : 5);
+ if (stop_size > entries.capacity())
+ entries.reserve (stop_size);
+
+ unsigned int col = *my_it;
+ std::vector<unsigned int>::iterator it, it2;
+ // insert the first element: as in the single-entry add(), first check
+ // against the last element (or whether the line is still empty)
+ if ( (entries.size()==0) || ( entries.back() < col) ) {
+ entries.push_back(col);
+ it = entries.end()-1;
+ }
+ else {
+ // do a binary search to find the place where to insert:
+ it2 = std::lower_bound(entries.begin(), entries.end(), col);
+
+ // If this entry is a duplicate, do nothing; otherwise insert it at
+ // the right place in the vector, which grows automatically to make
+ // room for the new element.
+ if (*it2 != col)
+ it = entries.insert(it2, col);
+ else
+ it = it2;
+ }
+
+ ++my_it;
+ // Now try to be smart and insert with bias in the direction we are
+ // walking. This has the advantage that for sorted lists, we always
+ // search in the right direction, which should decrease the work needed
+ // here.
+ for ( ; my_it != end; ++my_it)
+ {
+ col = *my_it;
+ // need a special insertion command when we're at the end of the list
+ if (col > entries.back()) {
+ entries.push_back(col);
+ it = entries.end()-1;
+ }
+ // search to the right (preferred search direction)
+ else if (col > *it) {
+ it2 = std::lower_bound(it++, entries.end(), col);
+ if (*it2 != col)
+ it = entries.insert(it2, col);
+ }
+ // search to the left
+ else if (col < *it) {
+ it2 = std::lower_bound(entries.begin(), it, col);
+ if (*it2 != col)
+ it = entries.insert(it2, col);
+ }
+ // if we're neither larger nor smaller, then this was a duplicate and
+ // we can just continue.
+ }
+}
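// Illustrative sketch, not part of this patch: the routine above keeps
// 'entries' sorted and free of duplicates. A simplified reference version
// of the same invariant, without the directional search bias, looks like
// this:
#include <algorithm>
#include <cassert>
#include <vector>

void add_entries_reference (std::vector<unsigned int> &entries,
                            const unsigned int        *begin,
                            const unsigned int        *end)
{
  for (const unsigned int *p = begin; p != end; ++p)
    {
      // find the first position not smaller than *p and insert there,
      // unless the value is already present
      std::vector<unsigned int>::iterator pos
        = std::lower_bound (entries.begin(), entries.end(), *p);
      if (pos == entries.end() || *pos != *p)
        entries.insert (pos, *p);
    }
}

int main ()
{
  std::vector<unsigned int> line;
  const unsigned int cols[] = {5, 1, 5, 9};
  add_entries_reference (line, cols, cols + 4);
  // the line now reads {1, 5, 9}: sorted and duplicate-free, which is
  // what the optimized version above produces as well.
  assert (line.size() == 3 && line[0] == 1 && line[1] == 5 && line[2] == 9);
  return 0;
}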
+
+
+
inline
unsigned int
CompressedSimpleSparsityPattern::n_rows () const
+template <typename ForwardIterator>
+inline
+void
+CompressedSimpleSparsityPattern::add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end)
+{
+ Assert (row < rows, ExcIndexRange (row, 0, rows));
+
+ lines[row].add_entries (begin, end);
+}
+
+
+
inline
CompressedSimpleSparsityPattern::Line::Line ()
{}
*/
void add (const unsigned int i,
const unsigned int j);
-
+
+ /**
+ * Add several nonzero entries to the
+ * specified row of the matrix. If the
+ * entries already exist, nothing bad
+ * happens.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end);
+
/**
* Check if a value at a certain
* position may be non-zero.
*/
void add (const unsigned int col_num);
+ /**
+ * Add the columns specified by the
+ * iterator range to this line.
+ */
+ template <typename ForwardIterator>
+ void add_entries (ForwardIterator begin,
+ ForwardIterator end);
+
/**
* Flush the cache my merging it with
* the #entries array.
+template <typename ForwardIterator>
+inline
+void
+CompressedSparsityPattern::Line::add_entries (ForwardIterator begin,
+ ForwardIterator end)
+{
+ // right now, just forward to the single-entry add() function.
+ for (ForwardIterator it = begin; it != end; ++it)
+ add (*it);
+}
+
+
+
inline
unsigned int
CompressedSparsityPattern::n_rows () const
+template <typename ForwardIterator>
+inline
+void
+CompressedSparsityPattern::add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end)
+{
+ Assert (row < rows, ExcIndexRange (row, 0, rows));
+
+ lines[row].add_entries (begin, end);
+}
+
+
+
inline
CompressedSparsityPattern::Line::Line ()
:
*/
void add (const unsigned int i,
const unsigned int j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified matrix row. This function
+ * may only be called for
+ * non-compressed sparsity patterns.
+ *
+ * If some of the entries already
+ * exist, nothing bad happens.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end);
/**
* Make the sparsity pattern
* Add several elements in one row to
* the sparsity pattern.
*/
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices);
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end);
//@}
/**
* @name Access of underlying Trilinos data
*/
std::auto_ptr<Epetra_FECrsGraph> graph;
- /**
- * Scratch array that holds several
- * indices that should be written
- * into the same row of the sparsity
- * pattern. This is to increase the
- * speed of this function.
- */
- std::vector<unsigned int> cached_row_indices;
-
- /**
- * A number that tells how many
- * indices currently are active in
- * the cache.
- */
- unsigned int n_cached_elements;
-
- /**
- * The row that is currently in the
- * cache.
- */
- unsigned int row_in_cache;
-
friend class SparsityPatternIterators::const_iterator;
friend class SparsityPatternIterators::const_iterator::Accessor;
};
SparsityPattern::add (const unsigned int i,
const unsigned int j)
{
- // if we want to write an element to the
- // row the cache is currently pointed to,
- // we just append the data to the cache
- if (i == row_in_cache)
- {
- // if the size is too small, extend the
- // cache by 10 elements
- if (n_cached_elements >= cached_row_indices.size())
- cached_row_indices.resize(cached_row_indices.size() + 10);
-
- cached_row_indices[n_cached_elements] = j;
- ++n_cached_elements;
- return;
- }
-
- // if we are to write another row data,
- // we write the cache data into the
- // sparsity pattern, and then call this
- // function again
- add (row_in_cache, n_cached_elements, &cached_row_indices[0]);
- row_in_cache = i;
- n_cached_elements = 0;
- add (i,j);
+ add_entries (i, &j, &j+1);
}
+ template <typename ForwardIterator>
inline
void
- SparsityPattern::add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices)
+ SparsityPattern::add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end)
{
- if (n_cols == 0)
+ if (begin == end)
return;
- int * col_index_ptr = (int*)(col_indices);
+ int * col_index_ptr = (int*)(&*begin);
+ const int n_cols = static_cast<int>(end - begin);
compressed = false;
const int ierr = graph->InsertGlobalIndices (1, (int*)&row,
}
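// Illustrative sketch, not part of this patch: the cast to int* above reads
// the iterator range as one contiguous block of integers, so this overload
// is intended for pointers and std::vector iterators. Assuming a pattern
// 'sp' whose row 0 is owned by the calling process, a valid call would be:
inline void add_entries_contiguous_sketch (SparsityPattern &sp)
{
  std::vector<unsigned int> cols (2);
  cols[0] = 1;
  cols[1] = 4;
  // a std::vector stores its elements contiguously, so &*cols.begin() and
  // cols.end()-cols.begin() are well defined, as required by the code above
  sp.add_entries (0, cols.begin(), cols.end());
}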
+
+template <typename ForwardIterator>
+void
+SparsityPattern::add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end)
+{
+ // right now, just forward to the other function.
+ for (ForwardIterator it = begin; it != end; ++it)
+ add (row, *it);
+}
+
+
bool
SparsityPattern::exists (const unsigned int i,
const unsigned int j) const
template void SparsityPattern::copy_from<float> (const FullMatrix<float> &, bool);
template void SparsityPattern::copy_from<double> (const FullMatrix<double> &, bool);
+template void SparsityPattern::add_entries<const unsigned int*> (const unsigned int,
+ const unsigned int*,
+ const unsigned int*);
+template void SparsityPattern::add_entries<std::vector<unsigned int>::const_iterator>
+(const unsigned int,
+ std::vector<unsigned int>::const_iterator,
+ std::vector<unsigned int>::const_iterator);
+template void SparsityPattern::add_entries<std::vector<unsigned int>::iterator>
+(const unsigned int,
+ std::vector<unsigned int>::iterator,
+ std::vector<unsigned int>::iterator);
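// Illustrative usage sketch, not part of this patch: the explicit
// instantiations above correspond to the iterator types with which callers
// invoke add_entries(). A hypothetical caller could, for instance, do:
inline void add_entries_usage_sketch ()
{
  SparsityPattern sp (5, 5, 4);                    // 5x5, at most 4 entries/row
  std::vector<unsigned int> cols;
  cols.push_back (1);
  cols.push_back (3);
  sp.add_entries (0, cols.begin(), cols.end());    // vector<...>::iterator
  const unsigned int raw_cols[2] = {0, 4};
  sp.add_entries (1, raw_cols, raw_cols + 2);      // const unsigned int *
  sp.compress ();
}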
+
DEAL_II_NAMESPACE_CLOSE
col_map (row_map),
compressed (true),
graph (std::auto_ptr<Epetra_FECrsGraph>
- (new Epetra_FECrsGraph(View, row_map, col_map, 0))),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ (new Epetra_FECrsGraph(View, row_map, col_map, 0)))
{
graph->FillComplete();
}
(new Epetra_FECrsGraph(Copy, row_map, col_map,
(int)n_entries_per_row,
false)))
- ),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ )
{}
SparsityPattern::SparsityPattern (const Epetra_Map &InputMap,
(int*)const_cast<unsigned int*>
(&(n_entries_per_row[row_map.MinMyGID()])),
false)))
- ),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ )
{}
SparsityPattern::SparsityPattern (const Epetra_Map &InputRowMap,
(new Epetra_FECrsGraph(Copy, row_map, col_map,
(int)n_entries_per_row,
false)))
- ),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ )
{}
SparsityPattern::SparsityPattern (const Epetra_Map &InputRowMap,
(int*)const_cast<unsigned int*>
(&(n_entries_per_row[row_map.MinMyGID()])),
false)))
- ),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ )
{}
SparsityPattern::SparsityPattern (const unsigned int m,
compressed (false),
graph (std::auto_ptr<Epetra_FECrsGraph>
(new Epetra_FECrsGraph(Copy, row_map, col_map,
- int(n_entries_per_row), false))),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ int(n_entries_per_row), false)))
{}
SparsityPattern::SparsityPattern (const unsigned int m,
graph (std::auto_ptr<Epetra_FECrsGraph>
(new Epetra_FECrsGraph(Copy, row_map, col_map,
(int*)const_cast<unsigned int*>(&(n_entries_per_row[0])),
- false))),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ false)))
{}
// Copy function is currently not working
col_map (InputSP.col_map),
compressed (false),
graph (std::auto_ptr<Epetra_FECrsGraph>
- (new Epetra_FECrsGraph(*InputSP.graph))),
- cached_row_indices (1),
- n_cached_elements (0),
- row_in_cache (0)
+ (new Epetra_FECrsGraph(*InputSP.graph)))
{}
*/
else
graph = std::auto_ptr<Epetra_FECrsGraph>
(new Epetra_FECrsGraph(Copy, row_map, col_map, n_entries_per_row, false));
-
- n_cached_elements = 0;
- row_in_cache = 0;
}
(new Epetra_FECrsGraph(Copy, row_map, col_map,
n_entries_per_row[input_row_map.MinMyGID()],
false));
-
- n_cached_elements = 0;
- row_in_cache = 0;
}
graph->FillComplete();
- n_cached_elements = 0;
- row_in_cache = 0;
-
compressed = true;
}
void
SparsityPattern::compress ()
{
- // flush buffers
- if (n_cached_elements > 0)
- {
- add (row_in_cache, n_cached_elements, &cached_row_indices[0]);
- n_cached_elements = 0;
- row_in_cache = 0;
- }
-
int ierr;
ierr = graph->GlobalAssemble (col_map, row_map, true);