From: kronbichler
Date: Wed, 4 Mar 2009 19:29:52 +0000 (+0000)
Subject: Introduce collective add_entries() functions to sparsity patterns.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b23f17b4388d2156e658735ea945d4bd22093f4e;p=dealii-svn.git

Introduce collective add_entries() functions to sparsity patterns.

git-svn-id: https://svn.dealii.org/trunk@18447 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/deal.II/include/dofs/dof_constraints.templates.h b/deal.II/deal.II/include/dofs/dof_constraints.templates.h
index b54a167a98..db04ee8136 100644
--- a/deal.II/deal.II/include/dofs/dof_constraints.templates.h
+++ b/deal.II/deal.II/include/dofs/dof_constraints.templates.h
@@ -908,10 +908,9 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
                                    // for the scratch array.
   n_max_entries_per_row += n_local_dofs;
   if (column_indices.size() < n_max_entries_per_row)
-    {
-      column_indices.resize(n_max_entries_per_row);
-      column_values.resize(n_max_entries_per_row);
-    }
+    column_indices.resize(n_max_entries_per_row);
+  if (column_values.size() < n_max_entries_per_row)
+    column_values.resize(n_max_entries_per_row);
 
                                    // now distribute entries row by row
   for (unsigned int i=0; i<n_local_dofs; ++i)
@@ ... @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
                                    // Check whether we did remain within the
                                    // arrays when adding elements into the
-                                   // scratch arrays. Moreover, there should
-                                   // be at least one element in the scratch
-                                   // array (the element diagonal).
+                                   // scratch arrays.
       Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
 
                                    // Finally, write the scratch array into
-                                   // the sparse matrix.
+                                   // the sparse matrix. Of course, do it
+                                   // only in case there is anything to
+                                   // write...
       if (col_counter > 0)
         global_matrix.add(local_dof_indices[i], col_counter,
                           &column_indices[0], &column_values[0],
@@ -1117,35 +1116,32 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
             ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs));
     }
 
+                                   // A lock that allows only one thread at
+                                   // a time to go on in this function.
+  Threads::ThreadMutex::ScopedLock lock(mutex);
   if (lines.size() == 0)
     {
-      bool add_these_indices;
-      for (unsigned int i=0; i<n_local_dofs; ++i)
[...]
@@ ... @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                                    // constraints, as above)
   Assert (sorted == true, ExcMatrixNotClosed());
 
+  unsigned int n_max_entries_per_row = 0;
   std::vector<const ConstraintLine*>
     constraint_lines (n_local_dofs,
                       static_cast<const ConstraintLine*>(0));
@@ -1173,9 +1170,15 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
       if ((position != lines.end())
           &&
           (position->line == local_dof_indices[i]))
-        constraint_lines[i] = &*position;
+        {
+          constraint_lines[i] = &*position;
+          n_max_entries_per_row += position->entries.size();
+        }
     }
 
+  n_max_entries_per_row += n_local_dofs;
+  if (column_indices.size() < n_max_entries_per_row)
+    column_indices.resize(n_max_entries_per_row);
 
   bool add_these_indices;
   for (unsigned int i=0; i<n_local_dofs; ++i)
[...]
@@ ... @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
       const ConstraintLine *position_i = constraint_lines[i];
       const bool is_constrained_i = (position_i != 0);
 
+      unsigned int col_counter = 0;
+
       for (unsigned int j=0; j<n_local_dofs; ++j)
[...]
@@ ... @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
           const bool is_constrained_j = (position_j != 0);
 
                                    // if so requested, add the
-                                   // entry unconditionally, even
-                                   // if it is going to be
-                                   // constrained away
+                                   // entry unconditionally,
+                                   // even if it is going to be
+                                   // constrained away. note
+                                   // that we can use the array
+                                   // of column indices only for
+                                   // those elements that are
+                                   // going to get into the same
+                                   // row.
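// ---------------------------------------------------------------------
// A minimal sketch (not part of the patch) of the row-batching pattern
// the hunk above prepares for: gather all column entries of one row into
// the column_indices/column_values scratch arrays, then issue a single
// collective write. ToyMatrix and write_row are hypothetical stand-ins;
// the real SparseMatrix::add() takes further arguments.

#include <map>
#include <vector>

struct ToyMatrix
{
  std::map<unsigned int, std::map<unsigned int, double> > data;

  // collective write: one row, many columns in a single call, which
  // amortizes per-call overhead (locking, row lookup) over the whole row
  void add (const unsigned int  row,
            const unsigned int  n_cols,
            const unsigned int *cols,
            const double       *values)
  {
    for (unsigned int c=0; c<n_cols; ++c)
      data[row][cols[c]] += values[c];
  }
};

void write_row (ToyMatrix                       &matrix,
                const unsigned int               row,
                const std::vector<unsigned int> &column_indices,
                const std::vector<double>       &column_values,
                const unsigned int               col_counter)
{
  // only write if the scratch arrays hold anything for this row
  if (col_counter > 0)
    matrix.add (row, col_counter, &column_indices[0], &column_values[0]);
}
// ---------------------------------------------------------------------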
if (keep_constrained_entries == true) - sparsity_pattern.add (local_dof_indices[i], - local_dof_indices[j]); + column_indices[col_counter++] = local_dof_indices[j]; if ((is_constrained_i == false) && (is_constrained_j == false) && (keep_constrained_entries == false)) - { - sparsity_pattern.add (local_dof_indices[i], - local_dof_indices[j]); - } + column_indices[col_counter++] = local_dof_indices[j]; else if ((is_constrained_i == true) && (is_constrained_j == false)) { @@ -1232,8 +1238,7 @@ add_entries_local_to_global (const std::vector &local_dof_indices, (is_constrained_j == true)) { for (unsigned int q=0; qentries.size(); ++q) - sparsity_pattern.add (local_dof_indices[i], - position_j->entries[q].first); + column_indices[col_counter++] = position_j->entries[q].first; } else if ((is_constrained_i == true) && (is_constrained_j == true)) @@ -1243,9 +1248,8 @@ add_entries_local_to_global (const std::vector &local_dof_indices, sparsity_pattern.add (position_i->entries[p].first, position_j->entries[q].first); - if (i == j) - sparsity_pattern.add (local_dof_indices[i], - local_dof_indices[i]); + if (i == j && keep_constrained_entries == false) + column_indices[col_counter++] = local_dof_indices[i]; } else // the only case that can @@ -1257,6 +1261,20 @@ add_entries_local_to_global (const std::vector &local_dof_indices, ExcInternalError()); } } + + // Check whether we did remain within the + // arrays when adding elements into the + // scratch arrays. + Assert (col_counter <= n_max_entries_per_row, ExcInternalError()); + + // Finally, write the scratch array into + // the sparsity pattern. Of course, do it + // only in case there is anything to + // write... + if (col_counter > 0) + sparsity_pattern.add_entries (local_dof_indices[i], + column_indices.begin(), + column_indices.begin()+col_counter); } } } diff --git a/deal.II/deal.II/source/dofs/dof_tools.cc b/deal.II/deal.II/source/dofs/dof_tools.cc index 023e7dd288..077b493fc4 100644 --- a/deal.II/deal.II/source/dofs/dof_tools.cc +++ b/deal.II/deal.II/source/dofs/dof_tools.cc @@ -77,13 +77,18 @@ DoFTools::make_sparsity_pattern (const DH &dof, // only have to do the work if the // current cell is owned by the calling // processor. Otherwise, just continue. + // + // TODO: here, we should actually check + // the communicator of the sparsity + // pattern, not MPI_COMM_WORLD as the + // Utilities function does! 
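// ---------------------------------------------------------------------
// A sketch (not part of the patch) of the calling pattern the dof_tools.cc
// changes below move to: instead of one sparsity_pattern.add(i,j) call per
// pair of local dofs, each row receives the whole index range in a single
// add_entries() call. ToyPattern and couple_cell_dofs are hypothetical
// stand-ins for the pattern classes extended in this commit.

#include <set>
#include <vector>

struct ToyPattern
{
  std::vector<std::set<unsigned int> > rows;

  ToyPattern (const unsigned int n_rows) : rows (n_rows) {}

  template <typename ForwardIterator>
  void add_entries (const unsigned int row,
                    ForwardIterator    begin,
                    ForwardIterator    end)
  {
    rows[row].insert (begin, end);   // duplicates are harmless
  }
};

// on one cell, every local dof couples with every other one, so the whole
// index vector goes into each row at once
void couple_cell_dofs (ToyPattern                      &pattern,
                       const std::vector<unsigned int> &local_dof_indices)
{
  for (unsigned int i=0; i<local_dof_indices.size(); ++i)
    pattern.add_entries (local_dof_indices[i],
                         local_dof_indices.begin(),
                         local_dof_indices.end());
}
// ---------------------------------------------------------------------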
for (; cell!=endc; ++cell) #ifdef DEAL_II_USE_TRILINOS if ((types_are_equal::value || types_are_equal::value) && - cell->subdomain_id() != + cell->subdomain_id() != Utilities::Trilinos::get_this_mpi_process(Utilities::Trilinos::comm_world())) continue; else @@ -265,9 +270,9 @@ DoFTools::make_sparsity_pattern ( cell_row->get_dof_indices (local_dof_indices_row); cell_col->get_dof_indices (local_dof_indices_col); for (unsigned int i=0; ihas_children()) { @@ -288,9 +293,9 @@ DoFTools::make_sparsity_pattern ( cell_row_child->get_dof_indices (local_dof_indices_row); cell_col->get_dof_indices (local_dof_indices_col); for (unsigned int i=0; iget_dof_indices (local_dof_indices_row); cell_col_child->get_dof_indices (local_dof_indices_col); for (unsigned int i=0; ivertex_dof_index(direction,i)]; for (unsigned int i=0; iget_dof_indices (dofs_on_other_cell); for (unsigned int i=0; iface(neighbor_face)->set_user_flag (); } } @@ -605,14 +611,15 @@ DoFTools::make_flux_sparsity_pattern ( dofs_on_other_cell.resize (n_dofs_on_neighbor); neighbor->get_dof_indices (dofs_on_other_cell); + for (unsigned int i=0; iface(neighbor_face)->set_user_flag (); } } @@ -650,8 +657,9 @@ DoFTools::make_flux_sparsity_pattern ( local_dof_indices.resize (n_dofs_on_this_cell); cell->get_dof_indices (local_dof_indices); for (unsigned int i=0; i void DoFTools::make_flux_sparsity_pattern ( diff --git a/deal.II/lac/include/lac/block_matrix_base.h b/deal.II/lac/include/lac/block_matrix_base.h index 748bd12c3e..9e28ce9218 100644 --- a/deal.II/lac/include/lac/block_matrix_base.h +++ b/deal.II/lac/include/lac/block_matrix_base.h @@ -1150,27 +1150,6 @@ class BlockMatrixBase : public Subscriptor void Tvmult_nonblock_nonblock (VectorType &dst, const VectorType &src) const; - - /** - * Make the iterator class a - * friend. We have to work around - * a compiler bug here again. - */ -#ifndef DEAL_II_NAMESP_TEMPL_FRIEND_BUG - template - friend class BlockMatrixIterators::Accessor; - - template - friend class MatrixIterator; -#else - typedef BlockMatrixIterators::Accessor ConstAccessor; - typedef BlockMatrixIterators::Accessor Accessor; - friend class ConstAccessor; - - friend class const_iterator; -#endif - - private: /** * Temporary vector for counting the @@ -1196,6 +1175,26 @@ class BlockMatrixBase : public Subscriptor */ std::vector > column_values; + + /** + * Make the iterator class a + * friend. We have to work around + * a compiler bug here again. + */ +#ifndef DEAL_II_NAMESP_TEMPL_FRIEND_BUG + template + friend class BlockMatrixIterators::Accessor; + + template + friend class MatrixIterator; +#else + typedef BlockMatrixIterators::Accessor ConstAccessor; + typedef BlockMatrixIterators::Accessor Accessor; + friend class ConstAccessor; + + friend class const_iterator; +#endif + }; diff --git a/deal.II/lac/include/lac/block_sparsity_pattern.h b/deal.II/lac/include/lac/block_sparsity_pattern.h index 45b914cba8..eb0df50d7b 100644 --- a/deal.II/lac/include/lac/block_sparsity_pattern.h +++ b/deal.II/lac/include/lac/block_sparsity_pattern.h @@ -280,7 +280,27 @@ class BlockSparsityPatternBase : public Subscriptor * and then relays to that block. */ void add (const unsigned int i, const unsigned int j); - + + /** + * Add several nonzero entries to the + * specified matrix row. This function + * may only be called for + * non-compressed sparsity patterns. + * + * If some of the entries already + * exist, nothing bad happens. 
+ * + * This function simply finds out to + * which blocks (row,col) for + * col in the iterator range + * belong and then relays to those + * blocks. + */ + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end); + /** * Return number of rows of this * matrix, which equals the @@ -421,6 +441,23 @@ class BlockSparsityPatternBase : public Subscriptor */ BlockIndices column_indices; + private: + /** + * Temporary vector for counting the + * elements written into the + * individual blocks when doing a + * collective add or set. + */ + std::vector counter_within_block; + + /** + * Temporary vector for column + * indices on each block when writing + * local to global data on each + * sparse matrix. + */ + std::vector > block_column_indices; + /** * Make the block sparse matrix a * friend, so that it can use our @@ -1046,6 +1083,91 @@ BlockSparsityPatternBase::add (const unsigned int i, +template +template +void +BlockSparsityPatternBase::add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end) +{ + // Resize scratch arrays + if (block_column_indices.size() < this->n_block_cols()) + { + block_column_indices.resize (this->n_block_cols()); + counter_within_block.resize (this->n_block_cols()); + } + + const unsigned int n_cols = static_cast(end - begin); + + // Resize sub-arrays to n_cols. This + // is a bit wasteful, but we resize + // only a few times (then the maximum + // row length won't increase that + // much any more). At least we know + // that all arrays are going to be of + // the same size, so we can check + // whether the size of one is large + // enough before actually going + // through all of them. + if (block_column_indices[0].size() < n_cols) + for (unsigned int i=0; in_block_cols(); ++i) + block_column_indices[i].resize(n_cols); + + // Reset the number of added elements + // in each block to zero. + for (unsigned int i=0; in_block_cols(); ++i) + counter_within_block[i] = 0; + + // Go through the column indices to + // find out which portions of the + // values should be set in which + // block of the matrix. We need to + // touch all the data, since we can't + // be sure that the data of one block + // is stored contiguously (in fact, + // indices will be intermixed when it + // comes from an element matrix). + for (ForwardIterator it = begin; it != end; ++it) + { + const unsigned int col = *it; + + const std::pair + col_index = this->column_indices.global_to_local(col); + + const unsigned int local_index = counter_within_block[col_index.first]++; + + block_column_indices[col_index.first][local_index] = col_index.second; + } + +#ifdef DEBUG + // If in debug mode, do a check whether + // the right length has been obtained. + unsigned int length = 0; + for (unsigned int i=0; in_block_cols(); ++i) + length += counter_within_block[i]; + Assert (length == n_cols, ExcInternalError()); +#endif + + // Now we found out about where the + // individual columns should start and + // where we should start reading out + // data. Now let's write the data into + // the individual blocks! 
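// ---------------------------------------------------------------------
// An illustration (not part of the patch) of the global_to_local()
// splitting performed at this step, shown for uniform block sizes for
// simplicity; the real BlockIndices class also handles non-uniform
// blocks. bucket_columns and bs are illustrative names only. For block
// size bs, global column c lives in block c/bs at local position c%bs,
// and the loop buckets incoming columns per block exactly as the code
// below does.

#include <utility>
#include <vector>

std::vector<std::vector<unsigned int> >
bucket_columns (const std::vector<unsigned int> &cols,
                const unsigned int               n_blocks,
                const unsigned int               bs)
{
  std::vector<std::vector<unsigned int> > per_block (n_blocks);
  for (unsigned int i=0; i<cols.size(); ++i)
    {
      // pair (block number, index within block), cf. global_to_local()
      const std::pair<unsigned int,unsigned int> idx (cols[i] / bs,
                                                      cols[i] % bs);
      per_block[idx.first].push_back (idx.second);
    }
  return per_block;
}
// ---------------------------------------------------------------------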
+ const std::pair + row_index = this->row_indices.global_to_local (row); + for (unsigned int block_col=0; block_col + add_entries (row_index.second, + block_column_indices[block_col].begin(), + block_column_indices[block_col].begin()+counter_within_block[block_col]); + } +} + + + template inline bool diff --git a/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h index 584f6580a4..059b8049f1 100644 --- a/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_set_sparsity_pattern.h @@ -216,7 +216,18 @@ class CompressedSetSparsityPattern : public Subscriptor */ void add (const unsigned int i, const unsigned int j); - + + /** + * Add several nonzero entries to the + * specified row of the matrix. If the + * entries already exist, nothing bad + * happens. + */ + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end); + /** * Check if a value at a certain * position may be non-zero. @@ -375,6 +386,14 @@ class CompressedSetSparsityPattern : public Subscriptor * this line. */ void add (const unsigned int col_num); + + /** + * Add the columns specified by the + * iterator range to this line. + */ + template + void add_entries (ForwardIterator begin, + ForwardIterator end); }; @@ -405,6 +424,19 @@ CompressedSetSparsityPattern::Line::add (const unsigned int j) +template +inline +void +CompressedSetSparsityPattern::Line::add_entries (ForwardIterator begin, + ForwardIterator end) +{ + // right now, just forward to the individual function. + for (ForwardIterator it = begin; it != end; ++it) + add (*it); +} + + + inline unsigned int CompressedSetSparsityPattern::n_rows () const @@ -436,6 +468,20 @@ CompressedSetSparsityPattern::add (const unsigned int i, +template +inline +void +CompressedSetSparsityPattern::add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end) +{ + Assert (row < rows, ExcIndexRange (row, 0, rows)); + + lines[row].add_entries (begin, end); +} + + + inline unsigned int CompressedSetSparsityPattern::row_length (const unsigned int row) const diff --git a/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h index 69b99aa2b8..57033f93d6 100644 --- a/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h @@ -186,6 +186,17 @@ class CompressedSimpleSparsityPattern : public Subscriptor void add (const unsigned int i, const unsigned int j); + /** + * Add several nonzero entries to the + * specified row of the matrix. If the + * entries already exist, nothing bad + * happens. + */ + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end); + /** * Check if a value at a certain * position may be non-zero. @@ -347,6 +358,14 @@ class CompressedSimpleSparsityPattern : public Subscriptor * this line. */ void add (const unsigned int col_num); + + /** + * Add the columns specified by the + * iterator range to this line. 
+ */ + template + void add_entries (ForwardIterator begin, + ForwardIterator end); }; @@ -393,6 +412,77 @@ CompressedSimpleSparsityPattern::Line::add (const unsigned int j) +template +inline +void +CompressedSimpleSparsityPattern::Line::add_entries (ForwardIterator begin, + ForwardIterator end) +{ + if (end - begin <= 0) + return; + + ForwardIterator my_it = begin; + const unsigned int n_cols = static_cast(end - begin); + + // If necessary, increase the size of the array. In order to avoid + // allocating just a few entries every time, use five elements at a time + // at least. + const unsigned int stop_size = entries.size() + (n_cols > 5 ? n_cols : 5); + if (stop_size > entries.capacity()) + entries.reserve (stop_size); + + unsigned int col = *my_it; + std::vector::iterator it, it2; + // insert the first element as for one entry only first check the last + // element (or if line is still empty) + if ( (entries.size()==0) || ( entries.back() < col) ) { + entries.push_back(col); + it = entries.end()-1; + } + else { + // do a binary search to find the place where to insert: + it2 = std::lower_bound(entries.begin(), entries.end(), col); + + // If this entry is a duplicate, continue immediately Insert at the + // right place in the vector. Vector grows automatically to fit + // elements. Always doubles its size. + if (*it2 != col) + it = entries.insert(it2, col); + else + it = it2; + } + + ++my_it; + // Now try to be smart and insert with bias in the direction we are + // walking. This has the advantage that for sorted lists, we always search + // in the right direction, what should decrease the work needed in here. + for ( ; my_it != end; ++my_it) + { + col = *my_it; + // need a special insertion command when we're at the end of the list + if (col > entries.back()) { + entries.push_back(col); + it = entries.end()-1; + } + // search to the right (preferred search direction) + else if (col > *it) { + it2 = std::lower_bound(it++, entries.end(), col); + if (*it2 != col) + it = entries.insert(it2, col); + } + // search to the left + else if (col < *it) { + it2 = std::lower_bound(entries.begin(), it, col); + if (*it2 != col) + it = entries.insert(it2, col); + } + // if we're neither larger nor smaller, then this was a duplicate and + // we can just continue. + } +} + + + inline unsigned int CompressedSimpleSparsityPattern::n_rows () const @@ -424,6 +514,20 @@ CompressedSimpleSparsityPattern::add (const unsigned int i, +template +inline +void +CompressedSimpleSparsityPattern::add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end) +{ + Assert (row < rows, ExcIndexRange (row, 0, rows)); + + lines[row].add_entries (begin, end); +} + + + inline CompressedSimpleSparsityPattern::Line::Line () {} diff --git a/deal.II/lac/include/lac/compressed_sparsity_pattern.h b/deal.II/lac/include/lac/compressed_sparsity_pattern.h index 0a329f3a52..2ad866c797 100644 --- a/deal.II/lac/include/lac/compressed_sparsity_pattern.h +++ b/deal.II/lac/include/lac/compressed_sparsity_pattern.h @@ -189,7 +189,18 @@ class CompressedSparsityPattern : public Subscriptor */ void add (const unsigned int i, const unsigned int j); - + + /** + * Add several nonzero entries to the + * specified row of the matrix. If the + * entries already exist, nothing bad + * happens. + */ + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end); + /** * Check if a value at a certain * position may be non-zero. 
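// ---------------------------------------------------------------------
// A compact re-statement (not part of the patch) of what the
// Line::add_entries() algorithm above achieves: each row is kept as a
// sorted, duplicate-free std::vector, and a range is merged in by binary
// search. This simpler version searches the whole row for every element;
// the patch additionally biases the search to start at the previous
// insertion point, so already-sorted input degenerates to cheap forward
// scans. insert_sorted_range is an illustrative name only.

#include <algorithm>
#include <vector>

template <typename ForwardIterator>
void insert_sorted_range (std::vector<unsigned int> &entries,
                          ForwardIterator            begin,
                          ForwardIterator            end)
{
  for (ForwardIterator p = begin; p != end; ++p)
    {
      // find the first position not smaller than *p ...
      std::vector<unsigned int>::iterator pos
        = std::lower_bound (entries.begin(), entries.end(), *p);
      // ... and insert unless the column is already present
      if (pos == entries.end() || *pos != *p)
        entries.insert (pos, *p);
    }
}
// ---------------------------------------------------------------------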
@@ -417,6 +428,14 @@ class CompressedSparsityPattern : public Subscriptor */ void add (const unsigned int col_num); + /** + * Add the columns specified by the + * iterator range to this line. + */ + template + void add_entries (ForwardIterator begin, + ForwardIterator end); + /** * Flush the cache my merging it with * the #entries array. @@ -460,6 +479,19 @@ CompressedSparsityPattern::Line::add (const unsigned int j) +template +inline +void +CompressedSparsityPattern::Line::add_entries (ForwardIterator begin, + ForwardIterator end) +{ + // right now, just forward to the individual function. + for (ForwardIterator it = begin; it != end; ++it) + add (*it); +} + + + inline unsigned int CompressedSparsityPattern::n_rows () const @@ -491,6 +523,20 @@ CompressedSparsityPattern::add (const unsigned int i, +template +inline +void +CompressedSparsityPattern::add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end) +{ + Assert (row < rows, ExcIndexRange (row, 0, rows)); + + lines[row].add_entries (begin, end); +} + + + inline CompressedSparsityPattern::Line::Line () : diff --git a/deal.II/lac/include/lac/sparsity_pattern.h b/deal.II/lac/include/lac/sparsity_pattern.h index fdb6b5f534..517c0966dd 100644 --- a/deal.II/lac/include/lac/sparsity_pattern.h +++ b/deal.II/lac/include/lac/sparsity_pattern.h @@ -1054,6 +1054,20 @@ class SparsityPattern : public Subscriptor */ void add (const unsigned int i, const unsigned int j); + + /** + * Add several nonzero entries to the + * specified matrix row. This function + * may only be called for + * non-compressed sparsity patterns. + * + * If some of the entries already + * exist, nothing bad happens. + */ + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end); /** * Make the sparsity pattern diff --git a/deal.II/lac/include/lac/trilinos_sparsity_pattern.h b/deal.II/lac/include/lac/trilinos_sparsity_pattern.h index a6dd7b2941..5fcdb2edf2 100755 --- a/deal.II/lac/include/lac/trilinos_sparsity_pattern.h +++ b/deal.II/lac/include/lac/trilinos_sparsity_pattern.h @@ -807,9 +807,10 @@ namespace TrilinosWrappers * Add several elements in one row to * the sparsity pattern. */ - void add (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices); + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end); //@} /** * @name Access of underlying Trilinos data @@ -1056,28 +1057,6 @@ namespace TrilinosWrappers */ std::auto_ptr graph; - /** - * Scratch array that holds several - * indices that should be written - * into the same row of the sparsity - * pattern. This is to increase the - * speed of this function. - */ - std::vector cached_row_indices; - - /** - * A number that tells how many - * indices currently are active in - * the cache. - */ - unsigned int n_cached_elements; - - /** - * The row that is currently in the - * cache. 
- */ - unsigned int row_in_cache; - friend class SparsityPatternIterators::const_iterator; friend class SparsityPatternIterators::const_iterator::Accessor; }; @@ -1329,43 +1308,23 @@ namespace TrilinosWrappers SparsityPattern::add (const unsigned int i, const unsigned int j) { - // if we want to write an element to the - // row the cache is currently pointed to, - // we just append the data to the cache - if (i == row_in_cache) - { - // if the size is too small, extend the - // cache by 10 elements - if (n_cached_elements >= cached_row_indices.size()) - cached_row_indices.resize(cached_row_indices.size() + 10); - - cached_row_indices[n_cached_elements] = j; - ++n_cached_elements; - return; - } - - // if we are to write another row data, - // we write the cache data into the - // sparsity pattern, and then call this - // function again - add (row_in_cache, n_cached_elements, &cached_row_indices[0]); - row_in_cache = i; - n_cached_elements = 0; - add (i,j); + add_entries (i, &j, &j+1); } + template inline void - SparsityPattern::add (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices) + SparsityPattern::add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end) { - if (n_cols == 0) + if (begin == end) return; - int * col_index_ptr = (int*)(col_indices); + int * col_index_ptr = (int*)(&*begin); + const int n_cols = static_cast(end - begin); compressed = false; const int ierr = graph->InsertGlobalIndices (1, (int*)&row, diff --git a/deal.II/lac/source/sparsity_pattern.cc b/deal.II/lac/source/sparsity_pattern.cc index 4165e97f32..63b2dfa2cf 100644 --- a/deal.II/lac/source/sparsity_pattern.cc +++ b/deal.II/lac/source/sparsity_pattern.cc @@ -864,6 +864,19 @@ SparsityPattern::add (const unsigned int i, } + +template +void +SparsityPattern::add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end) +{ + // right now, just forward to the other function. 
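// ---------------------------------------------------------------------
// A side-by-side sketch (not part of the patch) of the two add_entries()
// styles in this commit: element-wise forwarding to the scalar add(), as
// in the loop below, is the generic fallback; classes with a cheaper bulk
// path (such as the Trilinos wrapper above, which hands the whole range
// to Epetra in one call) implement the range version directly.
// ForwardingPattern and BulkPattern are hypothetical toy types.

#include <set>
#include <vector>

struct ForwardingPattern
{
  std::vector<std::set<unsigned int> > rows;

  void add (const unsigned int i, const unsigned int j)
  { rows[i].insert (j); }

  // generic version: reuse the scalar add() per element
  template <typename ForwardIterator>
  void add_entries (const unsigned int row,
                    ForwardIterator begin, ForwardIterator end)
  {
    for (ForwardIterator it = begin; it != end; ++it)
      add (row, *it);
  }
};

struct BulkPattern
{
  std::vector<std::set<unsigned int> > rows;

  // bulk version: hand the whole range to the container at once
  template <typename ForwardIterator>
  void add_entries (const unsigned int row,
                    ForwardIterator begin, ForwardIterator end)
  {
    rows[row].insert (begin, end);
  }
};
// ---------------------------------------------------------------------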
+  for (ForwardIterator it = begin; it != end; ++it)
+    add (row, *it);
+}
+
+
 bool
 SparsityPattern::exists (const unsigned int i,
                          const unsigned int j) const
@@ -1122,4 +1135,16 @@ partition (const unsigned int n_partitions,
 
 template void SparsityPattern::copy_from<double> (const FullMatrix<double> &, bool);
 template void SparsityPattern::copy_from<float> (const FullMatrix<float> &, bool);
+template void SparsityPattern::add_entries<const unsigned int*> (const unsigned int,
+                                                                 const unsigned int*,
+                                                                 const unsigned int*);
+template void SparsityPattern::add_entries<std::vector<unsigned int>::const_iterator>
+(const unsigned int,
+ std::vector<unsigned int>::const_iterator,
+ std::vector<unsigned int>::const_iterator);
+template void SparsityPattern::add_entries<std::vector<unsigned int>::iterator>
+(const unsigned int,
+ std::vector<unsigned int>::iterator,
+ std::vector<unsigned int>::iterator);
+
 DEAL_II_NAMESPACE_CLOSE
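The explicit instantiations above are needed because add_entries() is defined
in a source file rather than the header; the two iterator types actually used
by the library are instantiated there. A sketch (not part of the patch) of
why the Trilinos version below can skip the loop entirely: the iterator range
is converted to a raw pointer via &*begin, which is only valid for iterators
into contiguous storage such as a std::vector or a plain array (a std::set
iterator would not do). bulk_insert and forward_range are illustrative names;
the real call is Epetra_FECrsGraph::InsertGlobalIndices(), as shown in the
header diff above.

#include <cstdio>
#include <vector>

// stand-in for the Epetra call, which wants (count, pointer) arguments
void bulk_insert (const int n_cols, const int *cols)
{
  for (int c=0; c<n_cols; ++c)
    std::printf ("inserting column %d\n", cols[c]);
}

void forward_range (const std::vector<int> &cols)
{
  if (cols.empty())
    return;
  // contiguous storage guarantees &*begin addresses the whole range; the
  // patch additionally applies an (int*) cast since deal.II stores
  // unsigned indices
  bulk_insert (static_cast<int>(cols.size()), &*cols.begin());
}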
diff --git a/deal.II/lac/source/trilinos_sparsity_pattern.cc b/deal.II/lac/source/trilinos_sparsity_pattern.cc
index 2bac870be1..7d1a6f1ec5 100755
--- a/deal.II/lac/source/trilinos_sparsity_pattern.cc
+++ b/deal.II/lac/source/trilinos_sparsity_pattern.cc
@@ -90,10 +90,7 @@ namespace TrilinosWrappers
                   col_map (row_map),
                   compressed (true),
                   graph (std::auto_ptr<Epetra_FECrsGraph>
-                         (new Epetra_FECrsGraph(View, row_map, col_map, 0))),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                         (new Epetra_FECrsGraph(View, row_map, col_map, 0)))
   {
     graph->FillComplete();
   }
@@ -124,10 +121,7 @@ namespace TrilinosWrappers
                          (new Epetra_FECrsGraph(Copy, row_map, col_map,
                                                 (int)n_entries_per_row,
                                                 false)))
-                  ),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                  )
   {}
 
   SparsityPattern::SparsityPattern (const Epetra_Map  &InputMap,
@@ -148,10 +142,7 @@ namespace TrilinosWrappers
                          (int*)const_cast<unsigned int*>
                            (&(n_entries_per_row[row_map.MinMyGID()])),
                          false)))
-                  ),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                  )
   {}
 
   SparsityPattern::SparsityPattern (const Epetra_Map  &InputRowMap,
@@ -171,10 +162,7 @@ namespace TrilinosWrappers
                          (new Epetra_FECrsGraph(Copy, row_map, col_map,
                                                 (int)n_entries_per_row,
                                                 false)))
-                  ),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                  )
   {}
 
   SparsityPattern::SparsityPattern (const Epetra_Map  &InputRowMap,
@@ -196,10 +184,7 @@ namespace TrilinosWrappers
                          (int*)const_cast<unsigned int*>
                            (&(n_entries_per_row[row_map.MinMyGID()])),
                          false)))
-                  ),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                  )
   {}
 
   SparsityPattern::SparsityPattern (const unsigned int m,
@@ -216,10 +201,7 @@ namespace TrilinosWrappers
                   compressed (false),
                   graph (std::auto_ptr<Epetra_FECrsGraph>
                          (new Epetra_FECrsGraph(Copy, row_map, col_map,
-                                                int(n_entries_per_row), false))),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                                                int(n_entries_per_row), false)))
   {}
 
   SparsityPattern::SparsityPattern (const unsigned int m,
@@ -237,10 +219,7 @@ namespace TrilinosWrappers
                   graph (std::auto_ptr<Epetra_FECrsGraph>
                          (new Epetra_FECrsGraph(Copy, row_map, col_map,
                                                 (int*)const_cast<unsigned int*>(&(n_entries_per_row[0])),
-                                                false))),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                                                false)))
   {}
 
   // Copy function is currently not working
@@ -255,10 +234,7 @@ namespace TrilinosWrappers
                   col_map (InputSP.col_map),
                   compressed (false),
                   graph (std::auto_ptr<Epetra_FECrsGraph>
-                         (new Epetra_FECrsGraph(*InputSP.graph))),
-                  cached_row_indices (1),
-                  n_cached_elements (0),
-                  row_in_cache (0)
+                         (new Epetra_FECrsGraph(*InputSP.graph)))
   {}
   */
 
@@ -321,9 +297,6 @@ namespace TrilinosWrappers
     else
       graph = std::auto_ptr<Epetra_FECrsGraph>
         (new Epetra_FECrsGraph(Copy, row_map, col_map, n_entries_per_row, false));
-
-    n_cached_elements = 0;
-    row_in_cache = 0;
   }
 
@@ -380,9 +353,6 @@ namespace TrilinosWrappers
         (new Epetra_FECrsGraph(Copy, row_map, col_map,
                                n_entries_per_row[input_row_map.MinMyGID()],
                                false));
-
-    n_cached_elements = 0;
-    row_in_cache = 0;
   }
 
@@ -579,9 +549,6 @@ namespace TrilinosWrappers
     graph->FillComplete();
 
-    n_cached_elements = 0;
-    row_in_cache = 0;
-
     compressed = true;
   }
 
@@ -590,14 +557,6 @@ namespace TrilinosWrappers
   void
   SparsityPattern::compress ()
   {
-    // flush buffers
-    if (n_cached_elements > 0)
-      {
-        add (row_in_cache, n_cached_elements, &cached_row_indices[0]);
-        n_cached_elements = 0;
-        row_in_cache = 0;
-      }
-
    int ierr;
    ierr = graph->GlobalAssemble (col_map, row_map, true);
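With the per-row cache removed, compress() no longer has buffers to flush
before calling GlobalAssemble(). A hedged usage sketch of the new interface,
written against the classes as they stand in this revision (header path
assumed from this tree's layout; example() and the index values are
illustrative only):

#include <lac/compressed_sparsity_pattern.h>
#include <vector>

void example ()
{
  // an 8x8 pattern, filled row by row with collective calls
  CompressedSparsityPattern csp (8, 8);

  // the couplings of one "cell": each of these rows receives the whole
  // set of column indices in a single add_entries() call; duplicate
  // entries are harmless
  std::vector<unsigned int> dofs;
  dofs.push_back (2);
  dofs.push_back (3);
  dofs.push_back (5);

  for (unsigned int i=0; i<dofs.size(); ++i)
    csp.add_entries (dofs[i], dofs.begin(), dofs.end());
}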