* than one accessor can access
* this data if necessary.
*/
- std_cxx1x::shared_ptr<const std::vector<unsigned int> > colnum_cache;
+ std_cxx1x::shared_ptr<std::vector<unsigned int> > colnum_cache;
/**
* Similar cache for the values
* of this row.
*/
- std_cxx1x::shared_ptr<const std::vector<TrilinosScalar> > value_cache;
+ std_cxx1x::shared_ptr<std::vector<TrilinosScalar> > value_cache;
/**
* Discard the old row caches
temp_vector.reinit(dst, true);
- vmult (temp_vector, src);
+ Tvmult (temp_vector, src);
dst += temp_vector;
}
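A minimal, self-contained sketch (not part of the patch) of the pattern this hunk corrects: the transpose multiply-and-add fills a scratch vector with the transpose product (Tvmult, not vmult) and then accumulates it into the destination. MyMatrix and its dense storage are assumptions made for this sketch only, not deal.II or Trilinos API.

#include <cstddef>
#include <vector>

struct MyMatrix
{
  std::vector<std::vector<double> > a;   // dense storage: a[row][col]

  // y = A^T x
  void Tvmult (std::vector<double> &y, const std::vector<double> &x) const
  {
    y.assign (a.empty () ? 0 : a[0].size (), 0.);
    for (std::size_t i = 0; i < a.size (); ++i)
      for (std::size_t j = 0; j < a[i].size (); ++j)
        y[j] += a[i][j] * x[i];
  }

  // y += A^T x : fill the scratch vector with Tvmult, not vmult, then add
  void Tvmult_add (std::vector<double> &y, const std::vector<double> &x) const
  {
    std::vector<double> temp;
    Tvmult (temp, x);
    for (std::size_t j = 0; j < y.size (); ++j)
      y[j] += temp[j];
  }
};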
explicit Vector (const Epetra_Map &parallel_partitioning,
const VectorBase &v);
+ /**
+ * Reinitialize from a deal.II
+ * vector. The Epetra_Map specifies the
+ * %parallel partitioning.
+ */
+ template <typename number>
+ void reinit (const Epetra_Map &parallel_partitioner,
+ const dealii::Vector<number> &v);
+
/**
* Reinit functionality. This
* function destroys the old
Vector::Vector (const Epetra_Map &input_map,
const dealii::Vector<number> &v)
{
- vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(input_map));
-
- const int min_my_id = input_map.MinMyGID();
- const int size = input_map.NumMyElements();
-
- Assert (input_map.MaxLID() == size-1,
- ExcDimensionMismatch(input_map.MaxLID(), size-1));
-
- // Need to copy out values, since the
- // deal.II might not use doubles, so
- // that a direct access is not possible.
- for (int i=0; i<size; ++i)
- (*vector)[0][i] = v(i);
+ reinit (input_map, v);
}
+
+ template <typename number>
+ void Vector::reinit (const Epetra_Map &parallel_partitioner,
+ const dealii::Vector<number> &v)
+ {
+ if (vector.get() == 0 || vector->Map().SameAs(parallel_partitioner) == false)
+ vector = std::auto_ptr<Epetra_FEVector>
+ (new Epetra_FEVector(parallel_partitioner));
+
+ const int size = parallel_partitioner.NumMyElements();
+
+ // Need to copy out the values, since the
+ // deal.II vector might not store doubles, so
+ // direct access is not possible.
+ for (int i=0; i<size; ++i)
+ (*vector)[0][i] = v(parallel_partitioner.GID(i));
+ }
+
+
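The reinit() added above reuses the existing Epetra_FEVector when the parallel partitioning is unchanged and copies the source entries one by one, so that a deal.II vector of a different scalar type is converted on the fly. A standalone sketch of that logic under stated assumptions (Partition and LocalVector are illustrative stand-ins, not the Trilinos classes):

#include <cstddef>
#include <vector>

struct Partition                        // stands in for Epetra_Map
{
  std::vector<int> global_ids;          // global index of each locally owned element
  std::size_t n_local () const { return global_ids.size (); }
  bool same_as (const Partition &other) const { return global_ids == other.global_ids; }
};

struct LocalVector                      // stands in for Epetra_FEVector
{
  Partition           partition;
  std::vector<double> values;

  template <typename number>
  void reinit (const Partition &p, const std::vector<number> &v)
  {
    // reallocate the local storage only if the partitioning actually changed
    if (!partition.same_as (p))
      {
        partition = p;
        values.assign (p.n_local (), 0.);
      }

    // copy the values out one by one through the local-to-global map; the
    // source may use a different scalar type, so a direct memory copy is
    // not possible
    for (std::size_t i = 0; i < p.n_local (); ++i)
      values[i] = static_cast<double> (v[p.global_ids[i]]);
  }
};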
inline
Vector &
Vector::operator = (const TrilinosScalar s)
{
if (size() != v.size())
{
- *vector = std::auto_ptr<Epetra_FEVector>
+ vector = std::auto_ptr<Epetra_FEVector>
(new Epetra_FEVector(Epetra_Map (v.size(), 0,
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
Epetra_MpiComm(MPI_COMM_SELF)
vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
}
- Epetra_Map & map = vector_partitioner();
- const int min_my_id = map.MinMyGID();
+ const Epetra_Map & map = vector_partitioner();
const int size = map.NumMyElements();
Assert (map.MaxLID() == size-1,
// row
int ncols;
int colnums = matrix->n();
- TrilinosScalar *values = new TrilinosScalar(colnums);
+ if (value_cache.get() == 0)
+ {
+ value_cache.reset (new std::vector<TrilinosScalar> (matrix->n()));
+ colnum_cache.reset (new std::vector<unsigned int> (matrix->n()));
+ }
+ else
+ {
+ value_cache->resize (matrix->n());
+ colnum_cache->resize (matrix->n());
+ }
- int ierr;
- ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy((int)this->a_row,
- colnums,
- ncols, &(values[0]));
+ int ierr = matrix->trilinos_matrix().
+ ExtractGlobalRowCopy((int)this->a_row,
+ colnums,
+ ncols, &((*value_cache)[0]),
+ reinterpret_cast<int*>(&((*colnum_cache)[0])));
+ value_cache->resize (ncols);
+ colnum_cache->resize (ncols);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
// copy it into our caches if the
// we shouldn't have initialized an
// iterator for an empty line (what
// would it point to?)
- Assert (ncols != 0, ExcInternalError());
- colnum_cache.reset (new std::vector<unsigned int> (colnums,
- colnums+ncols));
- value_cache.reset (new std::vector<TrilinosScalar> (values, values+ncols));
}
}
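The caching scheme above can be sketched without Trilinos: allocate the column/value caches lazily at the maximum possible row length, fill them through a row-copy call that reports the actual number of entries, and shrink them to that count. CsrMatrix and RowCache are hypothetical stand-ins for this sketch; std::shared_ptr plays the role of std_cxx1x::shared_ptr.

#include <cstddef>
#include <memory>
#include <vector>

struct CsrMatrix
{
  std::size_t               n_cols;      // assumed > 0
  std::vector<std::size_t>  row_start;   // size n_rows+1
  std::vector<unsigned int> col_index;   // concatenated column indices
  std::vector<double>       value;       // concatenated values

  // analogue of ExtractGlobalRowCopy(): copy row 'r' into caller-provided
  // buffers of length 'buffer_size' and return the number of entries written
  std::size_t copy_row (const std::size_t r, const std::size_t buffer_size,
                        unsigned int *cols, double *vals) const
  {
    const std::size_t n = row_start[r + 1] - row_start[r];
    for (std::size_t k = 0; k < n && k < buffer_size; ++k)
      {
        cols[k] = col_index[row_start[r] + k];
        vals[k] = value[row_start[r] + k];
      }
    return n;
  }
};

struct RowCache
{
  std::shared_ptr<std::vector<unsigned int> > colnum_cache;
  std::shared_ptr<std::vector<double> >       value_cache;

  void visit_row (const CsrMatrix &m, const std::size_t row)
  {
    // allocate the caches on first use, otherwise just resize them
    if (value_cache.get () == 0)
      {
        value_cache.reset  (new std::vector<double> (m.n_cols));
        colnum_cache.reset (new std::vector<unsigned int> (m.n_cols));
      }
    else
      {
        value_cache->resize  (m.n_cols);
        colnum_cache->resize (m.n_cols);
      }

    const std::size_t n_entries =
      m.copy_row (row, m.n_cols, &(*colnum_cache)[0], &(*value_cache)[0]);

    // shrink to the number of entries this row actually stores
    value_cache->resize  (n_entries);
    colnum_cache->resize (n_entries);
  }
};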
{
matrix->ExtractMyRowView (i, num_entries, values, indices);
for (int j=0; j<num_entries; ++j)
- out << "(" << i << "," << indices[matrix->GRID(j)] << ") "
+ out << "(" << matrix->GRID(i) << "," << matrix->GCID(indices[j]) << ") "
<< values[j] << std::endl;
}
}
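The corrected print loop maps both the stored row and column indices from local to global numbering before printing. A small illustration of that translation, with LocalMatrixView and its members as assumptions standing in for Epetra's GRID()/GCID():

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

struct LocalMatrixView
{
  std::vector<int> local_to_global_row;   // plays the role of GRID()
  std::vector<int> local_to_global_col;   // plays the role of GCID()
  std::vector<std::vector<std::pair<int,double> > > rows;  // (local column, value)

  void print (std::ostream &out) const
  {
    for (std::size_t i = 0; i < rows.size (); ++i)
      for (std::size_t j = 0; j < rows[i].size (); ++j)
        // translate both indices into global numbering; note that the column
        // map is applied to the stored local index, not to the loop counter
        out << "(" << local_to_global_row[i] << ","
            << local_to_global_col[rows[i][j].first] << ") "
            << rows[i][j].second << std::endl;
  }
};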