next_level = this->level()+1;
else
next_level = 0;
-
+
TriaIterator<DoFAccessor<structdim,DH> > q (this->tria,
next_level,
this->child_index (i),
this->dof_handler);
-
+
// make sure that we either created
// a past-the-end iterator or one
// pointing to a used cell
local_index,
global_index);
}
-
+
template <int spacedim>
static
local_index,
global_index);
}
-
-
+
+
template <int spacedim>
static
unsigned int
get_dof_index (dof_handler,
obj_index,
fe_index,
- local_index);
+ local_index);
}
fe_index,
local_index,
global_index,
- obj_level);
+ obj_level);
}
fe_index,
local_index,
global_index,
- obj_level);
+ obj_level);
}
fe_index,
local_index,
global_index,
- obj_level);
+ obj_level);
}
ExcMessage ("This cell is not active and therefore can't be "
"queried for its active FE indices"));
Assert (n == 0, ExcIndexRange (n, 0, 1));
-
+
return dealii::DoFHandler<dim,spacedim>::default_fe_index;
}
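          // (A non-hp DoFHandler only ever uses a single finite element,
          //  so the n-th active fe index exists only for n==0 and is
          //  always the default index returned here.)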
-
+
template <int spacedim>
static
n);
}
-
+
template <int spacedim>
static
Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
const unsigned int this_fe_index = *pointer;
-
+
Assert (this_fe_index != numbers::invalid_unsigned_int,
ExcInternalError());
Assert (this_fe_index < dof_handler.finite_elements->size(),
pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
}
}
-
-
+
+
/**
* Get the @p local_index-th
* degree of freedom
dof_handler.vertex_dofs[vertex_index *
dof_handler.selected_fe->dofs_per_vertex
+ local_index];
- }
+ }
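          // (Layout assumed here: in the non-hp case the vertex dofs are
          //  stored in one flat array with exactly dofs_per_vertex
          //  entries per vertex, so the access above is a plain strided
          //  lookup with no per-vertex bookkeeping.)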
template<int dim, int spacedim>
Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
const unsigned int this_fe_index = *pointer;
-
+
Assert (this_fe_index != numbers::invalid_unsigned_int,
ExcInternalError());
Assert (this_fe_index < dof_handler.finite_elements->size(),
Assert (*pointer != numbers::invalid_unsigned_int,
ExcInternalError());
-
+
unsigned int counter = 0;
while (true)
{
Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
const unsigned int this_fe_index = *pointer;
-
+
if (this_fe_index == numbers::invalid_unsigned_int)
return counter;
else
Assert (*pointer != numbers::invalid_unsigned_int,
ExcInternalError());
-
+
unsigned int counter = 0;
while (true)
{
Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
const unsigned int this_fe_index = *pointer;
-
+
Assert (this_fe_index < dof_handler.finite_elements->size(),
ExcInternalError());
Assert (this_fe_index != numbers::invalid_unsigned_int,
ExcInternalError());
-
+
pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
- ++counter;
+ ++counter;
}
}
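      // (Storage layout assumed by the walks above: in the hp case each
      //  vertex stores a sequence of records of the form
      //    [fe_index, dof_0, ..., dof_{dofs_per_vertex-1}]
      //  terminated by numbers::invalid_unsigned_int; advancing the
      //  pointer by dofs_per_vertex+1 skips exactly one record, and
      //  'counter' therefore ends up as the number of fe indices active
      //  on the vertex.)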
-
+
/**
Assert (*pointer != numbers::invalid_unsigned_int,
ExcInternalError());
-
+
while (true)
{
Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
const unsigned int this_fe_index = *pointer;
-
+
Assert (this_fe_index < dof_handler.finite_elements->size(),
ExcInternalError());
pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
}
}
-
+
};
}
}
DoFAccessor<dim,DH>::dof_index (const unsigned int i,
const unsigned int fe_index) const
{
- // access the respective DoF
+ // access the respective DoF
return internal::DoFAccessor::Implementation::get_dof_index (*this->dof_handler,
this->level(),
this->present_index,
}
-
+
template <int dim, int spacedim>
inline
const FiniteElement<dim,spacedim> &
{
Assert (fe_index_is_active (fe_index) == true,
ExcMessage ("This function can only be called for active fe indices"));
-
+
return internal::DoFAccessor::get_fe (this->dof_handler->get_fe(), fe_index);
}
}
-
+
template <class DH>
void get_dof_indices (const dealii::DoFAccessor<2,DH> &accessor,
std::vector<unsigned int> &dof_indices,
*next++ = accessor.dof_index(d,fe_index);
}
-
+
template <class DH>
void get_dof_indices (const dealii::DoFAccessor<3,DH> &accessor,
default:
Assert (false, ExcNotImplemented());
}
-
+
// this function really only makes
// sense if either a) there are
Assert (structdim > 1, ExcImpossibleInDim(structdim));
// checking of 'i' happens in
// line_index(i)
-
+
return typename internal::DoFHandler::Iterators<DH>::line_iterator
(
this->tria,
// internal::DoFCellAccessor
using dealii::DoFCellAccessor;
using dealii::DoFHandler;
-
+
/**
* A class with the same purpose as the similarly named class of the
* Triangulation class. See there for more information.
(accessor.get_fe().dofs_per_cell !=
accessor.get_fe().dofs_per_vertex * GeometryInfo<1>::vertices_per_cell))
return;
-
+
const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
dofs_per_line = accessor.get_fe().dofs_per_line,
dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
+
// make sure the cache is at least
// as big as we need it when
// writing to the last element of
accessor.dof_handler->levels[accessor.present_level]
->cell_dof_indices_cache.size(),
ExcInternalError());
-
+
std::vector<unsigned int>::iterator next
= (accessor.dof_handler->levels[accessor.present_level]
->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
-
+
for (unsigned int vertex=0; vertex<2; ++vertex)
for (unsigned int d=0; d<dofs_per_vertex; ++d)
*next++ = accessor.vertex_dof_index(vertex,d);
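          // (The dof indices cache is one flat array per refinement
          //  level, dofs_per_cell entries per cell, so the iterator set
          //  up above starts at this cell's slot and is simply advanced
          //  as the indices are written.)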
(accessor.get_fe().dofs_per_cell !=
accessor.get_fe().dofs_per_vertex * GeometryInfo<2>::vertices_per_cell))
return;
-
+
const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
dofs_per_line = accessor.get_fe().dofs_per_line,
dofs_per_quad = accessor.get_fe().dofs_per_quad,
(accessor.get_fe().dofs_per_cell !=
accessor.get_fe().dofs_per_vertex * GeometryInfo<3>::vertices_per_cell))
return;
-
+
const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
dofs_per_line = accessor.get_fe().dofs_per_line,
dofs_per_quad = accessor.get_fe().dofs_per_quad,
update_cell_dof_indices_cache (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &)
{
//TODO[WB]: should implement a dof indices cache for hp as well
-
+
// not implemented, but should also
// not be called
Assert (false, ExcNotImplemented());
(accessor.get_fe().dofs_per_cell ==
accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
+
unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
+ ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
dof_indices[i] = *cache;
- }
+ }
/**
* Same function as above except
get_dof_indices (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
std::vector<unsigned int> &dof_indices)
{
- // no caching for hp::DoFHandler implemented
+ // no caching for hp::DoFHandler implemented
accessor.dealii::DoFAccessor<dim,dealii::hp::DoFHandler<dim,spacedim> >::get_dof_indices (dof_indices,
accessor.active_fe_index());
}
(accessor.get_fe().dofs_per_cell ==
accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
+
unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
+ ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
local_values(i) = values(*cache);
- }
+ }
/**
* Same function as above except
// no caching for hp::DoFHandler
// implemented
const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
+
std::vector<unsigned int> local_dof_indices (dofs_per_cell);
get_dof_indices (accessor, local_dof_indices);
(accessor.get_fe().dofs_per_cell ==
accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
+
unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
+ ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
values(*cache) = local_values(i);
- }
+ }
// no caching for hp::DoFHandler
// implemented
const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
+
std::vector<unsigned int> local_dof_indices (dofs_per_cell);
get_dof_indices (accessor, local_dof_indices);
for (unsigned int i=0; i<dofs_per_cell; ++i)
values(local_dof_indices[i]) = local_values(i);
}
-
+
/**
* Do what the active_fe_index
->cell_dof_indices_cache[accessor.present_index * n_dofs];
// distribute cell vector
- global_destination.add(n_dofs, dofs,
+ global_destination.add(n_dofs, dofs,
&(*const_cast<dealii::Vector<number>*>(&local_source))(0));
}
const unsigned int n_dofs = local_source.size();
//TODO[WB/MK]: This function could be made more efficient because it allocates memory, which could be avoided by passing in another argument as a scratch array. This should be fixed eventually.
-
+
// get indices of dofs
std::vector<unsigned int> dofs (n_dofs);
accessor.get_dof_indices (dofs);
-
+
// distribute cell vector
global_destination.add (dofs, local_source);
}
// distribute cell matrices
for (unsigned int i=0; i<n_dofs; ++i)
- global_destination.add(dofs[i], n_dofs, dofs,
+ global_destination.add(dofs[i], n_dofs, dofs,
&local_source(i,0));
}
const unsigned int n_dofs = local_source.size();
//TODO[WB/MK]: This function could be made more efficient because it allocates memory, which could be avoided by passing in another argument as a scratch array.
-
+
// get indices of dofs
std::vector<unsigned int> dofs (n_dofs);
accessor.get_dof_indices (dofs);
-
+
// distribute cell vector
global_destination.add(dofs,local_source);
}
this->neighbor_level (i),
this->neighbor_index (i),
this->dof_handler);
-
+
#ifdef DEBUG
if (q.state() != IteratorState::past_the_end)
Assert (q->used(), TriaAccessorExceptions::ExcUnusedCellAsNeighbor());
this->level()+1,
this->child_index (i),
this->dof_handler);
-
+
#ifdef DEBUG
if (q.state() != IteratorState::past_the_end)
Assert (q->used(), TriaAccessorExceptions::ExcUnusedCellAsChild());
Assert (static_cast<unsigned int>(this->level()) < this->dof_handler->levels.size(),
ExcMessage ("DoFHandler not initialized"));
- const unsigned int dim = DH::dimension;
+ const unsigned int dim = DH::dimension;
Assert (dim > 1, ExcImpossibleInDim(1));
switch (dim)
0,
this->line_index (i),
this->dof_handler);
-
+
case 3:
return typename internal::DoFHandler::Iterators<DH>::face_iterator
(this->tria,
0,
this->quad_index (i),
this->dof_handler);
-
+
default:
Assert (false, ExcNotImplemented());
return typename internal::DoFHandler::Iterators<DH>::face_iterator();
std::size_t glob_size = derivatives.memory_consumption() +
indices_local_to_global.memory_consumption() +
constraints.memory_consumption() +
- small_matrix.memory_consumption() +
+ small_matrix.memory_consumption() +
diagonal_values.memory_consumption() + sizeof(*this);
return glob_size;
}
system_matrix.get_constraints().close();
std::cout.precision(4);
std::cout << "System matrix memory consumption: "
- << (double)system_matrix.memory_consumption()*std::pow(2.,-20.)
+ << (double)system_matrix.memory_consumption()*std::pow(2.,-20.)
<< " MBytes."
<< std::endl;
(transpose
(fe_values.inverse_jacobian(q)) *
fe_values.inverse_jacobian(q)) *
- fe_values.JxW(q) *
+ fe_values.JxW(q) *
coefficient_values[q]);
++cell_no[level];
{
GridGenerator::hyper_cube (triangulation);
- triangulation.refine_global (3);
+ triangulation.refine_global (5);
}
else
refine_grid ();
// two solutions for equality.
solution1-=solution2;
const double difference=solution1.linfty_norm();
- if (difference>1e-13)
+ if (difference>1e-12)
std::cout << "solution1 and solution2 differ!!" << std::endl;
else
std::cout << "solution1 and solution2 coincide." << std::endl;
}
+ cell->distribute_local_to_global (cell_rhs, system_rhs);
+ cell->distribute_local_to_global (cell_matrix, system_matrix);
+ /*
cell->get_dof_indices (local_dof_indices);
for (unsigned int i=0; i<dofs_per_cell; ++i)
{
system_rhs(local_dof_indices[i]) += cell_rhs(i);
}
+ */
}
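      // (The two distribute_local_to_global() calls above scatter the
      //  local contributions into the global matrix and right hand side
      //  via the cell's dof indices, replacing the manual
      //  get_dof_indices() loop that is kept, commented out, for
      //  reference.)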
// With the matrix so built, we use
* sparsity pattern and the constraints; third, the global matrix is
* assembled; and fourth, the matrix is finally condensed. To do these steps,
* you have (at least) two possibilities:
- *
+ *
* <ul>
* <li> Use two different sparsity patterns and two different matrices: you
* may eliminate the lines and rows connected with a constraint and create a
* PETSc/Trilinos matrices, you can either copy an already condensed deal.II
* matrix, or build the PETSc/Trilinos matrix in the already condensed form,
* see the discussion below.
- *
- *
+ *
+ *
* <h5>Condensing vectors</h5>
- *
+ *
* Condensing vectors works exactly as described above for matrices. Note that
* condensation is an idempotent operation, i.e. doing it more than once on a
* vector or matrix yields the same result as doing it only once: once an
* The use of ConstraintMatrix for implementing Dirichlet boundary conditions
* is discussed in the @ref step_22 "step-22" tutorial program.
*
- *
+ *
* <h3>Avoiding explicit condensation</h3>
*
* Sometimes, one wants to avoid explicit condensation of a linear system
* after it has been built at all. There are two main reasons for wanting to
* do so:
*
- * <ul>
+ * <ul>
* <li>
* Condensation is an expensive operation, in particular if there
* are many constraints and/or if the matrix has many nonzero entries. Both
* matrices and right hand side vectors, whereas the distribute() function
* discussed below is applied to the solution vector after solving the linear
* system.
- *
- *
+ *
+ *
* <h3>Distributing constraints</h3>
- *
+ *
* After solving the condensed system of equations, the solution vector has
* to be redistributed. This is done by the two distribute() functions, one
* working with two vectors, one working in-place. The operation of
* again.
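 *
 * A minimal usage sketch of the complete cycle (names are illustrative
 * only, assuming matrix, right hand side, and solution vector have been
 * set up in the usual way):
 * @verbatim
 *   constraints.condense (system_matrix);
 *   constraints.condense (system_rhs);
 *   solver.solve (system_matrix, solution, system_rhs, preconditioner);
 *   constraints.distribute (solution);
 * @endverbatim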
*/
void shift (const unsigned int offset);
-
+
/**
* Clear all entries of this
* matrix. Reset the flag determining
* @name Querying constraints
* @{
*/
-
+
/**
* Return number of constraints stored in
* this matrix.
* from one.
*/
bool is_identity_constrained (const unsigned int index) const;
-
+
/**
* Return the maximum number of other
* dofs that one dof is constrained
/**
* @}
*/
-
+
/**
* @name Eliminating constraints from linear systems after their creation
* @{
* patterns.
*/
void condense (BlockCompressedSimpleSparsityPattern &sparsity) const;
-
+
/**
* Condense a given matrix. The
*/
template <typename number>
void condense (BlockSparseMatrix<number> &matrix) const;
-
+
/**
* Condense the given vector @p
* uncondensed into @p condensed. It is
* @name Eliminating constraints from linear systems during their creation
* @{
*/
-
+
/**
* This function takes a vector of
* local contributions (@p
template <class VectorType>
void set_zero (VectorType &vec) const;
-
+
/**
* @}
*/
* @name Dealing with constraints after solving a linear system
* @{
*/
-
+
/**
* Re-distribute the elements of the
* vector @p condensed to @p
<< "to another DoF with number " << arg1
<< ", which however is constrained by this object. This is not"
<< " allowed.");
-
+
private:
/**
* misses. This should also be fixed by
* using the O(1) algorithm to access
* the fields of this array.
- *
+ *
* The field is useful in a number of
* other contexts as well, though.
*/
std::vector<bool> constraint_line_exists;
-
+
/**
* Store whether the arrays are sorted.
* If so, no new entries can be added.
*/
std::vector<ConstraintLine>::iterator
find_constraint (const unsigned int line);
-
+
#ifdef DEAL_II_USE_TRILINOS
-//TODO: Make use of the following member thread safe
+//TODO: Make the use of the following member thread-safe
/**
* This vector is used to import data
* within the distribute function.
{
Assert (sorted==false, ExcMatrixIsClosed());
-
-
+
+
// check whether line already exists; it
// may, in which case we can just quit
if ((line < constraint_line_exists.size())
if (line >= constraint_line_exists.size())
constraint_line_exists.resize (line+1, false);
constraint_line_exists[line] = true;
-
+
// push a new line to the end of the
// list
lines.push_back (ConstraintLine());
ExcEntryAlreadyExists(line, column, p->second, value));
return;
}
-
+
line_ptr->entries.push_back (std::make_pair(column,value));
}
inline
bool
-ConstraintMatrix::is_constrained (const unsigned int index) const
+ConstraintMatrix::is_constrained (const unsigned int index) const
{
return ((index < constraint_line_exists.size())
&&
inline
bool
-ConstraintMatrix::is_inhomogeneously_constrained (const unsigned int index) const
+ConstraintMatrix::is_inhomogeneously_constrained (const unsigned int index) const
{
- const std::vector<ConstraintLine>::const_iterator position
+ const std::vector<ConstraintLine>::const_iterator position
= find_constraint(index);
return position!=lines.end() ? position->inhomogeneity != 0 : false;
}
inline
-std::vector<ConstraintMatrix::ConstraintLine>::const_iterator
+std::vector<ConstraintMatrix::ConstraintLine>::const_iterator
ConstraintMatrix::find_constraint (const unsigned int line) const
{
if (is_constrained(line) == false)
inline
-std::vector<ConstraintMatrix::ConstraintLine>::iterator
+std::vector<ConstraintMatrix::ConstraintLine>::iterator
ConstraintMatrix::find_constraint (const unsigned int line)
{
Assert (sorted==false, ExcMatrixIsClosed());
*/
static
#ifdef PETSC_USE_64BIT_INDICES
- PetscErrorCode
+ PetscErrorCode
#else
- int
+ int
#endif
convergence_test (KSP ksp,
#ifdef PETSC_USE_64BIT_INDICES
namespace MPI
{
-/**
+/**
* This class implements a wrapper to use the Trilinos distributed
* vector class Epetra_FEVector. This class is derived from the
* TrilinosWrappers::VectorBase class and provides all functionality
* writes. If it encounters an operation of the opposite kind, it calls
* compress() and flips the state. This can sometimes lead to very
* confusing behavior, in code that may for example look like this:
- *
- * @verbatim
+ *
+ * @verbatim
* TrilinosWrappers::Vector vector;
- * // do some write operations on the vector
- * for (unsigned int i=0; i<vector->size(); ++i)
+ * // do some write operations on the vector
+ * for (unsigned int i=0; i<vector.size(); ++i)
* vector(i) = i;
*
* // do some additions to vector elements, but
* Destructor.
*/
~Vector ();
-
+
/**
* Reinit functionality. This
* function destroys the old
* &input_map) function.
*/
template <typename Number>
- Vector &
+ Vector &
operator = (const ::dealii::Vector<Number> &v);
/**
* for example done during the
* solution of linear systems.
*/
- void import_nonlocal_data_for_fe
+ void import_nonlocal_data_for_fe
(const dealii::TrilinosWrappers::SparseMatrix &matrix,
const Vector &vector);
{
u.swap (v);
}
-
+
#ifndef DOXYGEN
map (InputMap)
{
vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector(map));
-
+
const int min_my_id = map.MinMyGID();
const int size = map.NumMyElements();
ExcDimensionMismatch(map.MaxLID(), size-1));
// Need to copy out values, since
- // deal.II might not use doubles, so
+ // deal.II might not use doubles, so
// that a direct access is not possible.
std::vector<int> indices (size);
std::vector<double> values (size);
values[i] = v(i);
}
- const int ierr = vector->ReplaceGlobalValues (size, &indices[0],
+ const int ierr = vector->ReplaceGlobalValues (size, &indices[0],
&values[0]);
AssertThrow (ierr == 0, VectorBase::ExcTrilinosError(ierr));
}
-
-
+
+
inline
Vector &
Vector::operator = (const TrilinosScalar s)
template <typename Number>
- Vector &
+ Vector &
Vector::operator = (const ::dealii::Vector<Number> &v)
{
if (size() != v.size())
map = Epetra_Map (v.size(), 0, Epetra_SerialComm());
#endif
}
-
+
*this = Vector(map, v);
return *this;
}
-
-
+
+
#endif
} /* end of namespace MPI */
* the right hand argument.
*/
Vector &
- operator = (const Vector &V);
+ operator = (const Vector &V);
private:
/**
*this = v;
}
-
-
+
+
inline
Vector &
Vector::operator = (const TrilinosScalar s)
}
template <typename Number>
- Vector &
+ Vector &
Vector::operator = (const ::dealii::Vector<Number> &v)
{
if (size() != v.size())
ExcDimensionMismatch(map.MaxLID(), size-1));
// Need to copy out values, since the
- // deal.II might not use doubles, so
+ // deal.II might not use doubles, so
// that a direct access is not possible.
std::vector<int> indices (size);
std::vector<double> values (size);
values[i] = v(i);
}
- const int ierr = vector->ReplaceGlobalValues (size, &indices[0],
+ const int ierr = vector->ReplaceGlobalValues (size, &indices[0],
&values[0]);
AssertThrow (ierr == 0, VectorBase::ExcTrilinosError(ierr));
return *this;
}
-
+
#endif
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008 by the deal.II authors
+// Copyright (C) 2008, 2009 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
/**
* A namespace for internal implementation details of the
 * TrilinosWrappers members.
- *
+ *
* @ingroup TrilinosWrappers
*/
namespace internal
* this allows us to make the
* assignment operator const.
*/
- const VectorReference &
+ const VectorReference &
operator = (const VectorReference &r) const;
/**
* Set the referenced element of the
* vector to <tt>s</tt>.
*/
- const VectorReference &
+ const VectorReference &
operator = (const TrilinosScalar &s) const;
/**
* referenced element of the
 * vector.
*/
- const VectorReference &
+ const VectorReference &
operator += (const TrilinosScalar &s) const;
/**
* referenced element of the
 * vector.
*/
- const VectorReference &
+ const VectorReference &
operator -= (const TrilinosScalar &s) const;
/**
* element of the vector by
* <tt>s</tt>.
*/
- const VectorReference &
+ const VectorReference &
operator *= (const TrilinosScalar &s) const;
/**
* element of the vector by
* <tt>s</tt>.
*/
- const VectorReference &
+ const VectorReference &
operator /= (const TrilinosScalar &s) const;
/**
typedef const internal::VectorReference const_reference;
/**
- * @name 1: Basic Object-handling
+ * @name 1: Basic Object-handling
*/
//@{
* distribution as the calling
* vector.
*/
- VectorBase &
+ VectorBase &
operator = (const VectorBase &v);
/**
* &input_map) function.
*/
template <typename Number>
- VectorBase &
+ VectorBase &
operator = (const ::dealii::Vector<Number> &v);
/**
/**
* Simple vector addition, equal
* to the <tt>operator
- * +=</tt>.
+ * +=</tt>.
*
 * However, if the second argument
* <tt>allow_different_maps</tt>
* of a vector, i.e. <tt>*this =
* a*V</tt>.
*/
- void add (const TrilinosScalar a,
+ void add (const TrilinosScalar a,
const VectorBase &V);
/**
* vectors, i.e. <tt>*this = a*V
* + b*W</tt>.
*/
- void add (const TrilinosScalar a,
+ void add (const TrilinosScalar a,
const VectorBase &V,
- const TrilinosScalar b,
+ const TrilinosScalar b,
const VectorBase &W);
/**
* Assignment <tt>*this =
* a*V</tt>.
*/
- void equ (const TrilinosScalar a,
+ void equ (const TrilinosScalar a,
const VectorBase &V);
/**
* Assignment <tt>*this = a*V +
* b*W</tt>.
*/
- void equ (const TrilinosScalar a,
+ void equ (const TrilinosScalar a,
const VectorBase &V,
- const TrilinosScalar b,
+ const TrilinosScalar b,
const VectorBase &W);
/**
vector.last_action = Add;
}
- const int ierr = vector.vector->SumIntoGlobalValues (1,
- (const int*)(&index),
+ const int ierr = vector.vector->SumIntoGlobalValues (1,
+ (const int*)(&index),
&value);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
vector.compressed = false;
const int ierr = vector->GlobalAssemble(last_action);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
last_action = Zero;
-
+
compressed = true;
}
const int local_row = vector->Map().LID(indices[i]);
if (local_row == -1)
{
- const int ierr = vector->ReplaceGlobalValues (1,
- (const int*)(&row),
+ const int ierr = vector->ReplaceGlobalValues (1,
+ (const int*)(&row),
&values[i]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
compressed = false;
const int local_row = vector->Map().LID(row);
if (local_row == -1)
{
- const int ierr = vector->SumIntoGlobalValues (1,
- (const int*)(&row),
+ const int ierr = vector->SumIntoGlobalValues (1,
+ (const int*)(&row),
&values[i]);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
compressed = false;
const TrilinosScalar * ptr = start_ptr;
- // add up elements
+ // add up elements
// TODO: This
// won't work in parallel like
// this. Find out a better way to
Assert (size() == w.size(),
ExcDimensionMismatch (size(), w.size()));
- const int ierr = vector->ReciprocalMultiply(1.0, *(w.vector),
+ const int ierr = vector->ReciprocalMultiply(1.0, *(w.vector),
*(v.vector), 0.0);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
{
Assert (line != col_val_pair->first,
ExcMessage ("Can't constrain a degree of freedom to itself"));
-
+
for (std::vector<std::pair<unsigned int,double> >::const_iterator
p=line_ptr->entries.begin();
p != line_ptr->entries.end(); ++p)
p->second, col_val_pair->second));
break;
}
-
+
line_ptr->entries.push_back (*col_val_pair);
}
}
line->entries.end(),
&check_zero_weight),
line->entries.end());
-
+
// replace references to dofs that
// are themselves constrained. note
// that because we may replace
// further
// constrained:
chained_constraint_replaced = true;
-
+
// look up the chain
// of constraints for
// this entry
Assert (dof_index != line->line,
ExcMessage ("Cycle in constraints detected!"));
-
+
// find the line
// corresponding to
// this entry. note
std::make_pair (constrained_line->entries[0].first,
constrained_line->entries[0].second *
weight);
-
+
for (unsigned int i=1; i<constrained_line->entries.size(); ++i)
line->entries
.push_back (std::make_pair (constrained_line->entries[i].first,
line->inhomogeneity += constrained_line->inhomogeneity *
weight;
-
+
// now that we're here, do
// not increase index by
// one but rather make
}
else
new_entries.push_back (line->entries[j]);
-
+
Assert (new_entries.size() == line->entries.size() - duplicates,
ExcInternalError());
ExcInternalError());
}
-
+
// replace old list of
// constraints for this
// dof by the new one and
line->entries.swap (new_entries);
break;
}
-
+
// finally do the following
// check: if the sum of
// weights for the
line->inhomogeneity /= sum;
}
}
-
+
#ifdef DEBUG
// if in debug mode: check that no
// dof is constrained to another dof
ExcDoFConstrainedToConstrainedDoF(line->line, entry->first));
};
#endif
-
+
sorted = true;
}
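// (Summary of what close() does, as implemented above: zero-weight
//  entries are removed, chains of constraints are resolved by
//  substitution until no entry refers to another constrained dof,
//  duplicate entries are merged, self-references and cycles trigger
//  assertions, and finally the object is marked 'sorted' and becomes
//  read-only.)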
// constraints in the two objects
// are for different degrees of
// freedom
-#ifdef DEBUG
+#ifdef DEBUG
if (true)
{
// first insert all dofs in
std::vector<std::pair<unsigned int,double> > tmp;
std::vector<std::vector<ConstraintLine>::const_iterator> tmp_other_lines;
for (std::vector<ConstraintLine>::iterator line=lines.begin();
- line!=lines.end(); ++line)
+ line!=lines.end(); ++line)
{
// copy the line of the old object
// modulo dofs constrained in
tmp_other_lines.clear ();
tmp_other_lines.reserve (line->entries.size());
-
+
bool entries_to_resolve = false;
-
+
for (unsigned int i=0; i<line->entries.size(); ++i)
{
if (other_constraints.sorted == true)
{
std::vector<ConstraintLine>::const_iterator
it = other_constraints.lines.end ();
-
+
for (std::vector<ConstraintLine>::const_iterator
p=other_constraints.lines.begin();
p!=other_constraints.lines.end(); ++p)
tmp_other_lines.push_back (it);
};
-
+
if (tmp_other_lines.back() != other_constraints.lines.end ())
entries_to_resolve = true;
};
j!=tmp_other_lines[i]->entries.end(); ++j)
tmp.push_back (std::make_pair(j->first, j->second*weight));
- line->inhomogeneity += tmp_other_lines[i]->inhomogeneity *
+ line->inhomogeneity += tmp_other_lines[i]->inhomogeneity *
line->entries[i].second;
};
};
// newly resolved line
line->entries.swap (tmp);
};
-
-
-
+
+
+
// next action: append new lines at
// the end
lines.insert (lines.end(),
{
constraint_line_exists.insert (constraint_line_exists.begin(), offset,
false);
-
+
for (std::vector<ConstraintLine>::iterator i = lines.begin();
i != lines.end(); i++)
{
i->line += offset;
- for (std::vector<std::pair<unsigned int,double> >::iterator
+ for (std::vector<std::pair<unsigned int,double> >::iterator
j = i->entries.begin();
j != i->entries.end(); j++)
j->first += offset;
std::vector<ConstraintLine> tmp;
lines.swap (tmp);
}
-
+
{
std::vector<bool> tmp;
constraint_line_exists.swap (tmp);
vec_distribute.reset();
}
#endif
-
+
sorted = false;
}
unsigned int shift = 0;
unsigned int n_rows = uncondensed.n_rows();
- if (next_constraint == lines.end())
+ if (next_constraint == lines.end())
// if no constraint is to be handled
for (unsigned int row=0; row!=n_rows; ++row)
new_line.push_back (row);
else
- for (unsigned int row=0; row!=n_rows; ++row)
+ for (unsigned int row=0; row!=n_rows; ++row)
if (row == next_constraint->line)
{
// this line is constrained
new_line.push_back (-1);
- // note that @p{lines} is ordered
+ // note that @p{lines} is ordered
++shift;
++next_constraint;
if (next_constraint == lines.end())
j<uncondensed.get_rowstart_indices()[row+1]; ++j)
if (new_line[uncondensed.get_column_numbers()[j]] != -1)
condensed.add (new_line[row], new_line[uncondensed.get_column_numbers()[j]]);
- else
+ else
{
// let c point to the constraint
// of this column
while (c->line != uncondensed.get_column_numbers()[j])
++c;
- for (unsigned int q=0; q!=c->entries.size(); ++q)
+ for (unsigned int q=0; q!=c->entries.size(); ++q)
condensed.add (new_line[row], new_line[c->entries[q].first]);
}
else
// for each entry: distribute
if (new_line[uncondensed.get_column_numbers()[j]] != -1)
// column is not constrained
- for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
+ for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
condensed.add (new_line[next_constraint->entries[q].first],
new_line[uncondensed.get_column_numbers()[j]]);
-
+
else
// not only this line but
// also this col is constrained
// of this column
std::vector<ConstraintLine>::const_iterator c = lines.begin();
while (c->line != uncondensed.get_column_numbers()[j]) ++c;
-
+
for (unsigned int p=0; p!=c->entries.size(); ++p)
for (unsigned int q=0; q!=next_constraint->entries.size(); ++q)
condensed.add (new_line[next_constraint->entries[q].first],
new_line[c->entries[p].first]);
};
-
+
++next_constraint;
};
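// (Scheme used above: new_line[row] is the row's index in the condensed
//  system, or -1 if the row is constrained; an entry with a constrained
//  row and/or column is not copied verbatim but redistributed to the
//  rows/columns of the constraining entries, taking the cross product
//  of both entry lists when row and column are both constrained.)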
Assert (sparsity.is_compressed() == false, ExcMatrixIsClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute(sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
// sparsity.colnums[j]
for (unsigned int q=0;
q!=lines[distribute[column]].entries.size();
- ++q)
+ ++q)
sparsity.add (row,
lines[distribute[column]].entries[q].first);
}
// row @p{row} and regular column
// sparsity.colnums[j]
for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
+ q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
}
}
}
-
+
sparsity.compress();
}
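// (All four cases in the sparsity condensation above follow one
//  pattern: regular row/regular column entries are kept as they are, a
//  constrained row or column is replaced by the rows/columns it is
//  constrained to, and a constrained/constrained pair contributes the
//  cross product of the two entry lists.)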
Assert (sorted == true, ExcMatrixNotClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute(sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
unsigned int old_rowlength = sparsity.row_length(row);
for (unsigned int q=0;
q!=lines[distribute[column]].entries.size();
- ++q)
+ ++q)
{
const unsigned int
new_col = lines[distribute[column]].entries[q].first;
-
+
sparsity.add (row, new_col);
const unsigned int new_rowlength = sparsity.row_length(row);
// row @p{row} and regular column
// sparsity.colnums[j]
for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
+ q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
Assert (sorted == true, ExcMatrixNotClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute(sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
// distribute cols, the loop may
// get longer
CompressedSetSparsityPattern::row_iterator col_num = sparsity.row_begin (row);
-
+
for (; col_num != sparsity.row_end (row); ++col_num)
{
const unsigned int column = *col_num;
-
+
if (distribute[column] != numbers::invalid_unsigned_int)
{
// row
unsigned int old_rowlength = sparsity.row_length(row);
for (unsigned int q=0;
q!=lines[distribute[column]].entries.size();
- ++q)
+ ++q)
{
const unsigned int
new_col = lines[distribute[column]].entries[q].first;
-
+
sparsity.add (row, new_col);
-
+
const unsigned int new_rowlength = sparsity.row_length(row);
// if ((new_col < column) && (old_rowlength != new_rowlength))
// ++col_num;
// row must be distributed
{
CompressedSetSparsityPattern::row_iterator col_num = sparsity.row_begin (row);
-
+
for (; col_num != sparsity.row_end (row); ++col_num)
{
const unsigned int column = *col_num;
-
+
if (distribute[column] == numbers::invalid_unsigned_int)
// distribute entry at irregular
// row @p{row} and regular column
// sparsity.colnums[j]
for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
+ q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
Assert (sorted == true, ExcMatrixNotClosed());
Assert (sparsity.n_rows() == sparsity.n_cols(),
ExcNotQuadratic());
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute(sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
unsigned int old_rowlength = sparsity.row_length(row);
for (unsigned int q=0;
q!=lines[distribute[column]].entries.size();
- ++q)
+ ++q)
{
const unsigned int
new_col = lines[distribute[column]].entries[q].first;
-
+
sparsity.add (row, new_col);
const unsigned int new_rowlength = sparsity.row_length(row);
// row @p{row} and regular column
// sparsity.colnums[j]
for (unsigned int q=0;
- q!=lines[distribute[row]].entries.size(); ++q)
+ q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
column);
else
ExcNotQuadratic());
Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
ExcNotQuadratic());
-
+
const BlockIndices &
index_mapping = sparsity.get_column_indices();
const unsigned int n_blocks = sparsity.n_block_rows();
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute (sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
const std::pair<unsigned int,unsigned int>
block_index = index_mapping.global_to_local(row);
const unsigned int block_row = block_index.first;
-
+
if (distribute[row] == numbers::invalid_unsigned_int)
// regular line. loop over
// all columns and see
{
const unsigned int global_col
= index_mapping.local_to_global(block_col, entry->column());
-
+
if (distribute[global_col] != numbers::invalid_unsigned_int)
// distribute entry at regular
// row @p{row} and irregular column
{
const SparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
-
+
for (SparsityPattern::const_iterator
entry = block_sparsity.begin(block_index.second);
(entry != block_sparsity.end(block_index.second)) &&
{
const unsigned int global_col
= index_mapping.local_to_global (block_col, entry->column());
-
+
if (distribute[global_col] == numbers::invalid_unsigned_int)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first, global_col);
}
else
}
}
}
-
+
sparsity.compress();
}
ExcNotQuadratic());
Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
ExcNotQuadratic());
-
+
const BlockIndices &
index_mapping = sparsity.get_column_indices();
const unsigned int n_blocks = sparsity.n_block_rows();
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute (sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
block_index = index_mapping.global_to_local(row);
const unsigned int block_row = block_index.first;
const unsigned int local_row = block_index.second;
-
+
if (distribute[row] == numbers::invalid_unsigned_int)
// regular line. loop over
// all columns and see
const unsigned int global_col
= index_mapping.local_to_global(block_col,
block_sparsity.column_number(local_row,j));
-
+
if (distribute[global_col] != numbers::invalid_unsigned_int)
// distribute entry at regular
// row @p{row} and irregular column
{
const CompressedSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
-
+
for (unsigned int j=0; j<block_sparsity.row_length(local_row); ++j)
{
const unsigned int global_col
= index_mapping.local_to_global (block_col,
block_sparsity.column_number(local_row,j));
-
+
if (distribute[global_col] == numbers::invalid_unsigned_int)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
ExcNotQuadratic());
Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
ExcNotQuadratic());
-
+
const BlockIndices &
index_mapping = sparsity.get_column_indices();
const unsigned int n_blocks = sparsity.n_block_rows();
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute (sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
block_index = index_mapping.global_to_local(row);
const unsigned int block_row = block_index.first;
const unsigned int local_row = block_index.second;
-
+
if (distribute[row] == numbers::invalid_unsigned_int)
// regular line. loop over
// all columns and see
{
const unsigned int global_col
= index_mapping.local_to_global(block_col, *j);
-
+
if (distribute[global_col] != numbers::invalid_unsigned_int)
// distribute entry at regular
// row @p{row} and irregular column
{
const CompressedSetSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
-
+
for (CompressedSetSparsityPattern::row_iterator
j = block_sparsity.row_begin(local_row);
j != block_sparsity.row_end(local_row); ++j)
{
const unsigned int global_col
= index_mapping.local_to_global (block_col, *j);
-
+
if (distribute[global_col] == numbers::invalid_unsigned_int)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
ExcNotQuadratic());
Assert (sparsity.get_column_indices() == sparsity.get_row_indices(),
ExcNotQuadratic());
-
+
const BlockIndices &
index_mapping = sparsity.get_column_indices();
const unsigned int n_blocks = sparsity.n_block_rows();
-
+
// store for each index whether it must be
// distributed or not. If entry is
// numbers::invalid_unsigned_int,
// index
std::vector<unsigned int> distribute (sparsity.n_rows(),
numbers::invalid_unsigned_int);
-
+
for (unsigned int c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
block_index = index_mapping.global_to_local(row);
const unsigned int block_row = block_index.first;
const unsigned int local_row = block_index.second;
-
+
if (distribute[row] == numbers::invalid_unsigned_int)
// regular line. loop over
// all columns and see
const unsigned int global_col
= index_mapping.local_to_global(block_col,
block_sparsity.column_number(local_row,j));
-
+
if (distribute[global_col] != numbers::invalid_unsigned_int)
// distribute entry at regular
// row @p{row} and irregular column
{
const CompressedSimpleSparsityPattern &
block_sparsity = sparsity.block(block_row,block_col);
-
+
for (unsigned int j=0; j<block_sparsity.row_length(local_row); ++j)
{
const unsigned int global_col
= index_mapping.local_to_global (block_col,
block_sparsity.column_number(local_row,j));
-
+
if (distribute[global_col] == numbers::invalid_unsigned_int)
// distribute entry at irregular
// row @p{row} and regular column
// global_col.
{
- for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+ for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
}
ConstraintLine index_comparison;
index_comparison.line = vec.local_range().first;
- std::vector<ConstraintLine>::const_iterator next_constraint =
+ std::vector<ConstraintLine>::const_iterator next_constraint =
std::lower_bound (lines.begin(),lines.end(),index_comparison);
index_comparison.line = vec.local_range().second;
-
+
std::vector<ConstraintLine>::const_iterator end_constraint
= std::lower_bound(lines.begin(),lines.end(),index_comparison);
{
my_indices[index2]=i;
}
- for (; next_constraint != end_constraint; ++next_constraint)
+ for (; next_constraint != end_constraint; ++next_constraint)
{
for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
my_indices.push_back (next_constraint->entries[i].first);
// here we import the data
vec_distribute->reinit(vec,false,true);
- next_constraint =
+ next_constraint =
std::lower_bound (lines.begin(),lines.end(),index_comparison);
- for (; next_constraint != end_constraint; ++next_constraint)
+ for (; next_constraint != end_constraint; ++next_constraint)
{
// fill entry in line
// next_constraint.line by adding the
-bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
+bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
{
if (is_constrained(index) == false)
return false;
-
+
if (sorted == true)
{
ConstraintLine index_comparison;
-unsigned int ConstraintMatrix::max_constraint_indirections () const
+unsigned int ConstraintMatrix::max_constraint_indirections () const
{
unsigned int return_value = 0;
for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
return false;
}
-
+
void ConstraintMatrix::print (std::ostream &out) const
{
<< " " << lines[i].entries[j].first
<< ": " << lines[i].entries[j].second << "\n";
- // print out inhomogeneity.
+ // print out inhomogeneity.
if (lines[i].inhomogeneity != 0)
out << " " << lines[i].line
<< ": " << lines[i].inhomogeneity << "\n";
out << " " << lines[i].line << " = 0\n";
}
}
-
+
AssertThrow (out, ExcIO());
}
MATRIX_FUNCTIONS(SparseMatrix<double>, Vector<double>);
MATRIX_FUNCTIONS(SparseMatrix<float>, Vector<float>);
-template void ConstraintMatrix::distribute_local_to_global<SparseMatrix<float>,Vector<double> >
+template void ConstraintMatrix::distribute_local_to_global<SparseMatrix<float>,Vector<double> >
(const FullMatrix<double> &,
const Vector<double> &,
const std::vector<unsigned int> &,
MATRIX_FUNCTIONS(BlockSparseMatrix<double>, BlockVector<double>);
MATRIX_FUNCTIONS(BlockSparseMatrix<float>, BlockVector<float>);
-template void ConstraintMatrix::distribute_local_to_global<BlockSparseMatrix<float>,BlockVector<double> >
+template void ConstraintMatrix::distribute_local_to_global<BlockSparseMatrix<float>,BlockVector<double> >
(const FullMatrix<double> &,
const Vector<double> &,
const std::vector<unsigned int> &,
MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix, TrilinosWrappers::Vector);
MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix, TrilinosWrappers::BlockVector);
template void ConstraintMatrix::distribute_local_to_global
-<TrilinosWrappers::SparseMatrix,TrilinosWrappers::MPI::Vector>
- (const FullMatrix<double> &,
- const Vector<double> &,
- const std::vector<unsigned int> &,
- TrilinosWrappers::SparseMatrix &,
+<TrilinosWrappers::SparseMatrix,TrilinosWrappers::MPI::Vector>
+ (const FullMatrix<double> &,
+ const Vector<double> &,
+ const std::vector<unsigned int> &,
+ TrilinosWrappers::SparseMatrix &,
TrilinosWrappers::MPI::Vector &) const;
template void ConstraintMatrix::distribute_local_to_global
-<TrilinosWrappers::BlockSparseMatrix,TrilinosWrappers::MPI::BlockVector>
- (const FullMatrix<double> &,
- const Vector<double> &,
- const std::vector<unsigned int> &,
- TrilinosWrappers::BlockSparseMatrix &,
+<TrilinosWrappers::BlockSparseMatrix,TrilinosWrappers::MPI::BlockVector>
+ (const FullMatrix<double> &,
+ const Vector<double> &,
+ const std::vector<unsigned int> &,
+ TrilinosWrappers::BlockSparseMatrix &,
TrilinosWrappers::MPI::BlockVector &) const;
#endif
const unsigned int j) const
{
#ifdef PETSC_USE_64BIT_INDICES
- PetscInt
+ PetscInt
#else
- int
+ int
#endif
petsc_i = i, petsc_j = j;
PetscScalar value;
// then count the elements in- and
// out-of-window for the rows we own
#ifdef PETSC_USE_64BIT_INDICES
- std::vector<PetscInt>
+ std::vector<PetscInt>
#else
- std::vector<int>
+ std::vector<int>
#endif
row_lengths_in_window (local_row_end - local_row_start),
row_lengths_out_of_window (local_row_end - local_row_start);
((PETSC_VERSION_MINOR == 2) && (PETSC_VERSION_SUBMINOR == 0)))
#ifdef PETSC_USE_64BIT_INDICES
- std::vector<PetscInt>
+ std::vector<PetscInt>
#else
- std::vector<int>
+ std::vector<int>
#endif
row_entries;
std::vector<PetscScalar> row_values;
// sure petsc doesn't read past the
// end
#ifdef PETSC_USE_64BIT_INDICES
- std::vector<PetscInt>
+ std::vector<PetscInt>
#else
- std::vector<int>
+ std::vector<int>
#endif
rowstart_in_window (local_row_end - local_row_start + 1, 0),
colnums_in_window;
for (unsigned int i=local_row_start; i<local_row_end; ++i)
{
#ifdef PETSC_USE_64BIT_INDICES
- PetscInt
+ PetscInt
#else
- int
+ int
#endif
petsc_i = i;
MatSetValues (matrix, 1, &petsc_i,
do_reinit (sparsity_pattern, preset_nonzero_locations);
}
-
+
SparseMatrix &
SparseMatrix::operator = (const double d)
// get rid of old matrix and generate a
// new one
const int ierr = MatDestroy (matrix);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
-
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
do_reinit (m, n, n_nonzero_per_row, is_symmetric);
}
// get rid of old matrix and generate a
// new one
const int ierr = MatDestroy (matrix);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
do_reinit (m, n, row_lengths, is_symmetric);
- }
+ }
// get rid of old matrix and generate a
// new one
const int ierr = MatDestroy (matrix);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
do_reinit (sparsity_pattern, preset_nonzero_locations);
}
-
+
const MPI_Comm &
SparseMatrix::get_mpi_communicator () const
{
// set symmetric flag, if so requested
if (is_symmetric == true)
{
-#if (PETSC_VERSION_MAJOR <= 2)
+#if (PETSC_VERSION_MAJOR <= 2)
const int ierr
= MatSetOption (matrix, MAT_SYMMETRIC);
#else
{
Assert (row_lengths.size() == m,
ExcDimensionMismatch (row_lengths.size(), m));
-
+
// use the call sequence indicating a
// maximal number of elements for each
// row individually. annoyingly, we
// set symmetric flag, if so requested
if (is_symmetric == true)
{
-#if (PETSC_VERSION_MAJOR <= 2)
+#if (PETSC_VERSION_MAJOR <= 2)
const int ierr
= MatSetOption (matrix, MAT_SYMMETRIC);
#else
#endif
AssertThrow (ierr == 0, ExcPETScError(ierr));
- }
+ }
}
-
+
template <typename SparsityType>
void
row_values.resize (row_lengths[i], 0.0);
for (unsigned int j=0; j<row_lengths[i]; ++j)
row_entries[j] = sparsity_pattern.column_number (i,j);
-
+
#ifdef PETSC_USE_64BIT_INDICES
const PetscInt
#else
// In the end, tell the matrix that
// it should not expect any new
// entries.
-#if (PETSC_VERSION_MAJOR <= 2)
+#if (PETSC_VERSION_MAJOR <= 2)
const int ierr =
MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
#else
{
Assert (indices.size() == values.size(),
ExcMessage ("Function called with arguments of different sizes"));
- do_set_add_operation(indices.size(), &indices[0],
- &(*const_cast<dealii::Vector<PetscScalar>*>(&values))(0),
+ do_set_add_operation(indices.size(), &indices[0],
+ &(*const_cast<dealii::Vector<PetscScalar>*>(&values))(0),
true);
}
{
// Destroy the solver object.
int ierr = EPSDestroy (eps);
- AssertThrow (ierr == 0, ExcSLEPcError(ierr));
+ AssertThrow (ierr == 0, ExcSLEPcError(ierr));
}
-
+
SolverBase::SolverBase (SolverControl &cn,
const MPI_Comm &mpi_communicator)
:
transform (NULL)
{
}
-
+
SolverBase::~SolverBase ()
{
if( solver_data != 0 )
SolverBase::solve (const unsigned int n_eigenvectors, unsigned int *n_converged)
{
int ierr;
-
- AssertThrow (solver_data == 0, ExcSLEPcWrappersUsageError());
+
+ AssertThrow (solver_data == 0, ExcSLEPcWrappersUsageError());
solver_data.reset (new SolverData());
-
+
// create eigensolver context and
// set operators.
ierr = EPSCreate (mpi_communicator, &solver_data->eps);
else
ierr = EPSSetOperators (solver_data->eps, *opA, PETSC_NULL);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
-
- if (ini_vec && ini_vec->size() != 0)
+
+ if (ini_vec && ini_vec->size() != 0)
{
ierr = EPSSetInitialVector(solver_data->eps, *ini_vec);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
}
-
+
if (transform)
transform->set_context(solver_data->eps);
-
+
// set runtime options.
set_solver_type (solver_data->eps);
// set number of eigenvectors to
// compute
- ierr = EPSSetDimensions (solver_data->eps, n_eigenvectors,
+ ierr = EPSSetDimensions (solver_data->eps, n_eigenvectors,
PETSC_DECIDE, PETSC_DECIDE);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
// get number of converged
// eigenstates
- ierr = EPSGetConverged (solver_data->eps,
+ ierr = EPSGetConverged (solver_data->eps,
#ifdef PETSC_USE_64BIT_INDICES
reinterpret_cast<PetscInt *>(n_converged));
#else
}
void
- SolverBase::get_eigenpair (const unsigned int index,
+ SolverBase::get_eigenpair (const unsigned int index,
double &kr,
- PETScWrappers::VectorBase &vr)
+ PETScWrappers::VectorBase &vr)
{
- AssertThrow (solver_data != 0, ExcSLEPcWrappersUsageError());
+ AssertThrow (solver_data != 0, ExcSLEPcWrappersUsageError());
// get converged eigenpair
- int ierr = EPSGetEigenpair(solver_data->eps, index,
+ int ierr = EPSGetEigenpair(solver_data->eps, index,
&kr, PETSC_NULL, vr, PETSC_NULL);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
}
return NULL;
return &solver_data->eps;
}
-
+
/* ---------------------- SolverControls ----------------------- */
SolverControl &
SolverBase::control () const
EPSConvergedReason *reason,
void *solver_control_x)
{
- SolverControl &solver_control
+ SolverControl &solver_control
= *reinterpret_cast<SolverControl*>(solver_control_x);
-
+
const SolverControl::State state
= solver_control.check (iteration, residual_norm);
-
+
switch (state)
{
case ::dealii::SolverControl::iterate:
*reason = EPS_CONVERGED_ITERATING;
break;
-
+
case ::dealii::SolverControl::success:
*reason = static_cast<EPSConvergedReason>(1);
break;
-
+
case ::dealii::SolverControl::failure:
if (solver_control.last_step() > solver_control.max_steps())
*reason = EPS_DIVERGED_ITS;
break;
-
+
default:
Assert (false, ExcNotImplemented());
}
-
+
// return without failure.
return 0;
}
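    // (Mapping implemented by convergence_test() above: the deal.II
    //  SolverControl state 'iterate' is reported to SLEPc as
    //  EPS_CONVERGED_ITERATING, 'success' as a positive converged
    //  reason, and 'failure' as EPS_DIVERGED_ITS when the allowed
    //  number of iterations has been exhausted.)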
SolverBase (cn, mpi_communicator),
additional_data (data)
{}
-
+
void
SolverKrylovSchur::set_solver_type (EPS &eps) const
{
this->solver_control.max_steps());
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
}
-
+
/* ---------------------- SolverArnoldi ------------------------ */
SolverArnoldi::SolverArnoldi (SolverControl &cn,
const MPI_Comm &mpi_communicator,
SolverBase (cn, mpi_communicator),
additional_data (data)
{}
-
+
void
SolverArnoldi::set_solver_type (EPS &eps) const
{
SolverBase (cn, mpi_communicator),
additional_data (data)
{}
-
+
void
SolverLanczos::set_solver_type (EPS &eps) const
{
const int local_index = vector.vector->Map().LID(index);
Assert (local_index >= 0,
- ExcAccessToNonLocalElement (index,
+ ExcAccessToNonLocalElement (index,
vector.vector->Map().MinMyGID(),
vector.vector->Map().MaxMyGID()));
last_action (Zero),
compressed (true),
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- vector(std::auto_ptr<Epetra_FEVector>
+ vector(std::auto_ptr<Epetra_FEVector>
(new Epetra_FEVector(
Epetra_Map(0,0,Epetra_MpiComm(MPI_COMM_SELF)))))
#else
- vector(std::auto_ptr<Epetra_FEVector>
+ vector(std::auto_ptr<Epetra_FEVector>
(new Epetra_FEVector(
Epetra_Map(0,0,Epetra_SerialComm()))))
#endif
{}
-
-
+
+
VectorBase::VectorBase (const VectorBase &v)
:
Subscriptor(),
last_action (Zero),
compressed (true),
- vector(std::auto_ptr<Epetra_FEVector>
+ vector(std::auto_ptr<Epetra_FEVector>
(new Epetra_FEVector(*v.vector)))
{}
-
+
VectorBase::~VectorBase ()
{
Assert (size() == v.size(),
ExcDimensionMismatch(size(), v.size()));
-
+
// this is probably not very efficient
// but works. in particular, we could do
// better if we know that
return false;
unsigned int i;
- for (i=0; i<local_size(); i++)
+ for (i=0; i<local_size(); i++)
if ((*(v.vector))[0][i]!=(*vector)[0][i]) return false;
return true;