// $Id$
// Version: $Name$
//
-// Copyright (C) 2009 by the deal.II authors
+// Copyright (C) 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
*/
unsigned int index_within_set (const unsigned int n) const;
+ /**
+ * Each index set can be
+ * represented as the union of a
+ * number of contiguous intervals
+     * of indices; where necessary,
+     * an interval may consist of
+     * only a single element to
+     * represent an isolated member
+     * of the index set.
+ *
+ * This function returns the
+ * minimal number of such
+ * intervals that are needed to
+ * represent the index set under
+ * consideration.
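+     *
+     * As a small illustrative
+     * sketch, using the add_range()
+     * and add_index() member
+     * functions:
+     * @code
+     *   IndexSet is (20);
+     *   is.add_range (0, 5);    // adds {0,1,2,3,4}
+     *   is.add_index (7);       // isolated element
+     *   is.add_range (10, 13);  // adds {10,11,12}
+     *
+     *   // the set consists of the intervals
+     *   // [0,4], [7,7] and [10,12]:
+     *   const unsigned int n = is.n_intervals();  // n == 3
+     * @endcode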
+ */
+ unsigned int n_intervals () const;
+
/**
* Compress the internal
* representation by merging
IndexSet get_view (const unsigned int begin,
const unsigned int end) const;
+ /**
+ * Outputs a text representation of this
+ * IndexSet to the given stream. Used for
+ * testing.
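+     *
+     * Contiguous ranges are written
+     * as <tt>[first,last]</tt> and
+     * isolated elements as plain
+     * numbers; an index set holding
+     * the elements {0,...,4}, {7}
+     * and {10,...,12} is thus
+     * printed as
+     * <tt>{[0,4], 7, [10,12]}</tt>.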
+ */
+ template <class STREAM>
+ void print(STREAM &out) const;
+
#ifdef DEAL_II_USE_TRILINOS
/**
* Given an MPI communicator,
return ranges != is.ranges;
}
+template <class STREAM>
+inline
+void
+IndexSet::print (STREAM &out) const
+{
+ compress();
+ out << "{";
+ std::vector<Range>::const_iterator p;
+ for (p = ranges.begin(); p != ranges.end(); ++p)
+ {
+      if (p->end - p->begin == 1)
+ out << p->begin;
+ else
+ out << "[" << p->begin << "," << p->end-1 << "]";
+
+      if (p != --ranges.end())
+ out << ", ";
+ }
+ out << "}" << std::endl;
+}
DEAL_II_NAMESPACE_CLOSE
+unsigned int
+IndexSet::n_intervals () const
+{
+ compress ();
+ return ranges.size();
+}
+
+
+
IndexSet
IndexSet::get_view (const unsigned int begin,
const unsigned int end) const
<< "to another DoF with number " << arg1
<< ", which however is constrained by this object. This is not"
<< " allowed.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcRowNotStoredHere,
+ int,
+ << "The index set given to this constraint matrix indicates "
+ << "constraints for degree of freedom " << arg1
+ << " should not be stored by this object, but a constraint "
+ << "is being added.");
private:
if (!local_lines.size())
return line;
- Assert(local_lines.is_element(line), ExcInternalError());
+ Assert(local_lines.is_element(line),
+ ExcRowNotStoredHere(line));
return local_lines.index_within_set(line);
}
// specialized sort and insert functions.
struct GlobalRowsFromLocal
{
- GlobalRowsFromLocal (const unsigned int n_local_rows)
+ GlobalRowsFromLocal (const unsigned int n_local_rows)
:
total_row_indices (n_local_rows), n_active_rows (n_local_rows),
n_inhomogeneous_rows (0){};
[index_in_constraint]).second; };
bool have_indirect_rows () const { return data_cache.element_size; };
void insert_constraint (const unsigned int constrained_local_dof)
- { --n_active_rows;
+ { --n_active_rows;
total_row_indices[n_active_rows].local_row = constrained_local_dof; }
- unsigned int last_constrained_local_row ()
+ unsigned int last_constrained_local_row ()
{ Assert (total_row_indices.back().global_row == numbers::invalid_unsigned_int,
ExcInternalError());
return total_row_indices.back().local_row; };
{
if (value != 0.)
{
- Assert (col_ptr != 0,
+ Assert (col_ptr != 0,
typename SparseMatrix<number>::ExcInvalidIndex (row, column));
if (are_on_diagonal)
{
const unsigned int loc_row = global_rows.local_row(i);
#ifdef DEBUG
- const unsigned int * col_ptr = sparsity.n_nonzero_elements() == 0 ? 0 :
+ const unsigned int * col_ptr = sparsity.n_nonzero_elements() == 0 ? 0 :
&sparsity_struct[row_start[row]];
number * val_ptr = sparsity.n_nonzero_elements() == 0 ? 0 :
&sparse_matrix->global_entry (row_start[row]);
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* classes simply set the right flags to select one solver or another, or to
* set certain parameters for individual solvers.
*
+ * Note: Repeated calls to solve() on a solver object with an attached
+ * preconditioner must be used with care. The preconditioner is initialized
+ * in the first call to solve(), and subsequent calls reuse the solver and
+ * preconditioner objects. This is done for performance reasons. The solver
+ * and preconditioner can be reset by calling reset().
+ *
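+ * A sketch of this usage pattern (the matrix A, the vectors x and b1...b3,
+ * the solver control object and the particular choice of solver and
+ * preconditioner are placeholders) could look like this:
+ * @code
+ *   PETScWrappers::SolverCG solver (solver_control);
+ *   PETScWrappers::PreconditionJacobi preconditioner (A);
+ *
+ *   solver.solve (A, x, b1, preconditioner);  // sets up solver and preconditioner
+ *   solver.solve (A, x, b2, preconditioner);  // reuses both objects
+ *
+ *   solver.reset ();                          // start from scratch next time
+ *   solver.solve (A, x, b3, preconditioner);
+ * @endcode
+ *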
* One of the gotchas of PETSc is that -- in particular in MPI mode -- it
* often does not produce very helpful error messages. In order to save
* other users some time in searching a hard to track down error, here is
* classes and the object passed as a
* preconditioner, one of the linear
* solvers and preconditioners of PETSc
- * is chosen.
+ * is chosen. Repeated calls to
+ * solve() do not reconstruct the
+ * preconditioner for performance
      * reasons. See the class documentation.
*/
void
solve (const MatrixBase &A,
const PreconditionerBase &preconditioner);
+ /**
+ * Resets the contained preconditioner
+     * and solver object. See the class
+     * description for more details.
+ */
+ virtual void reset();
+
+
/**
* Access to object that controls
* convergence.
*/
AdditionalData (const bool elliptic = true,
const bool higher_order_elements = false,
+ const unsigned int n_cycles = 1,
+                     const unsigned int w_cycle = false,
const double aggregation_threshold = 1e-4,
const std::vector<std::vector<bool> > &constant_modes = std::vector<std::vector<bool> > (1),
- const unsigned int smoother_sweeps = 3,
+ const unsigned int smoother_sweeps = 2,
const unsigned int smoother_overlap = 0,
const bool output_details = false);
*/
bool higher_order_elements;
+ /**
+ * Defines how many multigrid cycles
+ * should be performed by the
+ * preconditioner.
+ */
+ unsigned int n_cycles;
+
+ /**
+ * Defines whether a w-cycle should be
+ * used instead of the standard setting
+ * of a v-cycle.
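+       *
+       * For example, a W-cycle with two
+       * cycle applications could be
+       * requested as follows (a sketch;
+       * <tt>matrix</tt> stands for an
+       * already assembled
+       * TrilinosWrappers::SparseMatrix):
+       * @code
+       *   TrilinosWrappers::PreconditionAMG::AdditionalData data;
+       *   data.n_cycles = 2;
+       *   data.w_cycle  = true;
+       *
+       *   TrilinosWrappers::PreconditionAMG amg;
+       *   amg.initialize (matrix, data);
+       * @endcode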
+ */
+ bool w_cycle;
+
/**
* This threshold tells the AMG setup
* how the coarsening should be
*/
namespace MPI
{
+ class BlockVector;
/**
* This class implements a wrapper to use the Trilinos distributed
const bool fast = false,
const bool allow_different_maps = false);
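+
+      /**
+       * Reinit function that takes the
+       * elements of all blocks of the
+       * given (possibly distributed)
+       * block vector and merges them
+       * into one contiguous vector. If
+       * @p import_data is false, the
+       * present vector is reset to the
+       * combined partitioning of the
+       * blocks of @p v and the local
+       * values are copied directly; if
+       * it is true, the data is instead
+       * imported into the vector's
+       * current parallel layout.
+       */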
+ void reinit (const BlockVector &v,
+ const bool import_data = false);
+
/**
* Set all components of the
* vector to the given number @p
// this line (including
// ones that we have
// appended in this go
- // around)
+ // around) and see whether
+ // they are further
+ // constrained. ignore
+ // elements that we don't
+ // store on the current
+ // processor
unsigned int entry = 0;
while (entry < line->entries.size())
- if (is_constrained (line->entries[entry].first))
+ if (((local_lines.size() == 0)
+ ||
+ (local_lines.is_element(line->entries[entry].first)))
+ &&
+ is_constrained (line->entries[entry].first))
{
// ok, this entry is
// further
#ifdef DEBUG
// if in debug mode: check that no
// dof is constraint to another dof
- // that is also constrained
+ // that is also
+ // constrained. exclude dofs from
+ // this check whose constraint
+				       // lines would not be stored on the
+ // local processor
for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
line!=lines.end(); ++line)
for (std::vector<std::pair<unsigned int,double> >::const_iterator
entry=line->entries.begin();
entry!=line->entries.end(); ++entry)
- {
- // make sure that
- // entry->first is not the
- // index of a line itself
- const bool is_circle = is_constrained(entry->first);
- Assert (is_circle == false,
- ExcDoFConstrainedToConstrainedDoF(line->line, entry->first));
- };
+ if ((local_lines.size() == 0)
+ ||
+ (local_lines.is_element(entry->first)))
+ {
+ // make sure that
+ // entry->first is not the
+ // index of a line itself
+ const bool is_circle = is_constrained(entry->first);
+ Assert (is_circle == false,
+ ExcDoFConstrainedToConstrainedDoF(line->line, entry->first));
+ }
#endif
sorted = true;
// on the local range of the vector. But
// we need access to values that the
// local nodes are constrained to.
+
template<>
void
ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
}
}
+
+
+template<>
+void
+ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
+{
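+  // first collect, block by block, all
+  // indices we need read access to: the
+  // locally owned range of each block
+  // plus all entries that the local
+  // constraints refer to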
+ IndexSet my_indices (vec.size());
+ for (unsigned int block=0; block<vec.n_blocks(); ++block)
+ {
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.block(block).local_range().first
+ +vec.get_block_indices().block_start(block);
+ const constraint_iterator begin_my_constraints =
+ std::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.block(block).local_range().second
+ +vec.get_block_indices().block_start(block);
+
+ const constraint_iterator end_my_constraints
+ = std::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+ // Here we search all the indices that we
+ // need to have read-access to - the local
+ // nodes and all the nodes that the
+      // constraints indicate. No caching is
+      // done yet; that would need some more
+      // clever data structures.
+ const std::pair<unsigned int, unsigned int>
+ local_range = vec.block(block).local_range();
+
+ my_indices.add_range (local_range.first, local_range.second);
+
+ std::set<unsigned int> individual_indices;
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if ((it->entries[i].first < local_range.first)
+ ||
+ (it->entries[i].first >= local_range.second))
+ individual_indices.insert (it->entries[i].first);
+
+ my_indices.add_indices (individual_indices.begin(),
+ individual_indices.end());
+ }
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ const Epetra_MpiComm *mpi_comm
+ = dynamic_cast<const Epetra_MpiComm*>(&vec.block(0).trilinos_vector().Comm());
+
+ Assert (mpi_comm != 0, ExcInternalError());
+
+ TrilinosWrappers::MPI::Vector vec_distribute
+ (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
+#else
+ TrilinosWrappers::MPI::Vector vec_distribute
+    (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
+#endif
+
+ // here we import the data
+ vec_distribute.reinit(vec,true);
+
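+  // now that we have read access to all
+  // required vector entries, go through
+  // the local constraints block by block
+  // and compute the constrained values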
+ for (unsigned int block=0; block<vec.n_blocks(); ++block)
+ {
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.block(block).local_range().first
+ +vec.get_block_indices().block_start(block);
+ const constraint_iterator begin_my_constraints =
+ std::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.block(block).local_range().second
+ +vec.get_block_indices().block_start(block);
+
+ const constraint_iterator end_my_constraints
+ = std::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ {
+ // fill entry in line
+ // next_constraint.line by adding the
+ // different contributions
+ double new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (vec_distribute(it->entries[i].first) *
+ it->entries[i].second);
+ vec(it->line) = new_value;
+ }
+ }
+}
+
#endif
std::cout << "*** PETSC-Matrix: num-allocs = "
<< info.mallocs << " ***" << std::endl;
- return sizeof(*this) + info.memory;
+ return sizeof(*this) + static_cast<unsigned int>(info.memory);
}
}
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
# endif
#endif
- // destroy solver object
- solver_data.reset ();
+ // do not destroy solver object
+// solver_data.reset ();
// in case of failure: throw
// exception
+ void
+ SolverBase::reset()
+ {
+ solver_data.reset ();
+ }
+
+
+
SolverControl &
SolverBase::control() const
{
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008 by the deal.II authors
+// Copyright (C) 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
PreconditionAMG::AdditionalData::
AdditionalData (const bool elliptic,
const bool higher_order_elements,
+ const unsigned int n_cycles,
+ const bool w_cycle,
const double aggregation_threshold,
const std::vector<std::vector<bool> > &constant_modes,
const unsigned int smoother_sweeps,
:
elliptic (elliptic),
higher_order_elements (higher_order_elements),
+ n_cycles (n_cycles),
+ w_cycle (w_cycle),
aggregation_threshold (aggregation_threshold),
constant_modes (constant_modes),
smoother_sweeps (smoother_sweeps),
multilevel_operator.release();
const unsigned int n_rows = matrix.m();
- const unsigned int constant_modes_dimension =
- additional_data.constant_modes.size();
// Build the AMG preconditioner.
Teuchos::ParameterList parameter_list;
parameter_list.set("smoother: sweeps",
static_cast<int>(additional_data.smoother_sweeps));
+ parameter_list.set("cycle applications",
+ static_cast<int>(additional_data.n_cycles));
+ if (additional_data.w_cycle == true)
+ parameter_list.set("prec type", "MGW");
+ else
+ parameter_list.set("prec type", "MGV");
+
parameter_list.set("smoother: Chebyshev alpha",10.);
parameter_list.set("smoother: ifpack overlap",
static_cast<int>(additional_data.smoother_overlap));
const Epetra_Map & domain_map = matrix.domain_partitioner();
+ const unsigned int constant_modes_dimension =
+ additional_data.constant_modes.size();
Epetra_MultiVector distributed_constant_modes (domain_map,
constant_modes_dimension);
if (constant_modes_dimension > 1)
{
- Assert (n_rows == additional_data.constant_modes[0].size(),
- ExcDimensionMismatch(n_rows,
- additional_data.constant_modes[0].size()));
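+	  // the user can provide the constant
+	  // modes either for all rows of the
+	  // matrix or only for the locally
+	  // owned ones; figure out which case
+	  // we are in by comparing sizes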
+ const bool constant_modes_are_global =
+ additional_data.constant_modes[0].size() == n_rows;
+ const unsigned int n_relevant_rows =
+ constant_modes_are_global ? n_rows : additional_data.constant_modes[0].size();
+ const unsigned int my_size = domain_map.NumMyElements();
+ if (constant_modes_are_global == false)
+ Assert (n_relevant_rows == my_size,
+ ExcDimensionMismatch(n_relevant_rows, my_size));
Assert (n_rows ==
static_cast<unsigned int>(distributed_constant_modes.GlobalLength()),
ExcDimensionMismatch(n_rows,
distributed_constant_modes.GlobalLength()));
- const unsigned int my_size = domain_map.NumMyElements();
- Assert (my_size == static_cast<unsigned int>(domain_map.MaxLID()+1),
- ExcDimensionMismatch (my_size, domain_map.MaxLID()+1));
-
// Reshape null space as a
// contiguous vector of
// doubles so that Trilinos
for (unsigned int d=0; d<constant_modes_dimension; ++d)
for (unsigned int row=0; row<my_size; ++row)
{
- int global_row_id = domain_map.GID(row);
+ int global_row_id = constant_modes_are_global ? domain_map.GID(row) : row;
distributed_constant_modes.ReplaceMyValue(row, d,
static_cast<double>(additional_data.constant_modes[d][global_row_id]));
}
#ifdef DEAL_II_USE_TRILINOS
# include <lac/trilinos_sparse_matrix.h>
+# include <lac/trilinos_block_vector.h>
# include <cmath>
# include <Epetra_Import.h>
+# include <Epetra_Vector.h>
DEAL_II_NAMESPACE_OPEN
+ void
+ Vector::reinit (const BlockVector &v,
+ const bool import_data)
+ {
+    // this function merges the locally
+    // owned elements of all blocks of
+    // the given block vector into one
+    // contiguous vector. if the block
+    // vector has no blocks, there is
+    // nothing to do
+ if (v.n_blocks() == 0)
+ return;
+
+ // create a vector that holds all the elements
+ // contained in the block vector. need to
+ // manually create an Epetra_Map.
+ unsigned int n_elements = 0, added_elements = 0, block_offset = 0;
+ for (unsigned int block=0; block<v.n_blocks();++block)
+ n_elements += v.block(block).local_size();
+ std::vector<int> global_ids (n_elements, -1);
+ for (unsigned int block=0; block<v.n_blocks();++block)
+ {
+ int * glob_elements = v.block(block).vector_partitioner().MyGlobalElements();
+ for (unsigned int i=0; i<v.block(block).local_size(); ++i)
+ global_ids[added_elements++] = glob_elements[i] + block_offset;
+ block_offset += v.block(block).size();
+ }
+
+ Assert (n_elements == added_elements, ExcInternalError());
+ Epetra_Map new_map (v.size(), n_elements, &global_ids[0], 0,
+ v.block(0).vector_partitioner().Comm());
+
+ if (import_data == false)
+ vector = std::auto_ptr<Epetra_FEVector> (new Epetra_FEVector (new_map));
+
+ Teuchos::RCP<Epetra_FEVector> actual_vec = (import_data == true) ?
+ Teuchos::rcp (new Epetra_FEVector (new_map), true) :
+ Teuchos::rcp (&*vector, false);
+
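+    // copy the local entries of all blocks
+    // into the flat vector, one block
+    // after the other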
+ TrilinosScalar* entries = (*actual_vec)[0];
+ block_offset = 0;
+ for (unsigned int block=0; block<v.n_blocks();++block)
+ {
+ v.block(block).trilinos_vector().ExtractCopy (entries, 0);
+ entries += v.block(block).local_size();
+ }
+
+ if (import_data == true)
+ {
+ AssertThrow (static_cast<unsigned int>(actual_vec->GlobalLength())
+ == v.size(),
+ ExcDimensionMismatch (actual_vec->GlobalLength(),
+ v.size()));
+
+ Epetra_Import data_exchange (vector->Map(), actual_vec->Map());
+
+ const int ierr = vector->Import(*actual_vec, data_exchange, Insert);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+
+ last_action = Insert;
+ }
+
+ }
+
+
+
Vector &
Vector::operator = (const Vector &v)
{
{
Assert (vector->Map().SameAs(v.vector->Map()) == true,
ExcMessage ("The Epetra maps in the assignment operator ="
- " do not match, even though the local_range "
- " seems to be the same. Check vector setup!"));
+ " do not match, even though the local_range"
+ " seems to be the same. Check vector setup or"
+ " use reinit()!"));
const int ierr = vector->Update(1.0, *v.vector, 0.0);
Assert (ierr == 0, ExcTrilinosError(ierr));
{
Epetra_Import data_exchange (vector->Map(), v.vector->Map());
const int ierr = vector->Import(*v.vector, data_exchange, Insert);
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ Assert (ierr == 0, ExcTrilinosError(ierr));
+ }
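+  // if the local range of this vector lies
+  // within the local range of the right
+  // hand side vector, we can copy the
+  // values element by element without
+  // communication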
+ else if (size() == v.size() && local_range().first>=v.local_range().first &&
+ local_range().second<=v.local_range().second &&
+ vector->Map().UniqueGIDs())
+ {
+ for (unsigned int i=0; i<local_size(); ++i)
+ {
+ Assert (v.trilinos_vector().Map().MyGID(vector->Map().GID(i)),
+ ExcMessage ("The right hand side vector does not contain "
+ "all local elements present in the left hand "
+ "side vector in the assignment operation =, "
+ "which is not allowed. Use reinit()."));
+ (*vector)[0][i] = v(vector->Map().GID(i));
+ }
}
else
{
{
// get a representation of the vector and
// loop over all the elements
- TrilinosScalar *start_ptr;
- int leading_dimension;
- int ierr = vector->ExtractView (&start_ptr, &leading_dimension);
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
-
- // TODO: This
- // won't work in parallel like
- // this. Find out a better way to
- // this in that case.
+ TrilinosScalar *start_ptr = (*vector)[0];
const TrilinosScalar *ptr = start_ptr,
- *eptr = start_ptr + size();
+ *eptr = start_ptr + local_size();
bool flag = true;
while (ptr != eptr)
{