// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* depending on the ConditionalOStream object being active (default)
 * or not. The condition of this object can be changed via
 * set_condition() or in the constructor. This class is used in the
- * step-17, step-18, step-32,
+ * step-17, step-18,
* step-33, and step-35
* tutorial programs.
*
* object. In this case, we did a lot of other stuff, so that the time
 * proportions of the functions we measured are far away from 100 percent.
*
- * See the step-32 tutorial program for usage of this class.
- *
* @ingroup utilities
* @author M. Kronbichler, 2009.
*/
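Not part of the patch, but a minimal usage sketch of the class documented above; the include path follows the pre-"deal.II/" header layout of this era and may differ.

#include <base/conditional_ostream.h>

#include <iostream>

using namespace dealii;

void print_progress (const unsigned int this_mpi_process)
{
  // Active only on the first MPI process; the other processes discard output.
  ConditionalOStream pcout (std::cout, this_mpi_process == 0);
  pcout << "Assembling the linear system..." << std::endl;

  // The condition can also be changed after construction:
  pcout.set_condition (false);
  pcout << "This message is suppressed on all processes." << std::endl;
}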
* destructor of this object which in
 * turn calls <code>MPI_Finalize</code>
* to shut down the MPI system.
- *
- * This class is used in @ref step_32
- * "step-32", for example.
*/
class MPI_InitFinalize
{
* provides a unified interface to both serial and %parallel
* implementations of Trilinos, sets up the MPI communicator in case the
* programs are run in %parallel, and correctly terminates all processes
- * when the destructor is called. An example usage of this class is shown
- * in the tutorial program step-32.
+ * when the destructor is called.
*/
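For orientation (not part of the patch), a sketch of the RAII pattern the two comments above describe. Only the class name MPI_InitFinalize is taken from the declaration above; the Utilities::System namespace and the include path are assumptions about this revision.

#include <base/utilities.h>

using namespace dealii;

int main (int argc, char **argv)
{
  // Constructing this object calls MPI_Init; when it goes out of scope at the
  // end of main(), its destructor calls MPI_Finalize, so no explicit shutdown
  // call is needed.
  Utilities::System::MPI_InitFinalize mpi_initialization (argc, argv);

  // ... set up and run the parallel program ...

  return 0;
}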
#ifdef DEAL_II_USE_TRILINOS
namespace Trilinos
* builds that part of the sparsity
* pattern that corresponds to the
* subdomain_id for which it is
- * responsible. This feature is
- * step-32.
+ * responsible.
*/
template <class DH, class SparsityPattern>
static
* builds that part of the sparsity
* pattern that corresponds to the
* subdomain_id for which it is
- * responsible. This feature is
- * step-32.
+ * responsible.
*/
template <class DH, class SparsityPattern>
static
* case, an assertion is thrown.
*
* This function is used in the
- * step-22,
- * step-31, and
- * step-32 tutorial
+ * step-22 and
+ * step-31 tutorial
* programs.
*/
template <int dim, int spacedim>
// $Id$
// Version: $Name$
//
-// Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+// Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
*
* @ingroup Iterators
*/
- class Active
+ class Active
{
public:
/**
template <class Iterator>
bool operator () (const Iterator &i) const;
};
-
+
/**
* Filter that evaluates to true if
* either the iterator points to an
template <class Iterator>
bool operator () (const Iterator &i) const;
};
-
+
/**
* Filter that evaluates to true if
bool operator () (const Iterator &i) const;
};
-
+
/**
* Filter for iterators that
* evaluates to true if either the
*
* @ingroup Iterators
*/
- class LevelEqualTo
+ class LevelEqualTo
{
public:
/**
*
* @ingroup Iterators
*/
- class SubdomainEqualTo
+ class SubdomainEqualTo
{
public:
/**
* called with a pair of iterators denoting a range on which they
* shall act, by choosing a filtered iterator instead of usual ones.
*
- * This class is used in step-18 and
- * step-32.
+ * This class is used in step-18.
*
*
* <h3>Predicates</h3>
* {
* return (static_cast<unsigned int>(c->level()) == level);
* };
- * @endcode
+ * @endcode
* then
* @code
* std::bind2nd (std::ptr_fun(&level_equal_to<active_cell_iterator>), 3)
*
* Finally, classes can be predicates. The following class is one:
* @code
- * class Active
+ * class Active
* {
* public:
* template <class Iterator>
* will automatically be advanced
 * to the first cell that satisfies the predicate.
*/
- template <typename Predicate>
+ template <typename Predicate>
FilteredIterator (Predicate p,
const BaseIterator &bi);
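A usage sketch (not part of the patch) of the constructor declared above, combined with the SubdomainEqualTo predicate from this same file; include paths follow the layout of this era.

#include <grid/filtered_iterator.h>
#include <grid/tria.h>

using namespace dealii;

template <int dim>
unsigned int count_subdomain_cells (const Triangulation<dim> &triangulation,
                                    const unsigned int        subdomain)
{
  typedef typename Triangulation<dim>::active_cell_iterator active_iterator;

  // The first constructor advances the iterator to the first cell that
  // satisfies the predicate; the second one wraps the past-the-end value.
  FilteredIterator<active_iterator>
    cell (IteratorFilters::SubdomainEqualTo(subdomain),
          triangulation.begin_active()),
    endc (IteratorFilters::SubdomainEqualTo(subdomain),
          triangulation.end());

  unsigned int n_cells = 0;
  for (; cell != endc; ++cell)
    ++n_cells;
  return n_cells;
}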
* Destructor.
*/
~FilteredIterator ();
-
+
/**
* Assignment operator. Copy the
* iterator value of the
*/
FilteredIterator &
set_to_next_positive (const BaseIterator &bi);
-
+
/**
* As above, but search for the
* previous iterator from @p bi
*/
FilteredIterator &
set_to_previous_positive (const BaseIterator &bi);
-
+
/**
* Compare for equality of the
* underlying iterator values of
*
* We do not compare the
* predicates.
- */
+ */
bool operator < (const FilteredIterator &fi) const;
/**
<< "The element " << arg1
<< " with which you want to compare or which you want to"
<< " assign from is invalid since it does not satisfy the predicate.");
-
+
private:
/**
* type of this pointer.
*/
virtual PredicateBase * clone () const;
-
+
private:
/**
* Copy of the predicate.
operator = (const BaseIterator &bi)
{
Assert ((bi.state() != IteratorState::valid) || (*predicate)(bi),
- ExcInvalidElement(bi));
+ ExcInvalidElement(bi));
BaseIterator::operator = (bi);
return *this;
}
while ((this->state() == IteratorState::valid) &&
( ! (*predicate)(*this)))
BaseIterator::operator++ ();
-
+
return *this;
}
while ((this->state() == IteratorState::valid) &&
( ! (*predicate)(*this)))
BaseIterator::operator-- ();
-
+
return *this;
}
operator ++ (int)
{
const FilteredIterator old_state = *this;
-
+
if (this->state() == IteratorState::valid)
do
BaseIterator::operator++ ();
operator -- (int)
{
const FilteredIterator old_state = *this;
-
+
if (this->state() == IteratorState::valid)
do
BaseIterator::operator-- ();
-namespace IteratorFilters
+namespace IteratorFilters
{
// ---------------- IteratorFilters::Active ---------
}
-// ---------------- IteratorFilters::LevelEqualTo ---------
+// ---------------- IteratorFilters::LevelEqualTo ---------
inline
LevelEqualTo::LevelEqualTo (const unsigned int level)
:
// $Id$
// Version: $Name$
//
-// Copyright (C) 2009 by the deal.II authors
+// Copyright (C) 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
- step-21: Multiphase flow through porous media
- step-22: Stokes flow
- step-31: Thermal convection (Boussinesq flow)
-- step-32: A %parallel Boussinesq solver for mantle convection
Some of these programs were developed under contract from the California
Institute of Technology with support by the National Science Foundation
correctness. Existing tutorial programs typically employ simpler rather than
more complicated solver schemes for exposition but frequently suggest more
complicated schemes including hints on how they might be implemented in an
- appendix.
+ appendix.
<li> <i>Try algorithms:</i> The rapid prototyping abilities of deal.II may
also help in determining best algorithms on the scale of programs to which
* a vector-valued element has exactly one nonzero component if an element is
* primitive. This includes, in particular, all scalar elements as well as
* vector-valued elements assembled via the FESystem class from other
- * primitive (for example scalar) elements as shown in step-8,
+ * primitive (for example scalar) elements as shown in step-8,
* step-29, step-22 and several others. On the other hand,
* the FE_RaviartThomas class used in step-20 and step-21, or the FE_Nedelec
* class provide non-primitive finite elements because there, each
* cell is associated with.
*
* For programs that are parallelized based on MPI but where each processor
- * stores the entire triangulation (as in, for example, step-18
- * or step-32, subdomain ids are assigned to cells by
+ * stores the entire triangulation (as in, for example, step-18),
+ * subdomain ids are assigned to cells by
* partitioning a mesh, and each MPI process then only works on those cells it
* "owns", i.e. that belong to a subdomain that it is associated with
* (traditionally, this is the case for the subdomain id whose numerical value
 * namespace. The MultithreadInfo class allows one to query certain
* properties of the system, such as the number of CPUs. These
* facilities for %parallel computing are described in the
- * following. The step-9, step-13, step-14, step-32, step-35 and
- * step-37 tutorial programs also show their use in practice, with the
- * ones starting with step-32 using a more modern style of doing
+ * following. The step-9, step-13, step-14, and step-35
+ * tutorial programs also show their use in practice, with
+ * step-35 using a more modern style of doing
* things in which essentially we describe <i>what</i> can be done in
* %parallel, while the older tutorial programs describe <i>how</i>
* things have to be done in %parallel.
*
* On the other hand, programs running on distributed memory machines
* (i.e. clusters) need a different programming model built on top of MPI and
- * PETSc or Trilinos. This is described in the step-17, step-18 and step-32
+ * PETSc or Trilinos. This is described in the step-17 and step-18
* example programs.
*
* @anchor MTToC
* in at the positions indicated by <code>_1, _2</code> and <code>_3</code>.
*
* To see the WorkStream class used in practice on tasks like the ones
- * outlined above, take a look at the step-32, step-35 or step-37
- * tutorial programs.
+ * outlined above, take a look at the step-35
+ * tutorial program.
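Not part of the patch: a compact sketch of the WorkStream::run call pattern discussed in this module. The ScratchData/CopyData structures and the worker and copier functions are hypothetical placeholders; with member functions one would instead pass std_cxx1x::bind expressions using the _1, _2, _3 placeholders mentioned above. Include paths follow the layout of this era.

#include <base/work_stream.h>
#include <dofs/dof_handler.h>

using namespace dealii;

struct ScratchData {};   // per-thread temporary objects (FEValues etc.)
struct CopyData {};      // local matrix/vector plus local DoF indices

// Worker: runs in parallel, computes the local contribution of one cell.
template <int dim>
void local_worker (const typename DoFHandler<dim>::active_cell_iterator &cell,
                   ScratchData &scratch,
                   CopyData    &copy)
{
  // ... fill 'copy' with the contributions of 'cell' ...
}

// Copier: runs sequentially, transfers one CopyData object into global objects.
void copy_local_to_global (const CopyData &copy)
{
  // ... add the local contributions to the global matrix and right hand side ...
}

template <int dim>
void assemble_system (const DoFHandler<dim> &dof_handler)
{
  WorkStream::run (dof_handler.begin_active (),
                   dof_handler.end (),
                   &local_worker<dim>,
                   &copy_local_to_global,
                   ScratchData (),
                   CopyData ());
}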
*
*
* @anchor MTThreads
// In the program, before any task-based parallelism is reached.
// Early in the main method is a good place to call this:
- tbb::task_scheduler_init init(n_desired_threads + 1);
+ tbb::task_scheduler_init init(n_desired_threads + 1);
* @endcode
* The method of setting the number of threads relies on this call to
* <code>task_scheduler_init</code> occurring before any other calls to the
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* Interfaces to Trilinos exist in the TrilinosWrappers namespace,
* making matrices, vectors, and solvers look like the corresponding
* deal.II classes. Their use is explained in the @ref step_31
- * "step-31", step-32, and step-33
+ * "step-31", and step-33
* tutorial programs. The <a
* href="../../readme-petsc-trilinos.html">PETSc and Trilinos
* ReadMe</a> file explains how to configure deal.II to use this
<h3> Possible extensions </h3>
A close inspection of this program's performance shows that it is mostly
-dominated by matrix-vector operations. step-37 shows one way
+dominated by matrix-vector operations. A future step-37 program will show
how this can be avoided by working with matrix-free methods.
Another avenue would be to use algebraic multigrid methods. The
fluid, expressed as the product of the density $\rho$, the thermal expansion
coefficient $\beta$,
the temperature <i>T</i> and the gravity vector <b>g</b> pointing
-downward. (A derivation of why the right hand side looks like it looks
-is given in the introduction of step-32.)
+downward.
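Written out (not part of the patch; the sign with which it enters the momentum equation depends on the convention used in the rest of this introduction), this buoyancy term is $\rho\, \beta\, T\, \mathbf{g}$.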
While the first two equations describe how the fluid reacts to
temperature differences by moving around, the third equation states
how the fluid motion affects the temperature field: it is an advection
the Trilinos ML package that implements an Algebraic Multigrid (AMG)
method. We will use this preconditioner to precondition the second order
operator part of the momentum equation. The ability to solve problems in
-%parallel will be explored in step-32, using the same problem as
+%parallel will be explored in a future step-32 program, using the same problem as
discussed here.
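A configuration sketch (not from the patch) of how such an ML-based AMG preconditioner is typically set up through the Trilinos wrappers; the matrix argument is a placeholder, and the AdditionalData fields shown as well as the include paths are assumptions about this revision.

#include <lac/trilinos_precondition.h>
#include <lac/trilinos_sparse_matrix.h>

using namespace dealii;

void setup_velocity_preconditioner (const TrilinosWrappers::SparseMatrix &velocity_matrix,
                                    TrilinosWrappers::PreconditionAMG    &amg)
{
  TrilinosWrappers::PreconditionAMG::AdditionalData data;
  data.elliptic        = true;   // the second order operator is elliptic
  data.smoother_sweeps = 2;      // a few smoothing steps per multigrid cycle

  // Builds the ML multilevel hierarchy for the given matrix.
  amg.initialize (velocity_matrix, data);
}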
this value for $c_R$ appears to work just fine for the current
program, we corrected the formula in the program and set $c_R$ to a
value that reproduces exactly the results we had before. We will,
-however, revisit this issue again in step-32.
+however, revisit this issue in a future step-32 program.
Now, however, back to the discussion of what values of $c_k$ and
$\beta$ to choose:
There are various ways to extend the current program. Of particular interest
is, of course, to make it faster and/or increase the resolution of the
-program, in particular in 3d. This is the topic of the step-32
+program, in particular in 3d. This is the topic of a future step-32
tutorial program which will implement strategies to solve this problem in
%parallel on a cluster.
/* $Id$ */
/* Version: $Name: $ */
/* */
-/* Copyright (C) 2007, 2008, 2009 by the deal.II authors */
+/* Copyright (C) 2007, 2008, 2009, 2010 by the deal.II authors */
/* Author: Abner Salgado, Texas A&M University 2009 */
/* */
/* This file is subject to QPL and may not be distributed */
// graphical output.
//
// We will not elaborate on this process
- // here, but rather refer to step-31 and
- // step-32, where a similar procedure is used
+ // here, but rather refer to step-31,
+ // where a similar procedure is used
// (and is documented) to create a joint
// DoFHandler object for all variables.
//
* (in the sense that the method compress() needs to be called before the
* pattern can be used).
*
- * This class is used in step-32.
- *
* @author Martin Kronbichler, 2008, 2009
*/
namespace TrilinosWrappers
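Not part of the patch: a minimal sketch of the two-phase usage noted above, i.e. adding entries first and calling compress() before the pattern is used. The constructor taking global sizes and a per-row estimate, and the add() call mirroring the deal.II SparsityPattern interface, are assumptions about this revision.

#include <lac/trilinos_sparsity_pattern.h>

using namespace dealii;

void build_diagonal_pattern (const unsigned int n)
{
  // Square pattern with a rough estimate of entries per row.
  TrilinosWrappers::SparsityPattern sparsity (n, n, 1);

  for (unsigned int i = 0; i < n; ++i)
    sparsity.add (i, i);          // entries may be added freely at this point

  sparsity.compress ();           // required before the pattern can be used
}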
* where we have no access to the underlying representation of the matrix,
* and therefore cannot efficiently implement the condense()
* operation). This is the case discussed in step-17, @ref
- * step_18 "step-18", step-31, and step-32.
+ * step_18 "step-18", and step-31.
* </ul>
*
* In this case, one possibility is to distribute local entries to the final
* filter. Constrained dofs are
* transformed to local index space of
* the filter, and elements not present
- * in the IndexSet are ignored.
+ * in the IndexSet are ignored.
*
* This function provides an easy way to
* create a ConstraintMatrix for certain
* set_inhomogeneity().
*/
void add_lines (const IndexSet &lines);
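A short usage sketch (not from the patch) for the declaration above; the indices chosen are arbitrary and only illustrate passing an IndexSet.

#include <base/index_set.h>
#include <lac/constraint_matrix.h>

using namespace dealii;

void constrain_selected_dofs (const unsigned int n_dofs)
{
  IndexSet selected (n_dofs);
  selected.add_index (0);         // constrain DoF 0 ...
  selected.add_range (10, 20);    // ... and DoFs 10 through 19

  ConstraintMatrix constraints;
  constraints.add_lines (selected);   // one (so far empty) line per index
  constraints.close ();
}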
-
+
/**
* Add an entry to a given
* line. The list of lines is
* Does the same as the function above
* but can treat
 * non-quadratic matrices.
- */
+ */
template <typename MatrixType>
void
distribute_local_to_global (const FullMatrix<double> &local_matrix,
*/
namespace MPI
{
-
+
/**
 * Implementation of a parallel vector class based on PETSc and using MPI
* communication to synchronise distributed operations. All the functionality
* vector as empty.
*/
Vector ();
-
+
/**
* Constructor. Set dimension to
* @p n and initialize all
* communicator over which the
* different parts of the vector
* shall communicate
- *
+ *
* The constructor is made explicit
* to avoid accidents like this:
* <tt>v=0;</tt>. Presumably, the user
explicit Vector (const MPI_Comm &communicator,
const unsigned int n,
const unsigned int local_size);
-
-
+
+
/**
* Copy-constructor from deal.II
* vectors. Sets the dimension to that
* of the given vector, and copies all
* elements.
- *
+ *
* @arg local_size denotes the size
* of the chunk that shall be stored
* on the present process.
const dealii::Vector<Number> &v,
const unsigned int local_size);
-
+
/**
 * Copy-constructor taking the
* values from a PETSc wrapper vector
* class.
- *
+ *
* @arg local_size denotes the size
* of the chunk that shall be stored
* on the present process.
* ignored during construction. That
* way, the ghost parameter can equal
* the set of locally relevant
- * degrees of freedom, see step-32.
+ * degrees of freedom.
*/
explicit Vector (const MPI_Comm &communicator,
const IndexSet & local,
const IndexSet & ghost = IndexSet(0));
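Not part of the patch: a sketch of the two construction variants documented above. The enclosing PETScWrappers namespace and the include paths are assumed from the usual layout of this era; the index sets are placeholders for the locally owned and locally relevant degrees of freedom.

#include <base/index_set.h>
#include <lac/petsc_parallel_vector.h>

using namespace dealii;

void make_parallel_vectors (const MPI_Comm &communicator,
                            const IndexSet &locally_owned,
                            const IndexSet &locally_relevant)
{
  // Variant 1: global size plus the number of elements stored locally.
  PETScWrappers::MPI::Vector v1 (communicator,
                                 locally_owned.size (),
                                 locally_owned.n_elements ());

  // Variant 2: locally owned range plus ghost entries that are readable
  // here but owned by other processes.
  PETScWrappers::MPI::Vector v2 (communicator,
                                 locally_owned,
                                 locally_relevant);
}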
-
-
+
+
/**
* Copy the given vector. Resize the
* present vector if necessary. Also
*/
Vector & operator = (const Vector &v);
-
+
/**
* Copy the given sequential
* (non-distributed) vector
* @p communicator denotes the MPI
* communicator henceforth to be used
* for this vector.
- *
+ *
* If @p fast is false, the vector
 * is filled with zeros. Otherwise, the
 * elements are left in an unspecified
* state.
- */
+ */
void reinit (const MPI_Comm &communicator,
const unsigned int N,
const unsigned int local_size,
const bool fast = false);
-
+
/**
* Change the dimension to that of
* the vector @p v, and also take
const IndexSet & local,
const IndexSet & ghost = IndexSet(0));
-
+
/**
* Return a reference to the MPI
* communicator object in use with
virtual void create_vector (const unsigned int n,
const unsigned int local_size,
const IndexSet & ghostnodes);
-
+
private:
/**
*this = v;
}
-
-
+
+
inline
Vector &
Vector::operator = (const PetscScalar s)
return *this;
}
-
+
inline
// then first resize the present one
if (size() != v.size())
reinit (v.communicator, v.size(), v.local_size(), true);
-
+
const int ierr = VecCopy (v.vector, vector);
AssertThrow (ierr == 0, ExcPETScError(ierr));
-
+
return *this;
}
template <typename number>
inline
Vector &
- Vector::operator = (const dealii::Vector<number> &v)
+ Vector::operator = (const dealii::Vector<number> &v)
{
Assert (size() == v.size(),
ExcDimensionMismatch (size(), v.size()));