template <int dim>
class EvaluationBase
{
- public:
- virtual ~EvaluationBase ();
+ public:
+ virtual ~EvaluationBase ();
- void set_refinement_cycle (const unsigned int refinement_cycle);
+ void set_refinement_cycle (const unsigned int refinement_cycle);
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const = 0;
- protected:
- unsigned int refinement_cycle;
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const = 0;
+ protected:
+ unsigned int refinement_cycle;
};
template <int dim>
class PointValueEvaluation : public EvaluationBase<dim>
{
- public:
- PointValueEvaluation (const Point<dim> &evaluation_point,
- TableHandler &results_table);
-
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const;
-
- DeclException1 (ExcEvaluationPointNotFound,
- Point<dim>,
- << "The evaluation point " << arg1
- << " was not found among the vertices of the present grid.");
- private:
- const Point<dim> evaluation_point;
- TableHandler &results_table;
+ public:
+ PointValueEvaluation (const Point<dim> &evaluation_point,
+ TableHandler &results_table);
+
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const;
+
+ DeclException1 (ExcEvaluationPointNotFound,
+ Point<dim>,
+ << "The evaluation point " << arg1
+ << " was not found among the vertices of the present grid.");
+ private:
+ const Point<dim> evaluation_point;
+ TableHandler &results_table;
};
void
PointValueEvaluation<dim>::
operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const
+ const Vector<double> &solution) const
{
- // First allocate a variable that
- // will hold the point
- // value. Initialize it with a
- // value that is clearly bogus,
- // so that if we fail to set it
- // to a reasonable value, we will
- // note at once. This may not be
- // necessary in a function as
- // small as this one, since we
- // can easily see all possible
- // paths of execution here, but
- // it proved to be helpful for
- // more complex cases, and so we
- // employ this strategy here as
- // well.
+ // First allocate a variable that
+ // will hold the point
+ // value. Initialize it with a
+ // value that is clearly bogus,
+ // so that if we fail to set it
+ // to a reasonable value, we will
+ // note at once. This may not be
+ // necessary in a function as
+ // small as this one, since we
+ // can easily see all possible
+ // paths of execution here, but
+ // it proved to be helpful for
+ // more complex cases, and so we
+ // employ this strategy here as
+ // well.
double point_value = 1e20;
- // Then loop over all cells and
- // all their vertices, and check
- // whether a vertex matches the
- // evaluation point. If this is
- // the case, then extract the
- // point value, set a flag that
- // we have found the point of
- // interest, and exit the loop.
+ // Then loop over all cells and
+ // all their vertices, and check
+ // whether a vertex matches the
+ // evaluation point. If this is
+ // the case, then extract the
+ // point value, set a flag that
+ // we have found the point of
+ // interest, and exit the loop.
typename DoFHandler<dim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
bool evaluation_point_found = false;
for (; (cell!=endc) && !evaluation_point_found; ++cell)
for (unsigned int vertex=0;
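// The excerpt above breaks off inside the vertex loop. As a hedged sketch of
// how the search might continue (assuming the usual deal.II accessors
// GeometryInfo<dim>::vertices_per_cell, cell->vertex() and
// cell->vertex_dof_index(); an illustration, not necessarily the literal
// continuation of the program):
for (; (cell!=endc) && !evaluation_point_found; ++cell)
  for (unsigned int vertex=0;
       vertex<GeometryInfo<dim>::vertices_per_cell;
       ++vertex)
    if (cell->vertex(vertex) == evaluation_point)
      {
        // The solution value attached to this vertex is the point value we
        // are looking for:
        point_value = solution(cell->vertex_dof_index(vertex,0));
        evaluation_point_found = true;
        break;
      }
AssertThrow (evaluation_point_found,
             ExcEvaluationPointNotFound(evaluation_point));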
template <int dim>
class SolutionOutput : public EvaluationBase<dim>
{
- public:
- SolutionOutput (const std::string &output_name_base,
- const typename DataOut<dim>::OutputFormat output_format);
-
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const;
- private:
- const std::string output_name_base;
- const typename DataOut<dim>::OutputFormat output_format;
+ public:
+ SolutionOutput (const std::string &output_name_base,
+ const typename DataOut<dim>::OutputFormat output_format);
+
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const;
+ private:
+ const std::string output_name_base;
+ const typename DataOut<dim>::OutputFormat output_format;
};
template <int dim>
class EvaluationBase
{
- public:
- virtual ~EvaluationBase ();
+ public:
+ virtual ~EvaluationBase ();
- void set_refinement_cycle (const unsigned int refinement_cycle);
+ void set_refinement_cycle (const unsigned int refinement_cycle);
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const = 0;
- protected:
- unsigned int refinement_cycle;
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const = 0;
+ protected:
+ unsigned int refinement_cycle;
};
template <int dim>
class PointValueEvaluation : public EvaluationBase<dim>
{
- public:
- PointValueEvaluation (const Point<dim> &evaluation_point);
+ public:
+ PointValueEvaluation (const Point<dim> &evaluation_point);
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const;
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const;
- DeclException1 (ExcEvaluationPointNotFound,
- Point<dim>,
- << "The evaluation point " << arg1
- << " was not found among the vertices of the present grid.");
- private:
- const Point<dim> evaluation_point;
+ DeclException1 (ExcEvaluationPointNotFound,
+ Point<dim>,
+ << "The evaluation point " << arg1
+ << " was not found among the vertices of the present grid.");
+ private:
+ const Point<dim> evaluation_point;
};
template <int dim>
class PointXDerivativeEvaluation : public EvaluationBase<dim>
{
- public:
- PointXDerivativeEvaluation (const Point<dim> &evaluation_point);
+ public:
+ PointXDerivativeEvaluation (const Point<dim> &evaluation_point);
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const;
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const;
- DeclException1 (ExcEvaluationPointNotFound,
- Point<dim>,
- << "The evaluation point " << arg1
- << " was not found among the vertices of the present grid.");
- private:
- const Point<dim> evaluation_point;
+ DeclException1 (ExcEvaluationPointNotFound,
+ Point<dim>,
+ << "The evaluation point " << arg1
+ << " was not found among the vertices of the present grid.");
+ private:
+ const Point<dim> evaluation_point;
};
void
PointXDerivativeEvaluation<dim>::
operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const
+ const Vector<double> &solution) const
{
- // This time initialize the
- // return value with something
- // useful, since we will have to
- // add up a number of
- // contributions and take the
- // mean value afterwards...
+ // This time initialize the
+ // return value with something
+ // useful, since we will have to
+ // add up a number of
+ // contributions and take the
+ // mean value afterwards...
double point_derivative = 0;
- // ...then have some objects of
- // which the meaning will become
- // clear below...
+ // ...then have some objects of
+ // which the meaning will become
+ // clear below...
QTrapez<dim> vertex_quadrature;
FEValues<dim> fe_values (dof_handler.get_fe(),
vertex_quadrature,
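// The FEValues constructor call above is cut off; its last argument would be
// the set of update flags (at least update_gradients and
// update_quadrature_points are needed for what follows). A hedged sketch of
// how the x-derivative is then averaged over all cells touching the
// evaluation point (an illustration, not necessarily the literal program
// text):
std::vector<Tensor<1,dim> > solution_gradients (vertex_quadrature.size());

typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
unsigned int evaluation_point_hits = 0;
for (; cell!=endc; ++cell)
  for (unsigned int vertex=0;
       vertex<GeometryInfo<dim>::vertices_per_cell;
       ++vertex)
    if (cell->vertex(vertex) == evaluation_point)
      {
        // QTrapez places its quadrature points on the cell's vertices, so
        // evaluating the gradients there gives us the nodal gradients:
        fe_values.reinit (cell);
        fe_values.get_function_gradients (solution, solution_gradients);

        // Find the quadrature point sitting on the evaluation point and
        // accumulate the x-component of the gradient there:
        for (unsigned int q=0; q<solution_gradients.size(); ++q)
          if (fe_values.quadrature_point(q) == evaluation_point)
            {
              point_derivative += solution_gradients[q][0];
              ++evaluation_point_hits;
              break;
            }
        break;
      }

AssertThrow (evaluation_point_hits > 0,
             ExcEvaluationPointNotFound(evaluation_point));
point_derivative /= evaluation_point_hits;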
template <int dim>
class GridOutput : public EvaluationBase<dim>
{
- public:
- GridOutput (const std::string &output_name_base);
+ public:
+ GridOutput (const std::string &output_name_base);
- virtual void operator () (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution) const;
- private:
- const std::string output_name_base;
+ virtual void operator () (const DoFHandler<dim> &dof_handler,
+ const Vector<double> &solution) const;
+ private:
+ const std::string output_name_base;
};
template <int dim>
inline
void
- BodyForce<dim>::vector_value (const Point<dim> & /*p*/,
+ BodyForce<dim>::vector_value (const Point<dim> &/*p*/,
- Vector<double> &values) const
+ Vector<double> &values) const
{
Assert (values.size() == dim,
- ExcDimensionMismatch (values.size(), dim));
+ ExcDimensionMismatch (values.size(), dim));
const double g = 9.81;
const double rho = 7700;
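// The function body above is truncated in this excerpt. Given the density
// rho and the gravitational acceleration g just defined, the body force is
// the weight per unit volume acting along the negative vertical axis; a
// hedged sketch of the remaining lines (taking the last coordinate as the
// vertical direction, as the IncrementalBoundaryValues class below also
// does):
values = 0;
values(dim-1) = -rho * g;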
template <int dim>
void
IncrementalBoundaryValues<dim>::
- vector_value (const Point<dim> & /*p*/,
+ vector_value (const Point<dim> &/*p*/,
- Vector<double> &values) const
+ Vector<double> &values) const
{
Assert (values.size() == dim,
- ExcDimensionMismatch (values.size(), dim));
+ ExcDimensionMismatch (values.size(), dim));
values = 0;
values(2) = -present_timestep * velocity;
};
- // And then we also have to define
- // these respective functions, of
- // course. Given our discussion in
- // the introduction of what the
- // solution should look like, the
- // following computations should be
- // straightforward:
+ // And then we also have to define
+ // these respective functions, of
+ // course. Given our discussion in
+ // the introduction of what the
+ // solution should look like, the
+ // following computations should be
+ // straightforward:
template <int dim>
- double RightHandSide<dim>::value (const Point<dim> & /*p*/,
+ double RightHandSide<dim>::value (const Point<dim> &/*p*/,
const unsigned int /*component*/) const
{
return 0;
template <int dim>
class NeutronDiffusionProblem
{
+ public:
+ class Parameters
+ {
public:
- class Parameters
- {
- public:
- Parameters ();
-
- static void declare_parameters (ParameterHandler &prm);
- void get_parameters (ParameterHandler &prm);
-
- unsigned int n_groups;
- unsigned int n_refinement_cycles;
-
- unsigned int fe_degree;
-
- double convergence_tolerance;
- };
-
-
-
- NeutronDiffusionProblem (const Parameters &parameters);
- ~NeutronDiffusionProblem ();
-
- void run ();
-
- private:
- // @sect5{Private member functions}
-
- // There are not that many member
- // functions in this class since
- // most of the functionality has
- // been moved into the
- // <code>EnergyGroup</code> class
- // and is simply called from the
- // <code>run()</code> member
- // function of this class. The
- // ones that remain have
- // self-explanatory names:
- void initialize_problem();
-
- void refine_grid ();
-
- double get_total_fission_source () const;
-
-
- // @sect5{Private member variables}
-
- // Next, we have a few member
- // variables. In particular,
- // these are (i) a reference to
- // the parameter object (owned by
- // the main function of this
- // program, and passed to the
- // constructor of this class),
- // (ii) an object describing the
- // material parameters for the
- // number of energy groups
- // requested in the input file,
- // and (iii) the finite element
- // to be used by all energy
- // groups:
- const Parameters &parameters;
- const MaterialData material_data;
- FE_Q<dim> fe;
-
- // Furthermore, we have (iv) the
- // value of the computed
- // eigenvalue at the present
- // iteration. This is, in fact,
- // the only part of the solution
- // that is shared between all
- // energy groups -- all other
- // parts of the solution, such as
- // neutron fluxes are particular
- // to one or the other energy
- // group, and are therefore
- // stored in objects that
- // describe a single energy
- // group:
- double k_eff;
-
- // Finally, (v), we have an array
- // of pointers to the energy
- // group objects. The length of
- // this array is, of course,
- // equal to the number of energy
- // groups specified in the
- // parameter file.
- std::vector<EnergyGroup<dim>*> energy_groups;
+ Parameters ();
+
+ static void declare_parameters (ParameterHandler &prm);
+ void get_parameters (ParameterHandler &prm);
+
+ unsigned int n_groups;
+ unsigned int n_refinement_cycles;
+
+ unsigned int fe_degree;
+
+ double convergence_tolerance;
+ };
+
+
+
+ NeutronDiffusionProblem (const Parameters &parameters);
+ ~NeutronDiffusionProblem ();
+
+ void run ();
+
+ private:
+ // @sect5{Private member functions}
+
+ // There are not that many member
+ // functions in this class since
+ // most of the functionality has
+ // been moved into the
+ // <code>EnergyGroup</code> class
+ // and is simply called from the
+ // <code>run()</code> member
+ // function of this class. The
+ // ones that remain have
+ // self-explanatory names:
+ void initialize_problem();
+
+ void refine_grid ();
+
+ double get_total_fission_source () const;
+
+
+ // @sect5{Private member variables}
+
+ // Next, we have a few member
+ // variables. In particular,
+ // these are (i) a reference to
+ // the parameter object (owned by
+ // the main function of this
+ // program, and passed to the
+ // constructor of this class),
+ // (ii) an object describing the
+ // material parameters for the
+ // number of energy groups
+ // requested in the input file,
+ // and (iii) the finite element
+ // to be used by all energy
+ // groups:
+ const Parameters &parameters;
+ const MaterialData material_data;
+ FE_Q<dim> fe;
+
+ // Furthermore, we have (iv) the
+ // value of the computed
+ // eigenvalue at the present
+ // iteration. This is, in fact,
+ // the only part of the solution
+ // that is shared between all
+ // energy groups -- all other
+ // parts of the solution, such as
+ // neutron fluxes are particular
+ // to one or the other energy
+ // group, and are therefore
+ // stored in objects that
+ // describe a single energy
+ // group:
+ double k_eff;
+
+ // Finally, (v), we have an array
+ // of pointers to the energy
+ // group objects. The length of
+ // this array is, of course,
+ // equal to the number of energy
+ // groups specified in the
+ // parameter file.
+ std::vector<EnergyGroup<dim>*> energy_groups;
};
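// The Parameters class above only declares declare_parameters() and
// get_parameters(). As an illustration of how such a pair is typically
// implemented with deal.II's ParameterHandler (the entry names and default
// values below are illustrative assumptions, not necessarily those of the
// actual program):
template <int dim>
void NeutronDiffusionProblem<dim>::Parameters::
declare_parameters (ParameterHandler &prm)
{
  prm.declare_entry ("Number of energy groups", "2",
                     Patterns::Integer (),
                     "The number of energy groups considered");
  prm.declare_entry ("Refinement cycles", "5",
                     Patterns::Integer (),
                     "Number of refinement cycles to be performed");
  prm.declare_entry ("Finite element degree", "2",
                     Patterns::Integer (),
                     "Polynomial degree of the finite element to be used");
  prm.declare_entry ("Power iteration tolerance", "1e-12",
                     Patterns::Double (),
                     "Inner power iterations are stopped when the change in "
                     "k_eff falls below this tolerance");
}

template <int dim>
void NeutronDiffusionProblem<dim>::Parameters::
get_parameters (ParameterHandler &prm)
{
  n_groups              = prm.get_integer ("Number of energy groups");
  n_refinement_cycles   = prm.get_integer ("Refinement cycles");
  fe_degree             = prm.get_integer ("Finite element degree");
  convergence_tolerance = prm.get_double ("Power iteration tolerance");
}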
- // The constructor takes the
- // ParameterHandler object and stores
- // it in a reference. It also
- // initializes the DoF-Handler and
- // the finite element system, which
- // consists of two copies of the
- // scalar Q1 field, one for $v$ and
- // one for $w$:
+ // The constructor takes the
+ // ParameterHandler object and stores
+ // it in a reference. It also
+ // initializes the DoF-Handler and
+ // the finite element system, which
+ // consists of two copies of the
+ // scalar Q1 field, one for $v$ and
+ // one for $w$:
template <int dim>
- UltrasoundProblem<dim>::UltrasoundProblem (ParameterHandler& param)
- :
- prm(param),
- dof_handler(triangulation),
- fe(FE_Q<dim>(1), 2)
+ UltrasoundProblem<dim>::UltrasoundProblem (ParameterHandler &param)
+ :
+ prm(param),
+ dof_handler(triangulation),
+ fe(FE_Q<dim>(1), 2)
{}
template <int dim>
double
- TemperatureRightHandSide<dim>::value (const Point<dim> &p,
+ TemperatureRightHandSide<dim>::value (const Point<dim> &p,
- const unsigned int component) const
+ const unsigned int component) const
{
Assert (component == 0,
- ExcMessage ("Invalid operation for a scalar function."));
+ ExcMessage ("Invalid operation for a scalar function."));
Assert ((dim==2) || (dim==3), ExcNotImplemented());
template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::
- BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
+ BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
- const InverseMatrix<TrilinosWrappers::SparseMatrix,
- PreconditionerMp> &Mpinv,
- const PreconditionerA &Apreconditioner)
- :
- stokes_matrix (&S),
- m_inverse (&Mpinv),
- a_preconditioner (Apreconditioner),
- tmp (stokes_matrix->block(1,1).m())
+ const InverseMatrix<TrilinosWrappers::SparseMatrix,
+ PreconditionerMp> &Mpinv,
+ const PreconditionerA &Apreconditioner)
+ :
+ stokes_matrix (&S),
+ m_inverse (&Mpinv),
+ a_preconditioner (Apreconditioner),
+ tmp (stokes_matrix->block(1,1).m())
{}
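// The class whose constructor is shown above also needs a vmult() function
// that actually applies the block preconditioner. A hedged sketch of what
// that application typically looks like for this kind of preconditioner
// (it assumes the member <code>tmp</code> is declared mutable so it can be
// used as scratch space in a const member function; an illustration, not
// necessarily the literal program text):
void vmult (TrilinosWrappers::BlockVector &dst,
            const TrilinosWrappers::BlockVector &src) const
{
  // Approximately invert the velocity-velocity block using the
  // preconditioner handed to the constructor:
  a_preconditioner.vmult (dst.block(0), src.block(0));

  // Form tmp = src.block(1) - B * dst.block(0), the residual of the second
  // block row, ...
  stokes_matrix->block(1,0).vmult (tmp, dst.block(0));
  tmp *= -1;
  tmp += src.block(1);

  // ... and apply the approximate Schur complement inverse (here, the
  // inverse of the pressure mass matrix) to it:
  m_inverse->vmult (dst.block(1), tmp);
}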
template <int dim>
class BoussinesqFlowProblem
{
- public:
- BoussinesqFlowProblem ();
- void run ();
+ public:
+ BoussinesqFlowProblem ();
+ void run ();
+
+ private:
+ void setup_dofs ();
+ void assemble_stokes_preconditioner ();
+ void build_stokes_preconditioner ();
+ void assemble_stokes_system ();
+ void assemble_temperature_system (const double maximal_velocity);
+ void assemble_temperature_matrix ();
+ double get_maximal_velocity () const;
+ std::pair<double,double> get_extrapolated_temperature_range () const;
+ void solve ();
+ void output_results () const;
+ void refine_mesh (const unsigned int max_grid_level);
- private:
- void setup_dofs ();
- void assemble_stokes_preconditioner ();
- void build_stokes_preconditioner ();
- void assemble_stokes_system ();
- void assemble_temperature_system (const double maximal_velocity);
- void assemble_temperature_matrix ();
- double get_maximal_velocity () const;
- std::pair<double,double> get_extrapolated_temperature_range () const;
- void solve ();
- void output_results () const;
- void refine_mesh (const unsigned int max_grid_level);
-
- double
- compute_viscosity(const std::vector<double> &old_temperature,
- const std::vector<double> &old_old_temperature,
- const std::vector<Tensor<1,dim> > &old_temperature_grads,
- const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
- const std::vector<double> &old_temperature_laplacians,
- const std::vector<double> &old_old_temperature_laplacians,
- const std::vector<Tensor<1,dim> > &old_velocity_values,
- const std::vector<Tensor<1,dim> > &old_old_velocity_values,
- const std::vector<double> &gamma_values,
- const double global_u_infty,
- const double global_T_variation,
- const double cell_diameter) const;
-
-
- Triangulation<dim> triangulation;
- double global_Omega_diameter;
-
- const unsigned int stokes_degree;
- FESystem<dim> stokes_fe;
- DoFHandler<dim> stokes_dof_handler;
- ConstraintMatrix stokes_constraints;
-
- std::vector<unsigned int> stokes_block_sizes;
- TrilinosWrappers::BlockSparseMatrix stokes_matrix;
- TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
-
- TrilinosWrappers::BlockVector stokes_solution;
- TrilinosWrappers::BlockVector old_stokes_solution;
- TrilinosWrappers::BlockVector stokes_rhs;
-
-
- const unsigned int temperature_degree;
- FE_Q<dim> temperature_fe;
- DoFHandler<dim> temperature_dof_handler;
- ConstraintMatrix temperature_constraints;
-
- TrilinosWrappers::SparseMatrix temperature_mass_matrix;
- TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
- TrilinosWrappers::SparseMatrix temperature_matrix;
-
- TrilinosWrappers::Vector temperature_solution;
- TrilinosWrappers::Vector old_temperature_solution;
- TrilinosWrappers::Vector old_old_temperature_solution;
- TrilinosWrappers::Vector temperature_rhs;
-
-
- double time_step;
- double old_time_step;
- unsigned int timestep_number;
-
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Mp_preconditioner;
-
- bool rebuild_stokes_matrix;
- bool rebuild_temperature_matrices;
- bool rebuild_stokes_preconditioner;
+ double
+ compute_viscosity(const std::vector<double> &old_temperature,
+ const std::vector<double> &old_old_temperature,
+ const std::vector<Tensor<1,dim> > &old_temperature_grads,
+ const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
+ const std::vector<double> &old_temperature_laplacians,
+ const std::vector<double> &old_old_temperature_laplacians,
+ const std::vector<Tensor<1,dim> > &old_velocity_values,
+ const std::vector<Tensor<1,dim> > &old_old_velocity_values,
+ const std::vector<double> &gamma_values,
+ const double global_u_infty,
+ const double global_T_variation,
+ const double cell_diameter) const;
+
+
+ Triangulation<dim> triangulation;
+ double global_Omega_diameter;
+
+ const unsigned int stokes_degree;
+ FESystem<dim> stokes_fe;
+ DoFHandler<dim> stokes_dof_handler;
+ ConstraintMatrix stokes_constraints;
+
+ std::vector<unsigned int> stokes_block_sizes;
+ TrilinosWrappers::BlockSparseMatrix stokes_matrix;
+ TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
+
+ TrilinosWrappers::BlockVector stokes_solution;
+ TrilinosWrappers::BlockVector old_stokes_solution;
+ TrilinosWrappers::BlockVector stokes_rhs;
+
+
+ const unsigned int temperature_degree;
+ FE_Q<dim> temperature_fe;
+ DoFHandler<dim> temperature_dof_handler;
+ ConstraintMatrix temperature_constraints;
+
+ TrilinosWrappers::SparseMatrix temperature_mass_matrix;
+ TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
+ TrilinosWrappers::SparseMatrix temperature_matrix;
+
+ TrilinosWrappers::Vector temperature_solution;
+ TrilinosWrappers::Vector old_temperature_solution;
+ TrilinosWrappers::Vector old_old_temperature_solution;
+ TrilinosWrappers::Vector temperature_rhs;
+
+
+ double time_step;
+ double old_time_step;
+ unsigned int timestep_number;
+
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Mp_preconditioner;
+
+ bool rebuild_stokes_matrix;
+ bool rebuild_temperature_matrices;
+ bool rebuild_stokes_preconditioner;
};
double
BoussinesqFlowProblem<dim>::
compute_viscosity (const std::vector<double> &old_temperature,
- const std::vector<double> &old_old_temperature,
- const std::vector<Tensor<1,dim> > &old_temperature_grads,
- const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
- const std::vector<double> &old_temperature_laplacians,
- const std::vector<double> &old_old_temperature_laplacians,
- const std::vector<Tensor<1,dim> > &old_velocity_values,
- const std::vector<Tensor<1,dim> > &old_old_velocity_values,
- const std::vector<double> &gamma_values,
- const double global_u_infty,
- const double global_T_variation,
- const double cell_diameter) const
+ const std::vector<double> &old_old_temperature,
+ const std::vector<Tensor<1,dim> > &old_temperature_grads,
+ const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
+ const std::vector<double> &old_temperature_laplacians,
+ const std::vector<double> &old_old_temperature_laplacians,
+ const std::vector<Tensor<1,dim> > &old_velocity_values,
+ const std::vector<Tensor<1,dim> > &old_old_velocity_values,
+ const std::vector<double> &gamma_values,
+ const double global_u_infty,
+ const double global_T_variation,
+ const double cell_diameter) const
{
const double beta = 0.015 * dim;
const double alpha = 1;
template <int dim>
double
- TemperatureInitialValues<dim>::value (const Point<dim> &p,
+ TemperatureInitialValues<dim>::value (const Point<dim> &p,
- const unsigned int) const
+ const unsigned int) const
{
const double r = p.norm();
const double h = R1-R0;
template <class PreconditionerA, class PreconditionerMp>
class BlockSchurPreconditioner : public Subscriptor
{
- public:
- BlockSchurPreconditioner (const TrilinosWrappers::BlockSparseMatrix &S,
- const TrilinosWrappers::BlockSparseMatrix &Spre,
- const PreconditionerMp &Mppreconditioner,
- const PreconditionerA &Apreconditioner,
- const bool do_solve_A)
- :
- stokes_matrix (&S),
- stokes_preconditioner_matrix (&Spre),
- mp_preconditioner (Mppreconditioner),
- a_preconditioner (Apreconditioner),
- do_solve_A (do_solve_A)
- {}
-
- void vmult (TrilinosWrappers::MPI::BlockVector &dst,
- const TrilinosWrappers::MPI::BlockVector &src) const
- {
- TrilinosWrappers::MPI::Vector utmp(src.block(0));
-
- {
- SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
-
- SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
-
- solver.solve(stokes_preconditioner_matrix->block(1,1),
- dst.block(1), src.block(1),
- mp_preconditioner);
-
- dst.block(1) *= -1.0;
- }
-
- {
- stokes_matrix->block(0,1).vmult(utmp, dst.block(1));
- utmp*=-1.0;
- utmp.add(src.block(0));
- }
-
- if (do_solve_A == true)
- {
- SolverControl solver_control(5000, utmp.l2_norm()*1e-2);
- TrilinosWrappers::SolverCG solver(solver_control);
- solver.solve(stokes_matrix->block(0,0), dst.block(0), utmp,
- a_preconditioner);
- }
- else
- a_preconditioner.vmult (dst.block(0), utmp);
- }
-
- private:
- const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_matrix;
- const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_preconditioner_matrix;
- const PreconditionerMp &mp_preconditioner;
- const PreconditionerA &a_preconditioner;
- const bool do_solve_A;
+ public:
+ BlockSchurPreconditioner (const TrilinosWrappers::BlockSparseMatrix &S,
+ const TrilinosWrappers::BlockSparseMatrix &Spre,
+ const PreconditionerMp &Mppreconditioner,
+ const PreconditionerA &Apreconditioner,
+ const bool do_solve_A)
+ :
+ stokes_matrix (&S),
+ stokes_preconditioner_matrix (&Spre),
+ mp_preconditioner (Mppreconditioner),
+ a_preconditioner (Apreconditioner),
+ do_solve_A (do_solve_A)
+ {}
+
+ void vmult (TrilinosWrappers::MPI::BlockVector &dst,
+ const TrilinosWrappers::MPI::BlockVector &src) const
+ {
+ TrilinosWrappers::MPI::Vector utmp(src.block(0));
+
+ {
+ SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
+
+ SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
+
+ solver.solve(stokes_preconditioner_matrix->block(1,1),
+ dst.block(1), src.block(1),
+ mp_preconditioner);
+
+ dst.block(1) *= -1.0;
+ }
+
+ {
+ stokes_matrix->block(0,1).vmult(utmp, dst.block(1));
+ utmp*=-1.0;
+ utmp.add(src.block(0));
+ }
+
+ if (do_solve_A == true)
+ {
+ SolverControl solver_control(5000, utmp.l2_norm()*1e-2);
+ TrilinosWrappers::SolverCG solver(solver_control);
+ solver.solve(stokes_matrix->block(0,0), dst.block(0), utmp,
+ a_preconditioner);
+ }
+ else
+ a_preconditioner.vmult (dst.block(0), utmp);
+ }
+
+ private:
+ const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_matrix;
+ const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_preconditioner_matrix;
+ const PreconditionerMp &mp_preconditioner;
+ const PreconditionerA &a_preconditioner;
+ const bool do_solve_A;
};
}
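// A hedged usage sketch: a preconditioner like the one defined above is
// typically handed to a Krylov solver for the coupled Stokes system. The
// concrete preconditioner types, the tolerance, and the name
// distributed_stokes_solution (a fully distributed, non-ghosted block
// vector) are illustrative assumptions, not the literal program text:
const BlockSchurPreconditioner<TrilinosWrappers::PreconditionAMG,
                               TrilinosWrappers::PreconditionJacobi>
  preconditioner (stokes_matrix, stokes_preconditioner_matrix,
                  *Mp_preconditioner, *Amg_preconditioner,
                  /* do_solve_A = */ false);

SolverControl solver_control (stokes_matrix.m(),
                              1e-8 * stokes_rhs.l2_norm());
SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver (solver_control);

solver.solve (stokes_matrix, distributed_stokes_solution, stokes_rhs,
              preconditioner);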
template <int dim>
class BoussinesqFlowProblem
{
- public:
- struct Parameters;
- BoussinesqFlowProblem (Parameters &parameters);
- void run ();
+ public:
+ struct Parameters;
+ BoussinesqFlowProblem (Parameters &parameters);
+ void run ();
+
+ private:
+ void setup_dofs ();
+ void assemble_stokes_preconditioner ();
+ void build_stokes_preconditioner ();
+ void assemble_stokes_system ();
+ void assemble_temperature_matrix ();
+ void assemble_temperature_system (const double maximal_velocity);
+ void project_temperature_field ();
+ double get_maximal_velocity () const;
+ double get_cfl_number () const;
+ double get_entropy_variation (const double average_temperature) const;
+ std::pair<double,double> get_extrapolated_temperature_range () const;
+ void solve ();
+ void output_results ();
+ void refine_mesh (const unsigned int max_grid_level);
- private:
- void setup_dofs ();
- void assemble_stokes_preconditioner ();
- void build_stokes_preconditioner ();
- void assemble_stokes_system ();
- void assemble_temperature_matrix ();
- void assemble_temperature_system (const double maximal_velocity);
- void project_temperature_field ();
- double get_maximal_velocity () const;
- double get_cfl_number () const;
- double get_entropy_variation (const double average_temperature) const;
- std::pair<double,double> get_extrapolated_temperature_range () const;
- void solve ();
- void output_results ();
- void refine_mesh (const unsigned int max_grid_level);
-
- double
- compute_viscosity(const std::vector<double> &old_temperature,
- const std::vector<double> &old_old_temperature,
- const std::vector<Tensor<1,dim> > &old_temperature_grads,
- const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
- const std::vector<double> &old_temperature_laplacians,
- const std::vector<double> &old_old_temperature_laplacians,
- const std::vector<Tensor<1,dim> > &old_velocity_values,
- const std::vector<Tensor<1,dim> > &old_old_velocity_values,
- const std::vector<SymmetricTensor<2,dim> > &old_strain_rates,
- const std::vector<SymmetricTensor<2,dim> > &old_old_strain_rates,
- const double global_u_infty,
- const double global_T_variation,
- const double average_temperature,
- const double global_entropy_variation,
- const double cell_diameter) const;
+ double
+ compute_viscosity(const std::vector<double> &old_temperature,
+ const std::vector<double> &old_old_temperature,
+ const std::vector<Tensor<1,dim> > &old_temperature_grads,
+ const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
+ const std::vector<double> &old_temperature_laplacians,
+ const std::vector<double> &old_old_temperature_laplacians,
+ const std::vector<Tensor<1,dim> > &old_velocity_values,
+ const std::vector<Tensor<1,dim> > &old_old_velocity_values,
+ const std::vector<SymmetricTensor<2,dim> > &old_strain_rates,
+ const std::vector<SymmetricTensor<2,dim> > &old_old_strain_rates,
+ const double global_u_infty,
+ const double global_T_variation,
+ const double average_temperature,
+ const double global_entropy_variation,
+ const double cell_diameter) const;
+
+ public:
+
+ // The first significant new
+ // component is the definition
+ // of a struct for the
+ // parameters according to the
+ // discussion in the
+ // introduction. This structure
+ // is initialized by reading
+ // from a parameter file during
+ // construction of this object.
+ struct Parameters
+ {
+ Parameters (const std::string &parameter_filename);
- public:
+ static void declare_parameters (ParameterHandler &prm);
+ void parse_parameters (ParameterHandler &prm);
- // The first significant new
- // component is the definition
- // of a struct for the
- // parameters according to the
- // discussion in the
- // introduction. This structure
- // is initialized by reading
- // from a parameter file during
- // construction of this object.
- struct Parameters
- {
- Parameters (const std::string &parameter_filename);
+ double end_time;
- static void declare_parameters (ParameterHandler &prm);
- void parse_parameters (ParameterHandler &prm);
+ unsigned int initial_global_refinement;
+ unsigned int initial_adaptive_refinement;
- double end_time;
+ bool generate_graphical_output;
+ unsigned int graphical_output_interval;
- unsigned int initial_global_refinement;
- unsigned int initial_adaptive_refinement;
+ unsigned int adaptive_refinement_interval;
- bool generate_graphical_output;
- unsigned int graphical_output_interval;
+ double stabilization_alpha;
+ double stabilization_c_R;
+ double stabilization_beta;
- unsigned int adaptive_refinement_interval;
+ unsigned int stokes_velocity_degree;
+ bool use_locally_conservative_discretization;
- double stabilization_alpha;
- double stabilization_c_R;
- double stabilization_beta;
+ unsigned int temperature_degree;
+ };
- unsigned int stokes_velocity_degree;
- bool use_locally_conservative_discretization;
+ private:
+ Parameters &parameters;
+
+ // The <code>pcout</code> (for
+ // <i>%parallel
+ // <code>std::cout</code></i>)
+ // object is used to simplify
+ // writing output: each MPI
+ // process can use this to
+ // generate output as usual,
+ // but since each of these
+ // processes will (hopefully)
+ // produce the same output it
+ // will just be replicated many
+ // times over; with the
+ // ConditionalOStream class,
+ // only the output generated by
+ // one MPI process will
+ // actually be printed to
+ // screen, whereas the output
+ // by all the other processes
+ // will simply be forgotten.
+ ConditionalOStream pcout;
+
+ // The following member
+ // variables will then again be
+ // similar to those in step-31
+ // (and to other tutorial
+ // programs). As mentioned in
+ // the introduction, we fully
+ // distribute computations, so
+ // we will have to use the
+ // parallel::distributed::Triangulation
+ // class (see step-40) but the
+ // remainder of these variables
+ // is rather standard with two
+ // exceptions:
+ //
+ // - The <code>mapping</code>
+ // variable is used to denote a
+ // higher-order polynomial
+ // mapping. As mentioned in the
+ // introduction, we use this
+ // mapping when forming
+ // integrals through quadrature
+ // for all cells that are
+ // adjacent to either the inner
+ // or outer boundaries of our
+ // domain where the boundary is
+ // curved.
+ //
+ // - In a bit of naming
+ // confusion, you will notice
+ // below that some of the
+ // variables from namespace
+ // TrilinosWrappers are taken
+ // from namespace
+ // TrilinosWrappers::MPI (such
+ // as the right hand side
+ // vectors) whereas others are
+ // not (such as the various
+ // matrices). For the matrices,
+ // we happen to use the same
+ // class names for %parallel
+ // and sequential data
+ // structures, i.e., all
+ // matrices will actually be
+ // considered %parallel
+ // below. On the other hand,
+ // for vectors, only those from
+ // namespace
+ // TrilinosWrappers::MPI are
+ // actually distributed. In
+ // particular, we will
+ // frequently have to query
+ // velocities and temperatures
+ // at arbitrary quadrature
+ // points; consequently, rather
+ // than importing ghost
+ // information of a vector
+ // whenever we need access to
+ // degrees of freedom that are
+ // relevant locally but owned
+ // by another processor, we
+ // solve linear systems in
+ // %parallel but then
+ // immediately initialize a
+ // vector including ghost
+ // entries of the solution for
+ // further processing. The
+ // various
+ // <code>*_solution</code>
+ // vectors are therefore filled
+ // immediately after solving
+ // their respective linear
+ // system in %parallel and will
+ // always contain values for
+ // all @ref
+ // GlossLocallyRelevantDof
+ // "locally relevant degrees of freedom";
+ // the fully
+ // distributed vectors that we
+ // obtain from the solution
+ // process and that only ever
+ // contain the @ref
+ // GlossLocallyOwnedDof
+ // "locally owned degrees of freedom"
+ // are destroyed
+ // immediately after the
+ // solution process and after
+ // we have copied the relevant
+ // values into the member
+ // variable vectors.
+ parallel::distributed::Triangulation<dim> triangulation;
+ double global_Omega_diameter;
+
+ const MappingQ<dim> mapping;
+
+ const FESystem<dim> stokes_fe;
+ DoFHandler<dim> stokes_dof_handler;
+ ConstraintMatrix stokes_constraints;
+
+ TrilinosWrappers::BlockSparseMatrix stokes_matrix;
+ TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
+
+ TrilinosWrappers::MPI::BlockVector stokes_solution;
+ TrilinosWrappers::MPI::BlockVector old_stokes_solution;
+ TrilinosWrappers::MPI::BlockVector stokes_rhs;
+
+
+ FE_Q<dim> temperature_fe;
+ DoFHandler<dim> temperature_dof_handler;
+ ConstraintMatrix temperature_constraints;
+
+ TrilinosWrappers::SparseMatrix temperature_mass_matrix;
+ TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
+ TrilinosWrappers::SparseMatrix temperature_matrix;
+
+ TrilinosWrappers::MPI::Vector temperature_solution;
+ TrilinosWrappers::MPI::Vector old_temperature_solution;
+ TrilinosWrappers::MPI::Vector old_old_temperature_solution;
+ TrilinosWrappers::MPI::Vector temperature_rhs;
+
+
+ double time_step;
+ double old_time_step;
+ unsigned int timestep_number;
+
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
+
+ bool rebuild_stokes_matrix;
+ bool rebuild_stokes_preconditioner;
+ bool rebuild_temperature_matrices;
+ bool rebuild_temperature_preconditioner;
+
+ // The next member variable,
+ // <code>computing_timer</code>
+ // is used to conveniently
+ // account for compute time
+ // spent in certain "sections"
+ // of the code that are
+ // repeatedly entered. For
+ // example, we will enter (and
+ // leave) sections for Stokes
+ // matrix assembly and would
+ // like to accumulate the run
+ // time spent in this section
+ // over all time steps. Every
+ // so many time steps as well
+ // as at the end of the program
+ // (through the destructor of
+ // the TimerOutput class) we
+ // will then produce a nice
+ // summary of the times spent
+ // in the different sections
+ // into which we categorize the
+ // run-time of this program.
+ TimerOutput computing_timer;
+
+ // After these member variables
+ // we have a number of
+ // auxiliary functions that
+ // have been broken out of the
+ // ones listed
+ // above. Specifically, there
+ // are first three functions
+ // that we call from
+ // <code>setup_dofs</code> and
+ // then the ones that do the
+ // assembling of linear
+ // systems:
+ void setup_stokes_matrix (const std::vector<IndexSet> &stokes_partitioning);
+ void setup_stokes_preconditioner (const std::vector<IndexSet> &stokes_partitioning);
+ void setup_temperature_matrices (const IndexSet &temperature_partitioning);
+
+
+ // Following the @ref
+ // MTWorkStream
+ // "task-based parallelization"
+ // paradigm,
+ // we split all the assembly
+ // routines into two parts: a
+ // first part that can do all
+ // the calculations on a
+ // certain cell without taking
+ // care of other threads, and a
+ // second part (which is
+ // writing the local data into
+ // the global matrices and
+ // vectors) which can be
+ // entered by only one thread
+ // at a time. In order to
+ // implement that, we provide
+ // functions for each of those
+ // two steps for all the four
+ // assembly routines that we
+ // use in this program. The
+ // following eight functions do
+ // exactly this:
+ void
+ local_assemble_stokes_preconditioner (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ Assembly::Scratch::StokesPreconditioner<dim> &scratch,
+ Assembly::CopyData::StokesPreconditioner<dim> &data);
- unsigned int temperature_degree;
- };
+ void
+ copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner<dim> &data);
- private:
- Parameters &parameters;
-
- // The <code>pcout</code> (for
- // <i>%parallel
- // <code>std::cout</code></i>)
- // object is used to simplify
- // writing output: each MPI
- // process can use this to
- // generate output as usual,
- // but since each of these
- // processes will (hopefully)
- // produce the same output it
- // will just be replicated many
- // times over; with the
- // ConditionalOStream class,
- // only the output generated by
- // one MPI process will
- // actually be printed to
- // screen, whereas the output
- // by all the other processes
- // will simply be forgotten.
- ConditionalOStream pcout;
-
- // The following member
- // variables will then again be
- // similar to those in step-31
- // (and to other tutorial
- // programs). As mentioned in
- // the introduction, we fully
- // distribute computations, so
- // we will have to use the
- // parallel::distributed::Triangulation
- // class (see step-40) but the
- // remainder of these variables
- // is rather standard with two
- // exceptions:
- //
- // - The <code>mapping</code>
- // variable is used to denote a
- // higher-order polynomial
- // mapping. As mentioned in the
- // introduction, we use this
- // mapping when forming
- // integrals through quadrature
- // for all cells that are
- // adjacent to either the inner
- // or outer boundaries of our
- // domain where the boundary is
- // curved.
- //
- // - In a bit of naming
- // confusion, you will notice
- // below that some of the
- // variables from namespace
- // TrilinosWrappers are taken
- // from namespace
- // TrilinosWrappers::MPI (such
- // as the right hand side
- // vectors) whereas others are
- // not (such as the various
- // matrices). For the matrices,
- // we happen to use the same
- // class names for %parallel
- // and sequential data
- // structures, i.e., all
- // matrices will actually be
- // considered %parallel
- // below. On the other hand,
- // for vectors, only those from
- // namespace
- // TrilinosWrappers::MPI are
- // actually distributed. In
- // particular, we will
- // frequently have to query
- // velocities and temperatures
- // at arbitrary quadrature
- // points; consequently, rather
- // than importing ghost
- // information of a vector
- // whenever we need access to
- // degrees of freedom that are
- // relevant locally but owned
- // by another processor, we
- // solve linear systems in
- // %parallel but then
- // immediately initialize a
- // vector including ghost
- // entries of the solution for
- // further processing. The
- // various
- // <code>*_solution</code>
- // vectors are therefore filled
- // immediately after solving
- // their respective linear
- // system in %parallel and will
- // always contain values for
- // all @ref
- // GlossLocallyRelevantDof
- // "locally relevant degrees of freedom";
- // the fully
- // distributed vectors that we
- // obtain from the solution
- // process and that only ever
- // contain the @ref
- // GlossLocallyOwnedDof
- // "locally owned degrees of freedom"
- // are destroyed
- // immediately after the
- // solution process and after
- // we have copied the relevant
- // values into the member
- // variable vectors.
- parallel::distributed::Triangulation<dim> triangulation;
- double global_Omega_diameter;
-
- const MappingQ<dim> mapping;
-
- const FESystem<dim> stokes_fe;
- DoFHandler<dim> stokes_dof_handler;
- ConstraintMatrix stokes_constraints;
-
- TrilinosWrappers::BlockSparseMatrix stokes_matrix;
- TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
-
- TrilinosWrappers::MPI::BlockVector stokes_solution;
- TrilinosWrappers::MPI::BlockVector old_stokes_solution;
- TrilinosWrappers::MPI::BlockVector stokes_rhs;
-
-
- FE_Q<dim> temperature_fe;
- DoFHandler<dim> temperature_dof_handler;
- ConstraintMatrix temperature_constraints;
-
- TrilinosWrappers::SparseMatrix temperature_mass_matrix;
- TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
- TrilinosWrappers::SparseMatrix temperature_matrix;
-
- TrilinosWrappers::MPI::Vector temperature_solution;
- TrilinosWrappers::MPI::Vector old_temperature_solution;
- TrilinosWrappers::MPI::Vector old_old_temperature_solution;
- TrilinosWrappers::MPI::Vector temperature_rhs;
-
-
- double time_step;
- double old_time_step;
- unsigned int timestep_number;
-
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
-
- bool rebuild_stokes_matrix;
- bool rebuild_stokes_preconditioner;
- bool rebuild_temperature_matrices;
- bool rebuild_temperature_preconditioner;
-
- // The next member variable,
- // <code>computing_timer</code>
- // is used to conveniently
- // account for compute time
- // spent in certain "sections"
- // of the code that are
- // repeatedly entered. For
- // example, we will enter (and
- // leave) sections for Stokes
- // matrix assembly and would
- // like to accumulate the run
- // time spent in this section
- // over all time steps. Every
- // so many time steps as well
- // as at the end of the program
- // (through the destructor of
- // the TimerOutput class) we
- // will then produce a nice
- // summary of the times spent
- // in the different sections
- // into which we categorize the
- // run-time of this program.
- TimerOutput computing_timer;
-
- // After these member variables
- // we have a number of
- // auxiliary functions that
- // have been broken out of the
- // ones listed
- // above. Specifically, there
- // are first three functions
- // that we call from
- // <code>setup_dofs</code> and
- // then the ones that do the
- // assembling of linear
- // systems:
- void setup_stokes_matrix (const std::vector<IndexSet> &stokes_partitioning);
- void setup_stokes_preconditioner (const std::vector<IndexSet> &stokes_partitioning);
- void setup_temperature_matrices (const IndexSet &temperature_partitioning);
-
-
- // Following the @ref
- // MTWorkStream
- // "task-based parallelization"
- // paradigm,
- // we split all the assembly
- // routines into two parts: a
- // first part that can do all
- // the calculations on a
- // certain cell without taking
- // care of other threads, and a
- // second part (which is
- // writing the local data into
- // the global matrices and
- // vectors) which can be
- // entered by only one thread
- // at a time. In order to
- // implement that, we provide
- // functions for each of those
- // two steps for all the four
- // assembly routines that we
- // use in this program. The
- // following eight functions do
- // exactly this:
- void
- local_assemble_stokes_preconditioner (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembly::Scratch::StokesPreconditioner<dim> &scratch,
- Assembly::CopyData::StokesPreconditioner<dim> &data);
-
- void
- copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner<dim> &data);
-
-
- void
- local_assemble_stokes_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembly::Scratch::StokesSystem<dim> &scratch,
- Assembly::CopyData::StokesSystem<dim> &data);
-
- void
- copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem<dim> &data);
-
-
- void
- local_assemble_temperature_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembly::Scratch::TemperatureMatrix<dim> &scratch,
- Assembly::CopyData::TemperatureMatrix<dim> &data);
-
- void
- copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix<dim> &data);
-
-
-
- void
- local_assemble_temperature_rhs (const std::pair<double,double> global_T_range,
- const double global_max_velocity,
- const double global_entropy_variation,
- const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembly::Scratch::TemperatureRHS<dim> &scratch,
- Assembly::CopyData::TemperatureRHS<dim> &data);
-
- void
- copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS<dim> &data);
-
- // Finally, we forward declare
- // a member class that we will
- // define later on and that
- // will be used to compute a
- // number of quantities from
- // our solution vectors that
- // we'd like to put into the
- // output files for
- // visualization.
- class Postprocessor;
+
+ void
+ local_assemble_stokes_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ Assembly::Scratch::StokesSystem<dim> &scratch,
+ Assembly::CopyData::StokesSystem<dim> &data);
+
+ void
+ copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem<dim> &data);
+
+
+ void
+ local_assemble_temperature_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ Assembly::Scratch::TemperatureMatrix<dim> &scratch,
+ Assembly::CopyData::TemperatureMatrix<dim> &data);
+
+ void
+ copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix<dim> &data);
+
+
+
+ void
+ local_assemble_temperature_rhs (const std::pair<double,double> global_T_range,
+ const double global_max_velocity,
+ const double global_entropy_variation,
+ const typename DoFHandler<dim>::active_cell_iterator &cell,
+ Assembly::Scratch::TemperatureRHS<dim> &scratch,
+ Assembly::CopyData::TemperatureRHS<dim> &data);
+
+ void
+ copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS<dim> &data);
+
+ // Finally, we forward declare
+ // a member class that we will
+ // define later on and that
+ // will be used to compute a
+ // number of quantities from
+ // our solution vectors that
+ // we'd like to put into the
+ // output files for
+ // visualization.
+ class Postprocessor;
};
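// The comments in the class above describe two small utility objects, the
// ConditionalOStream <code>pcout</code> and the TimerOutput
// <code>computing_timer</code>. A hedged, self-contained sketch of how such
// a pair is typically set up and used (assuming MPI has been initialized and
// the relevant deal.II headers are included; the section name and the
// message are purely illustrative):
void timing_and_output_sketch ()
{
  // Only the process with rank 0 actually writes to the screen:
  ConditionalOStream pcout (std::cout,
                            Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0);

  // Accumulate wall times per named section and print a summary through
  // pcout when the object is destroyed:
  TimerOutput computing_timer (pcout,
                               TimerOutput::summary,
                               TimerOutput::wall_times);

  computing_timer.enter_subsection ("Setup dof systems");
  pcout << "   Setting up the Stokes and temperature systems..." << std::endl;
  computing_timer.leave_subsection ("Setup dof systems");
}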
double
BoussinesqFlowProblem<dim>::
compute_viscosity (const std::vector<double> &old_temperature,
- const std::vector<double> &old_old_temperature,
- const std::vector<Tensor<1,dim> > &old_temperature_grads,
- const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
- const std::vector<double> &old_temperature_laplacians,
- const std::vector<double> &old_old_temperature_laplacians,
- const std::vector<Tensor<1,dim> > &old_velocity_values,
- const std::vector<Tensor<1,dim> > &old_old_velocity_values,
- const std::vector<SymmetricTensor<2,dim> > &old_strain_rates,
- const std::vector<SymmetricTensor<2,dim> > &old_old_strain_rates,
- const double global_u_infty,
- const double global_T_variation,
- const double average_temperature,
- const double global_entropy_variation,
- const double cell_diameter) const
+ const std::vector<double> &old_old_temperature,
+ const std::vector<Tensor<1,dim> > &old_temperature_grads,
+ const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
+ const std::vector<double> &old_temperature_laplacians,
+ const std::vector<double> &old_old_temperature_laplacians,
+ const std::vector<Tensor<1,dim> > &old_velocity_values,
+ const std::vector<Tensor<1,dim> > &old_old_velocity_values,
+ const std::vector<SymmetricTensor<2,dim> > &old_strain_rates,
+ const std::vector<SymmetricTensor<2,dim> > &old_old_strain_rates,
+ const double global_u_infty,
+ const double global_T_variation,
+ const double average_temperature,
+ const double global_entropy_variation,
+ const double cell_diameter) const
{
if (global_u_infty == 0)
return 5e-3 * cell_diameter;
template <int dim>
struct EulerEquations
{
- // @sect4{Component description}
-
- // First a few variables that
- // describe the various components of our
- // solution vector in a generic way. This
- // includes the number of components in the
- // system (Euler's equations have one entry
- // for momenta in each spatial direction,
- // plus the energy and density components,
- // for a total of <code>dim+2</code>
- // components), as well as functions that
- // describe the index within the solution
- // vector of the first momentum component,
- // the density component, and the energy
- // density component. Note that all these
- // %numbers depend on the space dimension;
- // defining them in a generic way (rather
- // than by implicit convention) makes our
- // code more flexible and makes it easier
- // to later extend it, for example by
- // adding more components to the equations.
- static const unsigned int n_components = dim + 2;
- static const unsigned int first_momentum_component = 0;
- static const unsigned int density_component = dim;
- static const unsigned int energy_component = dim+1;
-
- // When generating graphical
- // output way down in this
- // program, we need to specify
- // the names of the solution
- // variables as well as how the
- // various components group into
- // vector and scalar fields. We
- // could describe this there, but
- // in order to keep things that
- // have to do with the Euler
- // equation localized here and
- // the rest of the program as
- // generic as possible, we
- // provide this sort of
- // information in the following
- // two functions:
- static
- std::vector<std::string>
- component_names ()
- {
- std::vector<std::string> names (dim, "momentum");
- names.push_back ("density");
- names.push_back ("energy_density");
+ // @sect4{Component description}
+
+ // First a few variables that
+ // describe the various components of our
+ // solution vector in a generic way. This
+ // includes the number of components in the
+ // system (Euler's equations have one entry
+ // for momenta in each spatial direction,
+ // plus the energy and density components,
+ // for a total of <code>dim+2</code>
+ // components), as well as functions that
+ // describe the index within the solution
+ // vector of the first momentum component,
+ // the density component, and the energy
+ // density component. Note that all these
+ // %numbers depend on the space dimension;
+ // defining them in a generic way (rather
+ // than by implicit convention) makes our
+ // code more flexible and makes it easier
+ // to later extend it, for example by
+ // adding more components to the equations.
+ static const unsigned int n_components = dim + 2;
+ static const unsigned int first_momentum_component = 0;
+ static const unsigned int density_component = dim;
+ static const unsigned int energy_component = dim+1;
+
+ // When generating graphical
+ // output way down in this
+ // program, we need to specify
+ // the names of the solution
+ // variables as well as how the
+ // various components group into
+ // vector and scalar fields. We
+ // could describe this there, but
+ // in order to keep things that
+ // have to do with the Euler
+ // equation localized here and
+ // the rest of the program as
+ // generic as possible, we
+ // provide this sort of
+ // information in the following
+ // two functions:
+ static
+ std::vector<std::string>
+ component_names ()
+ {
+ std::vector<std::string> names (dim, "momentum");
+ names.push_back ("density");
+ names.push_back ("energy_density");
- return names;
- }
+ return names;
+ }
- static
+ static
+ std::vector<DataComponentInterpretation::DataComponentInterpretation>
+ component_interpretation ()
+ {
std::vector<DataComponentInterpretation::DataComponentInterpretation>
- component_interpretation ()
- {
- std::vector<DataComponentInterpretation::DataComponentInterpretation>
- data_component_interpretation
- (dim, DataComponentInterpretation::component_is_part_of_vector);
- data_component_interpretation
- .push_back (DataComponentInterpretation::component_is_scalar);
- data_component_interpretation
- .push_back (DataComponentInterpretation::component_is_scalar);
-
- return data_component_interpretation;
- }
+ data_component_interpretation
+ (dim, DataComponentInterpretation::component_is_part_of_vector);
+ data_component_interpretation
+ .push_back (DataComponentInterpretation::component_is_scalar);
+ data_component_interpretation
+ .push_back (DataComponentInterpretation::component_is_scalar);
+
+ return data_component_interpretation;
+ }
- // @sect4{Transformations between variables}
-
- // Next, we define the gas
- // constant. We will set it to 1.4
- // in its definition immediately
- // following the declaration of
- // this class (unlike integer
- // variables, like the ones above,
- // static const floating point
- // member variables cannot be
- // initialized within the class
- // declaration in C++). This value
- // of 1.4 is representative of a
- // gas that consists of molecules
- // composed of two atoms, such as
- // air which, up to small traces,
- // consists almost entirely of $N_2$
- // and $O_2$.
- static const double gas_gamma;
-
-
- // In the following, we will need to
- // compute the kinetic energy and the
- // pressure from a vector of conserved
- // variables. This we can do based on the
- // energy density and the kinetic energy
- // $\frac 12 \rho |\mathbf v|^2 =
- // \frac{|\rho \mathbf v|^2}{2\rho}$
- // (note that the independent variables
- // contain the momentum components $\rho
- // v_i$, not the velocities $v_i$).
- //
- // There is one slight problem: We will
- // need to call the following functions
- // with input arguments of type
- // <code>std::vector@<number@></code> and
- // <code>Vector@<number@></code>. The
- // problem is that the former has an
- // access operator
- // <code>operator[]</code> whereas the
- // latter, for historical reasons, has
- // <code>operator()</code>. We wouldn't
- // be able to write the function in a
- // generic way if we were to use one or
- // the other of these. Fortunately, we
- // can use the following trick: instead
- // of writing <code>v[i]</code> or
- // <code>v(i)</code>, we can use
- // <code>*(v.begin() + i)</code>, i.e. we
- // generate an iterator that points to
- // the <code>i</code>th element, and then
- // dereference it. This works for both
- // kinds of vectors -- not the prettiest
- // solution, but one that works.
- template <typename number, typename InputVector>
- static
- number
- compute_kinetic_energy (const InputVector &W)
- {
- number kinetic_energy = 0;
- for (unsigned int d=0; d<dim; ++d)
- kinetic_energy += *(W.begin()+first_momentum_component+d) *
- *(W.begin()+first_momentum_component+d);
- kinetic_energy *= 1./(2 * *(W.begin() + density_component));
+ // @sect4{Transformations between variables}
+
+ // Next, we define the gas
+ // constant. We will set it to 1.4
+ // in its definition immediately
+ // following the declaration of
+ // this class (unlike integer
+ // variables, like the ones above,
+ // static const floating point
+ // member variables cannot be
+ // initialized within the class
+ // declaration in C++). This value
+ // of 1.4 is representative of a
+ // gas that consists of molecules
+ // composed of two atoms, such as
 + // air, which, up to small traces,
 + // consists almost entirely of $N_2$
 + // and $O_2$.
+ static const double gas_gamma;
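  // As announced above, the corresponding definition placed right
  // after the class declaration would, in a sketch, read
  //
  //   template <int dim>
  //   const double EulerEquations<dim>::gas_gamma = 1.4;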
+
+
+ // In the following, we will need to
+ // compute the kinetic energy and the
+ // pressure from a vector of conserved
+ // variables. This we can do based on the
+ // energy density and the kinetic energy
+ // $\frac 12 \rho |\mathbf v|^2 =
+ // \frac{|\rho \mathbf v|^2}{2\rho}$
+ // (note that the independent variables
+ // contain the momentum components $\rho
+ // v_i$, not the velocities $v_i$).
+ //
+ // There is one slight problem: We will
+ // need to call the following functions
+ // with input arguments of type
+ // <code>std::vector@<number@></code> and
+ // <code>Vector@<number@></code>. The
+ // problem is that the former has an
+ // access operator
+ // <code>operator[]</code> whereas the
+ // latter, for historical reasons, has
+ // <code>operator()</code>. We wouldn't
+ // be able to write the function in a
+ // generic way if we were to use one or
+ // the other of these. Fortunately, we
+ // can use the following trick: instead
+ // of writing <code>v[i]</code> or
+ // <code>v(i)</code>, we can use
+ // <code>*(v.begin() + i)</code>, i.e. we
+ // generate an iterator that points to
+ // the <code>i</code>th element, and then
+ // dereference it. This works for both
+ // kinds of vectors -- not the prettiest
+ // solution, but one that works.
+ template <typename number, typename InputVector>
+ static
+ number
+ compute_kinetic_energy (const InputVector &W)
+ {
+ number kinetic_energy = 0;
+ for (unsigned int d=0; d<dim; ++d)
+ kinetic_energy += *(W.begin()+first_momentum_component+d) *
+ *(W.begin()+first_momentum_component+d);
+ kinetic_energy *= 1./(2 * *(W.begin() + density_component));
- return kinetic_energy;
- }
+ return kinetic_energy;
+ }
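  // To make the iterator trick described above concrete, here is a
  // minimal, self-contained illustration (the variable names are made
  // up purely for this example): both container types can be read
  // through the very same expression,
  //
  //   std::vector<double>    v1 (3, 1.);
  //   dealii::Vector<double> v2 (3);
  //   const double a = *(v1.begin() + 2);   // instead of v1[2]
  //   const double b = *(v2.begin() + 2);   // instead of v2(2)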
- template <typename number, typename InputVector>
- static
- number
- compute_pressure (const InputVector &W)
- {
- return ((gas_gamma-1.0) *
- (*(W.begin() + energy_component) -
- compute_kinetic_energy<number>(W)));
- }
+ template <typename number, typename InputVector>
+ static
+ number
+ compute_pressure (const InputVector &W)
+ {
+ return ((gas_gamma-1.0) *
+ (*(W.begin() + energy_component) -
+ compute_kinetic_energy<number>(W)));
+ }
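  // Written out, the relation implemented by the two functions above
  // is the ideal gas law
  // $p = (\gamma-1)\left(e - \frac{|\rho\mathbf v|^2}{2\rho}\right)$,
  // where $e$ denotes the energy density stored in the
  // <code>energy_component</code> slot and $\gamma$ is
  // <code>gas_gamma</code>.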
- // @sect4{EulerEquations::compute_flux_matrix}
-
- // We define the flux function
- // $F(W)$ as one large matrix.
- // Each row of this matrix
- // represents a scalar
- // conservation law for the
- // component in that row. The
- // exact form of this matrix is
- // given in the
- // introduction. Note that we
- // know the size of the matrix:
- // it has as many rows as the
- // system has components, and
- // <code>dim</code> columns;
- // rather than using a FullMatrix
- // object for such a matrix
- // (which has a variable number
- // of rows and columns and must
- // therefore allocate memory on
- // the heap each time such a
- // matrix is created), we use a
- // rectangular array of numbers
- // right away.
- //
- // We templatize the numerical type of
- // the flux function so that we may use
- // the automatic differentiation type
- // here. Similarly, we will call the
- // function with different input vector
- // data types, so we templatize on it as
- // well:
- template <typename InputVector, typename number>
- static
- void compute_flux_matrix (const InputVector &W,
- number (&flux)[n_components][dim])
+ // @sect4{EulerEquations::compute_flux_matrix}
+
+ // We define the flux function
+ // $F(W)$ as one large matrix.
+ // Each row of this matrix
+ // represents a scalar
+ // conservation law for the
+ // component in that row. The
+ // exact form of this matrix is
+ // given in the
+ // introduction. Note that we
+ // know the size of the matrix:
+ // it has as many rows as the
+ // system has components, and
+ // <code>dim</code> columns;
+ // rather than using a FullMatrix
+ // object for such a matrix
+ // (which has a variable number
+ // of rows and columns and must
+ // therefore allocate memory on
+ // the heap each time such a
+ // matrix is created), we use a
+ // rectangular array of numbers
+ // right away.
+ //
+ // We templatize the numerical type of
+ // the flux function so that we may use
+ // the automatic differentiation type
+ // here. Similarly, we will call the
+ // function with different input vector
+ // data types, so we templatize on it as
+ // well:
+ template <typename InputVector, typename number>
+ static
+ void compute_flux_matrix (const InputVector &W,
+ number (&flux)[n_components][dim])
+ {
+ // First compute the pressure that
+ // appears in the flux matrix, and
+ // then compute the first
+ // <code>dim</code> columns of the
+ // matrix that correspond to the
+ // momentum terms:
+ const number pressure = compute_pressure<number> (W);
+
+ for (unsigned int d=0; d<dim; ++d)
{
- // First compute the pressure that
- // appears in the flux matrix, and
- // then compute the first
- // <code>dim</code> columns of the
- // matrix that correspond to the
- // momentum terms:
- const number pressure = compute_pressure<number> (W);
-
- for (unsigned int d=0; d<dim; ++d)
- {
- for (unsigned int e=0; e<dim; ++e)
- flux[first_momentum_component+d][e]
- = W[first_momentum_component+d] *
- W[first_momentum_component+e] /
- W[density_component];
-
- flux[first_momentum_component+d][d] += pressure;
- }
+ for (unsigned int e=0; e<dim; ++e)
+ flux[first_momentum_component+d][e]
+ = W[first_momentum_component+d] *
+ W[first_momentum_component+e] /
+ W[density_component];
- // Then the terms for the
- // density (i.e. mass
- // conservation), and,
- // lastly, conservation of
- // energy:
- for (unsigned int d=0; d<dim; ++d)
- flux[density_component][d] = W[first_momentum_component+d];
-
- for (unsigned int d=0; d<dim; ++d)
- flux[energy_component][d] = W[first_momentum_component+d] /
- W[density_component] *
- (W[energy_component] + pressure);
+ flux[first_momentum_component+d][d] += pressure;
}
+ // Then the terms for the
+ // density (i.e. mass
+ // conservation), and,
+ // lastly, conservation of
+ // energy:
+ for (unsigned int d=0; d<dim; ++d)
+ flux[density_component][d] = W[first_momentum_component+d];
+
+ for (unsigned int d=0; d<dim; ++d)
+ flux[energy_component][d] = W[first_momentum_component+d] /
+ W[density_component] *
+ (W[energy_component] + pressure);
+ }
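  // As a worked 2d example (writing $v_i$ for the velocities and $E$
  // for the energy density), the rows filled above are, from top to
  // bottom,
  // $(\rho v_1 v_1 + p,\; \rho v_1 v_2)$,
  // $(\rho v_2 v_1,\; \rho v_2 v_2 + p)$,
  // $(\rho v_1,\; \rho v_2)$ and
  // $((E+p)\, v_1,\; (E+p)\, v_2)$,
  // i.e. one row per solution component and one column per space
  // direction.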
- // @sect4{EulerEquations::compute_normal_flux}
-
- // On the boundaries of the
- // domain and across hanging
- // nodes we use a numerical flux
- // function to enforce boundary
- // conditions. This routine is
- // the basic Lax-Friedrich's flux
- // with a stabilization parameter
- // $\alpha$. It's form has also
- // been given already in the
- // introduction:
- template <typename InputVector>
- static
- void numerical_normal_flux (const Point<dim> &normal,
- const InputVector &Wplus,
- const InputVector &Wminus,
- const double alpha,
- Sacado::Fad::DFad<double> (&normal_flux)[n_components])
- {
- Sacado::Fad::DFad<double> iflux[n_components][dim];
- Sacado::Fad::DFad<double> oflux[n_components][dim];
-
- compute_flux_matrix (Wplus, iflux);
- compute_flux_matrix (Wminus, oflux);
- for (unsigned int di=0; di<n_components; ++di)
- {
- normal_flux[di] = 0;
- for (unsigned int d=0; d<dim; ++d)
- normal_flux[di] += 0.5*(iflux[di][d] + oflux[di][d]) * normal[d];
+ // @sect4{EulerEquations::compute_normal_flux}
+
+ // On the boundaries of the
+ // domain and across hanging
+ // nodes we use a numerical flux
+ // function to enforce boundary
+ // conditions. This routine is
 + // the basic Lax-Friedrichs flux
 + // with a stabilization parameter
 + // $\alpha$. Its form has already
 + // been given in the
 + // introduction:
+ template <typename InputVector>
+ static
+ void numerical_normal_flux (const Point<dim> &normal,
+ const InputVector &Wplus,
+ const InputVector &Wminus,
+ const double alpha,
+ Sacado::Fad::DFad<double> (&normal_flux)[n_components])
+ {
+ Sacado::Fad::DFad<double> iflux[n_components][dim];
+ Sacado::Fad::DFad<double> oflux[n_components][dim];
- normal_flux[di] += 0.5*alpha*(Wplus[di] - Wminus[di]);
- }
- }
+ compute_flux_matrix (Wplus, iflux);
+ compute_flux_matrix (Wminus, oflux);
- // @sect4{EulerEquations::compute_forcing_vector}
-
- // In the same way as describing the flux
- // function $\mathbf F(\mathbf w)$, we
- // also need to have a way to describe
- // the right hand side forcing term. As
- // mentioned in the introduction, we
- // consider only gravity here, which
- // leads to the specific form $\mathbf
- // G(\mathbf w) = \left(
- // g_1\rho, g_2\rho, g_3\rho, 0,
- // \rho \mathbf g \cdot \mathbf v
- // \right)^T$, shown here for
- // the 3d case. More specifically, we
- // will consider only $\mathbf
- // g=(0,0,-1)^T$ in 3d, or $\mathbf
- // g=(0,-1)^T$ in 2d. This naturally
- // leads to the following function:
- template <typename InputVector, typename number>
- static
- void compute_forcing_vector (const InputVector &W,
- number (&forcing)[n_components])
+ for (unsigned int di=0; di<n_components; ++di)
{
- const double gravity = -1.0;
+ normal_flux[di] = 0;
+ for (unsigned int d=0; d<dim; ++d)
+ normal_flux[di] += 0.5*(iflux[di][d] + oflux[di][d]) * normal[d];
- for (unsigned int c=0; c<n_components; ++c)
- switch (c)
- {
- case first_momentum_component+dim-1:
- forcing[c] = gravity * W[density_component];
- break;
- case energy_component:
- forcing[c] = gravity *
- W[density_component] *
- W[first_momentum_component+dim-1];
- break;
- default:
- forcing[c] = 0;
- }
+ normal_flux[di] += 0.5*alpha*(Wplus[di] - Wminus[di]);
}
+ }
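  // In formulas, the quantity computed in the loop above is the
  // Lax-Friedrichs flux
  // $\mathbf H(\mathbf w^+,\mathbf w^-,\mathbf n)
  //   = \frac 12 \bigl(\mathbf F(\mathbf w^+) + \mathbf F(\mathbf w^-)\bigr)
  //     \cdot \mathbf n
  //   + \frac\alpha 2 \bigl(\mathbf w^+ - \mathbf w^-\bigr)$.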
+ // @sect4{EulerEquations::compute_forcing_vector}
+
+ // In the same way as describing the flux
+ // function $\mathbf F(\mathbf w)$, we
+ // also need to have a way to describe
+ // the right hand side forcing term. As
+ // mentioned in the introduction, we
+ // consider only gravity here, which
+ // leads to the specific form $\mathbf
+ // G(\mathbf w) = \left(
+ // g_1\rho, g_2\rho, g_3\rho, 0,
+ // \rho \mathbf g \cdot \mathbf v
+ // \right)^T$, shown here for
+ // the 3d case. More specifically, we
+ // will consider only $\mathbf
+ // g=(0,0,-1)^T$ in 3d, or $\mathbf
+ // g=(0,-1)^T$ in 2d. This naturally
+ // leads to the following function:
+ template <typename InputVector, typename number>
+ static
+ void compute_forcing_vector (const InputVector &W,
+ number (&forcing)[n_components])
+ {
+ const double gravity = -1.0;
- // @sect4{Dealing with boundary conditions}
+ for (unsigned int c=0; c<n_components; ++c)
+ switch (c)
+ {
+ case first_momentum_component+dim-1:
+ forcing[c] = gravity * W[density_component];
+ break;
+ case energy_component:
+ forcing[c] = gravity *
+ W[density_component] *
+ W[first_momentum_component+dim-1];
+ break;
+ default:
+ forcing[c] = 0;
+ }
+ }
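  // Specializing the formula quoted above to 2d with
  // $\mathbf g=(0,-1)^T$ gives, as a sketch,
  // $\mathbf G(\mathbf w) = (0,\,-\rho,\,0,\,\rho\,\mathbf g\cdot\mathbf v)^T
  //   = (0,\,-\rho,\,0,\,-\rho v_2)^T$,
  // i.e. only the vertical momentum component and the energy
  // component receive a contribution.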
- // Another thing we have to deal with is
- // boundary conditions. To this end, let
- // us first define the kinds of boundary
- // conditions we currently know how to
- // deal with:
- enum BoundaryKind
- {
- inflow_boundary,
- outflow_boundary,
- no_penetration_boundary,
- pressure_boundary
- };
+ // @sect4{Dealing with boundary conditions}
- // The next part is to actually decide
- // what to do at each kind of
- // boundary. To this end, remember from
- // the introduction that boundary
- // conditions are specified by choosing a
- // value $\mathbf w^-$ on the outside of
- // a boundary given an inhomogeneity
- // $\mathbf j$ and possibly the
- // solution's value $\mathbf w^+$ on the
- // inside. Both are then passed to the
- // numerical flux $\mathbf
- // H(\mathbf{w}^+, \mathbf{w}^-,
- // \mathbf{n})$ to define boundary
- // contributions to the bilinear form.
- //
- // Boundary conditions can in some cases
- // be specified for each component of the
- // solution vector independently. For
- // example, if component $c$ is marked
- // for inflow, then $w^-_c = j_c$. If it
- // is an outflow, then $w^-_c =
- // w^+_c$. These two simple cases are
- // handled first in the function below.
- //
- // There is a little snag that makes this
- // function unpleasant from a C++
- // language viewpoint: The output vector
- // <code>Wminus</code> will of course be
- // modified, so it shouldn't be a
- // <code>const</code> argument. Yet it is
- // in the implementation below, and needs
- // to be in order to allow the code to
- // compile. The reason is that we call
- // this function at a place where
- // <code>Wminus</code> is of type
- // <code>Table@<2,Sacado::Fad::DFad@<double@>
- // @></code>, this being 2d table with
- // indices representing the quadrature
- // point and the vector component,
- // respectively. We call this function
- // with <code>Wminus[q]</code> as last
- // argument; subscripting a 2d table
- // yields a temporary accessor object
- // representing a 1d vector, just what we
- // want here. The problem is that a
- // temporary accessor object can't be
- // bound to a non-const reference
- // argument of a function, as we would
- // like here, according to the C++ 1998
- // and 2003 standards (something that
- // will be fixed with the next standard
- // in the form of rvalue references). We
- // get away with making the output
- // argument here a constant because it is
- // the <i>accessor</i> object that's
- // constant, not the table it points to:
- // that one can still be written to. The
- // hack is unpleasant nevertheless
- // because it restricts the kind of data
- // types that may be used as template
- // argument to this function: a regular
- // vector isn't going to do because that
- // one can not be written to when marked
- // <code>const</code>. With no good
- // solution around at the moment, we'll
- // go with the pragmatic, even if not
- // pretty, solution shown here:
- template <typename DataVector>
- static
- void
- compute_Wminus (const BoundaryKind (&boundary_kind)[n_components],
- const Point<dim> &normal_vector,
- const DataVector &Wplus,
- const Vector<double> &boundary_values,
- const DataVector &Wminus)
- {
- for (unsigned int c = 0; c < n_components; c++)
- switch (boundary_kind[c])
- {
- case inflow_boundary:
- {
- Wminus[c] = boundary_values(c);
- break;
- }
+ // Another thing we have to deal with is
+ // boundary conditions. To this end, let
+ // us first define the kinds of boundary
+ // conditions we currently know how to
+ // deal with:
+ enum BoundaryKind
+ {
+ inflow_boundary,
+ outflow_boundary,
+ no_penetration_boundary,
+ pressure_boundary
+ };
- case outflow_boundary:
- {
- Wminus[c] = Wplus[c];
- break;
- }
- // Prescribed pressure boundary
- // conditions are a bit more
- // complicated by the fact that
- // even though the pressure is
- // prescribed, we really are
- // setting the energy component
- // here, which will depend on
- // velocity and pressure. So
- // even though this seems like
- // a Dirichlet type boundary
- // condition, we get
- // sensitivities of energy to
- // velocity and density (unless
- // these are also prescribed):
- case pressure_boundary:
- {
- const typename DataVector::value_type
- density = (boundary_kind[density_component] ==
- inflow_boundary
- ?
- boundary_values(density_component)
- :
- Wplus[density_component]);
+ // The next part is to actually decide
+ // what to do at each kind of
+ // boundary. To this end, remember from
+ // the introduction that boundary
+ // conditions are specified by choosing a
+ // value $\mathbf w^-$ on the outside of
+ // a boundary given an inhomogeneity
+ // $\mathbf j$ and possibly the
+ // solution's value $\mathbf w^+$ on the
+ // inside. Both are then passed to the
+ // numerical flux $\mathbf
+ // H(\mathbf{w}^+, \mathbf{w}^-,
+ // \mathbf{n})$ to define boundary
+ // contributions to the bilinear form.
+ //
+ // Boundary conditions can in some cases
+ // be specified for each component of the
+ // solution vector independently. For
+ // example, if component $c$ is marked
+ // for inflow, then $w^-_c = j_c$. If it
+ // is an outflow, then $w^-_c =
+ // w^+_c$. These two simple cases are
+ // handled first in the function below.
+ //
+ // There is a little snag that makes this
+ // function unpleasant from a C++
+ // language viewpoint: The output vector
+ // <code>Wminus</code> will of course be
+ // modified, so it shouldn't be a
+ // <code>const</code> argument. Yet it is
+ // in the implementation below, and needs
+ // to be in order to allow the code to
+ // compile. The reason is that we call
+ // this function at a place where
+ // <code>Wminus</code> is of type
+ // <code>Table@<2,Sacado::Fad::DFad@<double@>
 + // @></code>, this being a 2d table with
+ // indices representing the quadrature
+ // point and the vector component,
+ // respectively. We call this function
+ // with <code>Wminus[q]</code> as last
+ // argument; subscripting a 2d table
+ // yields a temporary accessor object
+ // representing a 1d vector, just what we
+ // want here. The problem is that a
+ // temporary accessor object can't be
+ // bound to a non-const reference
+ // argument of a function, as we would
+ // like here, according to the C++ 1998
+ // and 2003 standards (something that
+ // will be fixed with the next standard
+ // in the form of rvalue references). We
+ // get away with making the output
+ // argument here a constant because it is
+ // the <i>accessor</i> object that's
+ // constant, not the table it points to:
+ // that one can still be written to. The
+ // hack is unpleasant nevertheless
+ // because it restricts the kind of data
+ // types that may be used as template
+ // argument to this function: a regular
 + // vector isn't going to work because it
 + // cannot be written to when marked
+ // <code>const</code>. With no good
+ // solution around at the moment, we'll
+ // go with the pragmatic, even if not
+ // pretty, solution shown here:
+ template <typename DataVector>
+ static
+ void
+ compute_Wminus (const BoundaryKind (&boundary_kind)[n_components],
+ const Point<dim> &normal_vector,
+ const DataVector &Wplus,
+ const Vector<double> &boundary_values,
+ const DataVector &Wminus)
+ {
+ for (unsigned int c = 0; c < n_components; c++)
+ switch (boundary_kind[c])
+ {
+ case inflow_boundary:
+ {
+ Wminus[c] = boundary_values(c);
+ break;
+ }
- typename DataVector::value_type kinetic_energy = 0;
- for (unsigned int d=0; d<dim; ++d)
- if (boundary_kind[d] == inflow_boundary)
- kinetic_energy += boundary_values(d)*boundary_values(d);
- else
- kinetic_energy += Wplus[d]*Wplus[d];
- kinetic_energy *= 1./2./density;
+ case outflow_boundary:
+ {
+ Wminus[c] = Wplus[c];
+ break;
+ }
- Wminus[c] = boundary_values(c) / (gas_gamma-1.0) +
- kinetic_energy;
 + // Prescribed pressure boundary
 + // conditions are a bit more
 + // complicated: even though the
 + // pressure is prescribed, we are
 + // really setting the energy
 + // component here, which will depend
 + // on velocity and pressure. So
+ // even though this seems like
+ // a Dirichlet type boundary
+ // condition, we get
+ // sensitivities of energy to
+ // velocity and density (unless
+ // these are also prescribed):
+ case pressure_boundary:
+ {
+ const typename DataVector::value_type
+ density = (boundary_kind[density_component] ==
+ inflow_boundary
+ ?
+ boundary_values(density_component)
+ :
+ Wplus[density_component]);
+
+ typename DataVector::value_type kinetic_energy = 0;
+ for (unsigned int d=0; d<dim; ++d)
+ if (boundary_kind[d] == inflow_boundary)
+ kinetic_energy += boundary_values(d)*boundary_values(d);
+ else
+ kinetic_energy += Wplus[d]*Wplus[d];
+ kinetic_energy *= 1./2./density;
+
+ Wminus[c] = boundary_values(c) / (gas_gamma-1.0) +
+ kinetic_energy;
+
+ break;
+ }
- break;
- }
+ case no_penetration_boundary:
+ {
 + // We prescribe the
 + // velocity (we are dealing with a
 + // particular component here, so
 + // that the average of the
 + // velocities is orthogonal to the
 + // surface normal). This creates
 + // sensitivities across the
 + // velocity components.
+ Sacado::Fad::DFad<double> vdotn = 0;
+ for (unsigned int d = 0; d < dim; d++)
+ {
+ vdotn += Wplus[d]*normal_vector[d];
+ }
- case no_penetration_boundary:
- {
- // We prescribe the
- // velocity (we are dealing with a
- // particular component here so
- // that the average of the
- // velocities is orthogonal to the
- // surface normal. This creates
- // sensitivies of across the
- // velocity components.
- Sacado::Fad::DFad<double> vdotn = 0;
- for (unsigned int d = 0; d < dim; d++) {
- vdotn += Wplus[d]*normal_vector[d];
- }
-
- Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c];
- break;
- }
+ Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c];
+ break;
+ }
- default:
- Assert (false, ExcNotImplemented());
- }
- }
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+ }
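  // For reference, the <code>no_penetration_boundary</code> case
  // above is the mirror reflection
  // $w^-_c = w^+_c - 2\,(\mathbf w^+_{\rho\mathbf v}\cdot\mathbf n)\, n_c$
  // applied to the momentum components, so that the average of inner
  // and outer momentum has no component normal to the boundary.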
- // @sect4{EulerEquations::compute_refinement_indicators}
-
- // In this class, we also want to specify
- // how to refine the mesh. The class
- // <code>ConservationLaw</code> that will
- // use all the information we provide
- // here in the <code>EulerEquation</code>
- // class is pretty agnostic about the
- // particular conservation law it solves:
- // as doesn't even really care how many
- // components a solution vector
- // has. Consequently, it can't know what
- // a reasonable refinement indicator
- // would be. On the other hand, here we
- // do, or at least we can come up with a
- // reasonable choice: we simply look at
- // the gradient of the density, and
- // compute
- // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$,
- // where $x_K$ is the center of cell $K$.
- //
- // There are certainly a number of
- // equally reasonable refinement
- // indicators, but this one does, and it
- // is easy to compute:
- static
- void
- compute_refinement_indicators (const DoFHandler<dim> &dof_handler,
- const Mapping<dim> &mapping,
- const Vector<double> &solution,
- Vector<double> &refinement_indicators)
- {
- const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
- std::vector<unsigned int> dofs (dofs_per_cell);
+ // @sect4{EulerEquations::compute_refinement_indicators}
+
+ // In this class, we also want to specify
+ // how to refine the mesh. The class
+ // <code>ConservationLaw</code> that will
+ // use all the information we provide
 + // here in the <code>EulerEquations</code>
 + // class is pretty agnostic about the
 + // particular conservation law it solves:
 + // it doesn't even really care how many
+ // components a solution vector
+ // has. Consequently, it can't know what
+ // a reasonable refinement indicator
+ // would be. On the other hand, here we
+ // do, or at least we can come up with a
+ // reasonable choice: we simply look at
+ // the gradient of the density, and
+ // compute
+ // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$,
+ // where $x_K$ is the center of cell $K$.
+ //
 + // There are certainly a number of
 + // equally reasonable refinement
 + // indicators, but this one does the
 + // job, and it is easy to compute:
+ static
+ void
+ compute_refinement_indicators (const DoFHandler<dim> &dof_handler,
+ const Mapping<dim> &mapping,
- const Vector<double> &solution,
++ const Vector<double> &solution,
+ Vector<double> &refinement_indicators)
+ {
+ const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
+ std::vector<unsigned int> dofs (dofs_per_cell);
- const QMidpoint<dim> quadrature_formula;
- const UpdateFlags update_flags = update_gradients;
- FEValues<dim> fe_v (mapping, dof_handler.get_fe(),
- quadrature_formula, update_flags);
+ const QMidpoint<dim> quadrature_formula;
+ const UpdateFlags update_flags = update_gradients;
+ FEValues<dim> fe_v (mapping, dof_handler.get_fe(),
+ quadrature_formula, update_flags);
- std::vector<std::vector<Tensor<1,dim> > >
- dU (1, std::vector<Tensor<1,dim> >(n_components));
+ std::vector<std::vector<Tensor<1,dim> > >
+ dU (1, std::vector<Tensor<1,dim> >(n_components));
- typename DoFHandler<dim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
- for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no)
- {
- fe_v.reinit(cell);
- fe_v.get_function_grads (solution, dU);
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no)
+ {
+ fe_v.reinit(cell);
+ fe_v.get_function_grads (solution, dU);
- refinement_indicators(cell_no)
- = std::log(1+
- std::sqrt(dU[0][density_component] *
- dU[0][density_component]));
- }
+ refinement_indicators(cell_no)
+ = std::log(1+
+ std::sqrt(dU[0][density_component] *
+ dU[0][density_component]));
}
+ }
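  // A possible (hypothetical, and not necessarily the code used later
  // in this program) way to drive mesh adaptation with this indicator
  // would be
  //
  //   Vector<double> indicators (triangulation.n_active_cells());
  //   EulerEquations<dim>::compute_refinement_indicators (dof_handler, mapping,
  //                                                       solution, indicators);
  //   GridRefinement::refine_and_coarsen_fixed_number (triangulation,
  //                                                    indicators, 0.3, 0.1);
  //   triangulation.execute_coarsening_and_refinement ();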
EulerEquations<dim>::Postprocessor::
compute_derived_quantities_vector (const std::vector<Vector<double> > &uh,
const std::vector<std::vector<Tensor<1,dim> > > &duh,
- const std::vector<std::vector<Tensor<2,dim> > > & /*dduh*/,
- const std::vector<Point<dim> > & /*normals*/,
- const std::vector<Point<dim> > & /*evaluation_points*/,
+ const std::vector<std::vector<Tensor<2,dim> > > &/*dduh*/,
+ const std::vector<Point<dim> > &/*normals*/,
+ const std::vector<Point<dim> > &/*evaluation_points*/,
std::vector<Vector<double> > &computed_quantities) const
{
- // At the beginning of the function, let us
- // make sure that all variables have the
- // correct sizes, so that we can access
- // individual vector elements without
- // having to wonder whether we might read
- // or write invalid elements; we also check
- // that the <code>duh</code> vector only
- // contains data if we really need it (the
- // system knows about this because we say
- // so in the
- // <code>get_needed_update_flags()</code>
- // function below). For the inner vectors,
- // we check that at least the first element
- // of the outer vector has the correct
- // inner size:
+ // At the beginning of the function, let us
+ // make sure that all variables have the
+ // correct sizes, so that we can access
+ // individual vector elements without
+ // having to wonder whether we might read
+ // or write invalid elements; we also check
+ // that the <code>duh</code> vector only
+ // contains data if we really need it (the
+ // system knows about this because we say
+ // so in the
+ // <code>get_needed_update_flags()</code>
+ // function below). For the inner vectors,
+ // we check that at least the first element
+ // of the outer vector has the correct
+ // inner size:
const unsigned int n_quadrature_points = uh.size();
if (do_schlieren_plot == true)
template <int dim>
class NavierStokesProjection
{
- public:
- NavierStokesProjection (const RunTimeParameters::Data_Storage &data);
-
- void run (const bool verbose = false,
- const unsigned int n_plots = 10);
- protected:
- RunTimeParameters::MethodFormulation type;
-
- const unsigned int deg;
- const double dt;
- const double t_0, T, Re;
-
- EquationData::Velocity<dim> vel_exact;
- std::map<unsigned int, double> boundary_values;
- std::vector<types::boundary_id> boundary_indicators;
-
- Triangulation<dim> triangulation;
-
- FE_Q<dim> fe_velocity;
- FE_Q<dim> fe_pressure;
-
- DoFHandler<dim> dof_handler_velocity;
- DoFHandler<dim> dof_handler_pressure;
-
- QGauss<dim> quadrature_pressure;
- QGauss<dim> quadrature_velocity;
-
- SparsityPattern sparsity_pattern_velocity;
- SparsityPattern sparsity_pattern_pressure;
- SparsityPattern sparsity_pattern_pres_vel;
-
- SparseMatrix<double> vel_Laplace_plus_Mass;
- SparseMatrix<double> vel_it_matrix[dim];
- SparseMatrix<double> vel_Mass;
- SparseMatrix<double> vel_Laplace;
- SparseMatrix<double> vel_Advection;
- SparseMatrix<double> pres_Laplace;
- SparseMatrix<double> pres_Mass;
- SparseMatrix<double> pres_Diff[dim];
- SparseMatrix<double> pres_iterative;
-
- Vector<double> pres_n;
- Vector<double> pres_n_minus_1;
- Vector<double> phi_n;
- Vector<double> phi_n_minus_1;
- Vector<double> u_n[dim];
- Vector<double> u_n_minus_1[dim];
- Vector<double> u_star[dim];
- Vector<double> force[dim];
- Vector<double> v_tmp;
- Vector<double> pres_tmp;
- Vector<double> rot_u;
-
- SparseILU<double> prec_velocity[dim];
- SparseILU<double> prec_pres_Laplace;
- SparseDirectUMFPACK prec_mass;
- SparseDirectUMFPACK prec_vel_mass;
-
- DeclException2 (ExcInvalidTimeStep,
- double, double,
- << " The time step " << arg1 << " is out of range."
- << std::endl
- << " The permitted range is (0," << arg2 << "]");
-
- void create_triangulation_and_dofs (const unsigned int n_refines);
-
- void initialize();
-
- void interpolate_velocity ();
-
- void diffusion_step (const bool reinit_prec);
-
- void projection_step (const bool reinit_prec);
-
- void update_pressure (const bool reinit_prec);
-
- private:
- unsigned int vel_max_its;
- unsigned int vel_Krylov_size;
- unsigned int vel_off_diagonals;
- unsigned int vel_update_prec;
- double vel_eps;
- double vel_diag_strength;
-
- void initialize_velocity_matrices();
-
- void initialize_pressure_matrices();
-
- // The next few structures and functions
- // are for doing various things in
- // parallel. They follow the scheme laid
- // out in @ref threads, using the
- // WorkStream class. As explained there,
- // this requires us to declare two
- // structures for each of the assemblers,
- // a per-task data and a scratch data
- // structure. These are then handed over
- // to functions that assemble local
- // contributions and that copy these
- // local contributions to the global
- // objects.
- //
- // One of the things that are specific to
- // this program is that we don't just
- // have a single DoFHandler object that
- // represents both the velocities and the
- // pressure, but we use individual
- // DoFHandler objects for these two kinds
- // of variables. We pay for this
- // optimization when we want to assemble
- // terms that involve both variables,
- // such as the divergence of the velocity
- // and the gradient of the pressure,
- // times the respective test
- // functions. When doing so, we can't
- // just anymore use a single FEValues
- // object, but rather we need two, and
- // they need to be initialized with cell
- // iterators that point to the same cell
- // in the triangulation but different
- // DoFHandlers.
- //
- // To do this in practice, we declare a
- // "synchronous" iterator -- an object
- // that internally consists of several
- // (in our case two) iterators, and each
- // time the synchronous iteration is
- // moved up one step, each of the
- // iterators stored internally is moved
- // up one step as well, thereby always
- // staying in sync. As it so happens,
- // there is a deal.II class that
- // facilitates this sort of thing.
- typedef std_cxx1x::tuple< typename DoFHandler<dim>::active_cell_iterator,
- typename DoFHandler<dim>::active_cell_iterator
- > IteratorTuple;
-
- typedef SynchronousIterators<IteratorTuple> IteratorPair;
-
- void initialize_gradient_operator();
-
- struct InitGradPerTaskData
- {
- unsigned int d;
- unsigned int vel_dpc;
- unsigned int pres_dpc;
- FullMatrix<double> local_grad;
- std::vector<unsigned int> vel_local_dof_indices;
- std::vector<unsigned int> pres_local_dof_indices;
-
- InitGradPerTaskData (const unsigned int dd,
- const unsigned int vdpc,
- const unsigned int pdpc)
- :
- d(dd),
- vel_dpc (vdpc),
- pres_dpc (pdpc),
- local_grad (vdpc, pdpc),
- vel_local_dof_indices (vdpc),
- pres_local_dof_indices (pdpc)
- {}
- };
+ public:
+ NavierStokesProjection (const RunTimeParameters::Data_Storage &data);
+
+ void run (const bool verbose = false,
+ const unsigned int n_plots = 10);
+ protected:
+ RunTimeParameters::MethodFormulation type;
+
+ const unsigned int deg;
+ const double dt;
+ const double t_0, T, Re;
+
+ EquationData::Velocity<dim> vel_exact;
+ std::map<unsigned int, double> boundary_values;
+ std::vector<types::boundary_id> boundary_indicators;
+
+ Triangulation<dim> triangulation;
+
+ FE_Q<dim> fe_velocity;
+ FE_Q<dim> fe_pressure;
+
+ DoFHandler<dim> dof_handler_velocity;
+ DoFHandler<dim> dof_handler_pressure;
+
+ QGauss<dim> quadrature_pressure;
+ QGauss<dim> quadrature_velocity;
+
+ SparsityPattern sparsity_pattern_velocity;
+ SparsityPattern sparsity_pattern_pressure;
+ SparsityPattern sparsity_pattern_pres_vel;
+
+ SparseMatrix<double> vel_Laplace_plus_Mass;
+ SparseMatrix<double> vel_it_matrix[dim];
+ SparseMatrix<double> vel_Mass;
+ SparseMatrix<double> vel_Laplace;
+ SparseMatrix<double> vel_Advection;
+ SparseMatrix<double> pres_Laplace;
+ SparseMatrix<double> pres_Mass;
+ SparseMatrix<double> pres_Diff[dim];
+ SparseMatrix<double> pres_iterative;
+
+ Vector<double> pres_n;
+ Vector<double> pres_n_minus_1;
+ Vector<double> phi_n;
+ Vector<double> phi_n_minus_1;
+ Vector<double> u_n[dim];
+ Vector<double> u_n_minus_1[dim];
+ Vector<double> u_star[dim];
+ Vector<double> force[dim];
+ Vector<double> v_tmp;
+ Vector<double> pres_tmp;
+ Vector<double> rot_u;
+
+ SparseILU<double> prec_velocity[dim];
+ SparseILU<double> prec_pres_Laplace;
+ SparseDirectUMFPACK prec_mass;
+ SparseDirectUMFPACK prec_vel_mass;
+
+ DeclException2 (ExcInvalidTimeStep,
+ double, double,
+ << " The time step " << arg1 << " is out of range."
+ << std::endl
+ << " The permitted range is (0," << arg2 << "]");
+
+ void create_triangulation_and_dofs (const unsigned int n_refines);
+
+ void initialize();
+
+ void interpolate_velocity ();
+
+ void diffusion_step (const bool reinit_prec);
+
+ void projection_step (const bool reinit_prec);
+
+ void update_pressure (const bool reinit_prec);
+
+ private:
+ unsigned int vel_max_its;
+ unsigned int vel_Krylov_size;
+ unsigned int vel_off_diagonals;
+ unsigned int vel_update_prec;
+ double vel_eps;
+ double vel_diag_strength;
+
+ void initialize_velocity_matrices();
+
+ void initialize_pressure_matrices();
+
+ // The next few structures and functions
+ // are for doing various things in
+ // parallel. They follow the scheme laid
+ // out in @ref threads, using the
+ // WorkStream class. As explained there,
+ // this requires us to declare two
+ // structures for each of the assemblers,
+ // a per-task data and a scratch data
+ // structure. These are then handed over
+ // to functions that assemble local
+ // contributions and that copy these
+ // local contributions to the global
+ // objects.
+ //
+ // One of the things that are specific to
+ // this program is that we don't just
+ // have a single DoFHandler object that
+ // represents both the velocities and the
+ // pressure, but we use individual
+ // DoFHandler objects for these two kinds
+ // of variables. We pay for this
+ // optimization when we want to assemble
+ // terms that involve both variables,
+ // such as the divergence of the velocity
+ // and the gradient of the pressure,
+ // times the respective test
 + // functions. When doing so, we can no
 + // longer use just a single FEValues
 + // object, but rather we need two, and
+ // they need to be initialized with cell
+ // iterators that point to the same cell
+ // in the triangulation but different
+ // DoFHandlers.
+ //
+ // To do this in practice, we declare a
+ // "synchronous" iterator -- an object
+ // that internally consists of several
+ // (in our case two) iterators, and each
+ // time the synchronous iteration is
+ // moved up one step, each of the
+ // iterators stored internally is moved
+ // up one step as well, thereby always
+ // staying in sync. As it so happens,
+ // there is a deal.II class that
+ // facilitates this sort of thing.
+ typedef std_cxx1x::tuple< typename DoFHandler<dim>::active_cell_iterator,
+ typename DoFHandler<dim>::active_cell_iterator
+ > IteratorTuple;
+
+ typedef SynchronousIterators<IteratorTuple> IteratorPair;
+
+ void initialize_gradient_operator();
+
+ struct InitGradPerTaskData
+ {
+ unsigned int d;
+ unsigned int vel_dpc;
+ unsigned int pres_dpc;
+ FullMatrix<double> local_grad;
+ std::vector<unsigned int> vel_local_dof_indices;
+ std::vector<unsigned int> pres_local_dof_indices;
+
+ InitGradPerTaskData (const unsigned int dd,
+ const unsigned int vdpc,
+ const unsigned int pdpc)
+ :
+ d(dd),
+ vel_dpc (vdpc),
+ pres_dpc (pdpc),
+ local_grad (vdpc, pdpc),
+ vel_local_dof_indices (vdpc),
+ pres_local_dof_indices (pdpc)
+ {}
+ };
- struct InitGradScratchData
- {
- unsigned int nqp;
- FEValues<dim> fe_val_vel;
- FEValues<dim> fe_val_pres;
- InitGradScratchData (const FE_Q<dim> &fe_v,
- const FE_Q<dim> &fe_p,
- const QGauss<dim> &quad,
- const UpdateFlags flags_v,
- const UpdateFlags flags_p)
- :
- nqp (quad.size()),
- fe_val_vel (fe_v, quad, flags_v),
- fe_val_pres (fe_p, quad, flags_p)
- {}
- InitGradScratchData (const InitGradScratchData &data)
- :
- nqp (data.nqp),
- fe_val_vel (data.fe_val_vel.get_fe(),
- data.fe_val_vel.get_quadrature(),
- data.fe_val_vel.get_update_flags()),
- fe_val_pres (data.fe_val_pres.get_fe(),
- data.fe_val_pres.get_quadrature(),
- data.fe_val_pres.get_update_flags())
- {}
- };
+ struct InitGradScratchData
+ {
+ unsigned int nqp;
+ FEValues<dim> fe_val_vel;
+ FEValues<dim> fe_val_pres;
+ InitGradScratchData (const FE_Q<dim> &fe_v,
+ const FE_Q<dim> &fe_p,
+ const QGauss<dim> &quad,
+ const UpdateFlags flags_v,
+ const UpdateFlags flags_p)
+ :
+ nqp (quad.size()),
+ fe_val_vel (fe_v, quad, flags_v),
+ fe_val_pres (fe_p, quad, flags_p)
+ {}
+ InitGradScratchData (const InitGradScratchData &data)
+ :
+ nqp (data.nqp),
+ fe_val_vel (data.fe_val_vel.get_fe(),
+ data.fe_val_vel.get_quadrature(),
+ data.fe_val_vel.get_update_flags()),
+ fe_val_pres (data.fe_val_pres.get_fe(),
+ data.fe_val_pres.get_quadrature(),
+ data.fe_val_pres.get_update_flags())
+ {}
+ };
- void assemble_one_cell_of_gradient (const IteratorPair &SI,
- InitGradScratchData &scratch,
- InitGradPerTaskData &data);
- void assemble_one_cell_of_gradient (const IteratorPair &SI,
++ void assemble_one_cell_of_gradient (const IteratorPair &SI,
+ InitGradScratchData &scratch,
+ InitGradPerTaskData &data);
- void copy_gradient_local_to_global (const InitGradPerTaskData &data);
+ void copy_gradient_local_to_global (const InitGradPerTaskData &data);
- // The same general layout also applies
- // to the following classes and functions
- // implementing the assembly of the
- // advection term:
- void assemble_advection_term();
+ // The same general layout also applies
+ // to the following classes and functions
+ // implementing the assembly of the
+ // advection term:
+ void assemble_advection_term();
- struct AdvectionPerTaskData
- {
- FullMatrix<double> local_advection;
- std::vector<unsigned int> local_dof_indices;
- AdvectionPerTaskData (const unsigned int dpc)
- :
- local_advection (dpc, dpc),
- local_dof_indices (dpc)
- {}
- };
+ struct AdvectionPerTaskData
+ {
+ FullMatrix<double> local_advection;
+ std::vector<unsigned int> local_dof_indices;
+ AdvectionPerTaskData (const unsigned int dpc)
+ :
+ local_advection (dpc, dpc),
+ local_dof_indices (dpc)
+ {}
+ };
- struct AdvectionScratchData
- {
- unsigned int nqp;
- unsigned int dpc;
- std::vector< Point<dim> > u_star_local;
- std::vector< Tensor<1,dim> > grad_u_star;
- std::vector<double> u_star_tmp;
- FEValues<dim> fe_val;
- AdvectionScratchData (const FE_Q<dim> &fe,
- const QGauss<dim> &quad,
- const UpdateFlags flags)
- :
- nqp (quad.size()),
- dpc (fe.dofs_per_cell),
- u_star_local (nqp),
- grad_u_star (nqp),
- u_star_tmp (nqp),
- fe_val (fe, quad, flags)
- {}
-
- AdvectionScratchData (const AdvectionScratchData &data)
- :
- nqp (data.nqp),
- dpc (data.dpc),
- u_star_local (nqp),
- grad_u_star (nqp),
- u_star_tmp (nqp),
- fe_val (data.fe_val.get_fe(),
- data.fe_val.get_quadrature(),
- data.fe_val.get_update_flags())
- {}
- };
+ struct AdvectionScratchData
+ {
+ unsigned int nqp;
+ unsigned int dpc;
+ std::vector< Point<dim> > u_star_local;
+ std::vector< Tensor<1,dim> > grad_u_star;
+ std::vector<double> u_star_tmp;
+ FEValues<dim> fe_val;
+ AdvectionScratchData (const FE_Q<dim> &fe,
+ const QGauss<dim> &quad,
+ const UpdateFlags flags)
+ :
+ nqp (quad.size()),
+ dpc (fe.dofs_per_cell),
+ u_star_local (nqp),
+ grad_u_star (nqp),
+ u_star_tmp (nqp),
+ fe_val (fe, quad, flags)
+ {}
+
+ AdvectionScratchData (const AdvectionScratchData &data)
+ :
+ nqp (data.nqp),
+ dpc (data.dpc),
+ u_star_local (nqp),
+ grad_u_star (nqp),
+ u_star_tmp (nqp),
+ fe_val (data.fe_val.get_fe(),
+ data.fe_val.get_quadrature(),
+ data.fe_val.get_update_flags())
+ {}
+ };
- void assemble_one_cell_of_advection (const typename DoFHandler<dim>::active_cell_iterator &cell,
- AdvectionScratchData &scratch,
- AdvectionPerTaskData &data);
+ void assemble_one_cell_of_advection (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AdvectionScratchData &scratch,
+ AdvectionPerTaskData &data);
- void copy_advection_local_to_global (const AdvectionPerTaskData &data);
+ void copy_advection_local_to_global (const AdvectionPerTaskData &data);
- // The final few functions implement the
- // diffusion solve as well as
- // postprocessing the output, including
- // computing the curl of the velocity:
- void diffusion_component_solve (const unsigned int d);
+ // The final few functions implement the
+ // diffusion solve as well as
+ // postprocessing the output, including
+ // computing the curl of the velocity:
+ void diffusion_component_solve (const unsigned int d);
- void output_results (const unsigned int step);
+ void output_results (const unsigned int step);
- void assemble_vorticity (const bool reinit_prec);
+ void assemble_vorticity (const bool reinit_prec);
};
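// As a sketch of how the pieces declared above are meant to interact
// (the update flags are chosen for illustration only, and this is not
// necessarily the exact body of initialize_gradient_operator()): for
// each space direction d one would set up one per-task and one scratch
// object and let WorkStream walk over velocity and pressure cells in
// lockstep,
//
//   InitGradPerTaskData per_task_data (d, fe_velocity.dofs_per_cell,
//                                      fe_pressure.dofs_per_cell);
//   InitGradScratchData scratch_data  (fe_velocity, fe_pressure,
//                                      quadrature_velocity,
//                                      update_gradients | update_JxW_values,
//                                      update_values);
//   WorkStream::run (IteratorPair (IteratorTuple (dof_handler_velocity.begin_active(),
//                                                 dof_handler_pressure.begin_active())),
//                    IteratorPair (IteratorTuple (dof_handler_velocity.end(),
//                                                 dof_handler_pressure.end())),
//                    *this,
//                    &NavierStokesProjection<dim>::assemble_one_cell_of_gradient,
//                    &NavierStokesProjection<dim>::copy_gradient_local_to_global,
//                    scratch_data,
//                    per_task_data);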
template <int dim, int fe_degree, typename number>
class LaplaceOperator : public Subscriptor
{
- public:
- LaplaceOperator ();
+ public:
+ LaplaceOperator ();
- void clear();
+ void clear();
- void reinit (const MGDoFHandler<dim> &dof_handler,
- const ConstraintMatrix &constraints,
- const unsigned int level = numbers::invalid_unsigned_int);
+ void reinit (const MGDoFHandler<dim> &dof_handler,
- const ConstraintMatrix &constraints,
++ const ConstraintMatrix &constraints,
+ const unsigned int level = numbers::invalid_unsigned_int);
- unsigned int m () const;
- unsigned int n () const;
+ unsigned int m () const;
+ unsigned int n () const;
- void vmult (Vector<double> &dst,
- const Vector<double> &src) const;
- void Tvmult (Vector<double> &dst,
- const Vector<double> &src) const;
- void vmult_add (Vector<double> &dst,
- const Vector<double> &src) const;
- void Tvmult_add (Vector<double> &dst,
- const Vector<double> &src) const;
+ void vmult (Vector<double> &dst,
+ const Vector<double> &src) const;
+ void Tvmult (Vector<double> &dst,
+ const Vector<double> &src) const;
+ void vmult_add (Vector<double> &dst,
+ const Vector<double> &src) const;
+ void Tvmult_add (Vector<double> &dst,
+ const Vector<double> &src) const;
- number el (const unsigned int row,
- const unsigned int col) const;
- void set_diagonal (const Vector<number> &diagonal);
+ number el (const unsigned int row,
+ const unsigned int col) const;
+ void set_diagonal (const Vector<number> &diagonal);
- std::size_t memory_consumption () const;
+ std::size_t memory_consumption () const;
- private:
- void local_apply (const MatrixFree<dim,number> &data,
- Vector<double> &dst,
- const Vector<double> &src,
- const std::pair<unsigned int,unsigned int> &cell_range) const;
+ private:
+ void local_apply (const MatrixFree<dim,number> &data,
+ Vector<double> &dst,
+ const Vector<double> &src,
+ const std::pair<unsigned int,unsigned int> &cell_range) const;
- void evaluate_coefficient(const Coefficient<dim> &function);
+ void evaluate_coefficient(const Coefficient<dim> &function);
- MatrixFree<dim,number> data;
- AlignedVector<VectorizedArray<number> > coefficient;
+ MatrixFree<dim,number> data;
+ AlignedVector<VectorizedArray<number> > coefficient;
- Vector<number> diagonal_values;
- bool diagonal_is_available;
+ Vector<number> diagonal_values;
+ bool diagonal_is_available;
};
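// Since the class provides vmult() and is derived from Subscriptor, it
// can be handed directly to deal.II's iterative solvers in place of a
// sparse matrix. A minimal usage sketch, with all surrounding objects
// assumed to exist and the identity used as preconditioner purely for
// illustration:
//
//   LaplaceOperator<dim,fe_degree,double> system_operator;
//   system_operator.reinit (mg_dof_handler, constraints);
//
//   SolverControl             solver_control (1000, 1e-12);
//   SolverCG<Vector<double> > cg (solver_control);
//   cg.solve (system_operator, solution, system_rhs,
//             PreconditionIdentity());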
template <int dim, int fe_degree, typename number>
void
LaplaceOperator<dim,fe_degree,number>::reinit (const MGDoFHandler<dim> &dof_handler,
- const ConstraintMatrix &constraints,
- const unsigned int level)
- const ConstraintMatrix &constraints,
++ const ConstraintMatrix &constraints,
+ const unsigned int level)
{
typename MatrixFree<dim,number>::AdditionalData additional_data;
additional_data.tasks_parallel_scheme =
/* further information on this license. */
- // @sect3{Include files}
- //
- // Most of the include files we need for this
- // program have already been discussed in
- // previous programs. In particular, all of
- // the following should already be familiar
- // friends:
+ // @sect3{Include files}
+ //
+ // Most of the include files we need for this
+ // program have already been discussed in
+ // previous programs. In particular, all of
+ // the following should already be familiar
+ // friends:
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
+
+#include <deal.II/lac/abstract_linear_algebra.h>
+
+#define USE_PETSC_LA
+
+namespace LA
+{
+#ifdef USE_PETSC_LA
+ using namespace dealii::LinearAlgebraPETSc;
+#else
+ using namespace dealii::LinearAlgebraTrilinos;
+#endif
+}
+
+
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_cg.h>
template <int dim>
class LaplaceProblem
{
- public:
- LaplaceProblem ();
- ~LaplaceProblem ();
+ public:
+ LaplaceProblem ();
+ ~LaplaceProblem ();
- void run ();
+ void run ();
- private:
- void setup_system ();
- void assemble_system ();
- void solve ();
- void refine_grid ();
- void output_results (const unsigned int cycle) const;
+ private:
+ void setup_system ();
+ void assemble_system ();
+ void solve ();
+ void refine_grid ();
+ void output_results (const unsigned int cycle) const;
- MPI_Comm mpi_communicator;
+ MPI_Comm mpi_communicator;
- parallel::distributed::Triangulation<dim> triangulation;
+ parallel::distributed::Triangulation<dim> triangulation;
- DoFHandler<dim> dof_handler;
- FE_Q<dim> fe;
+ DoFHandler<dim> dof_handler;
+ FE_Q<dim> fe;
- IndexSet locally_owned_dofs;
- IndexSet locally_relevant_dofs;
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
- ConstraintMatrix constraints;
+ ConstraintMatrix constraints;
- LA::MPI::SparseMatrix system_matrix;
- LA::MPI::Vector locally_relevant_solution;
- LA::MPI::Vector system_rhs;
- PETScWrappers::MPI::SparseMatrix system_matrix;
- PETScWrappers::MPI::Vector locally_relevant_solution;
- PETScWrappers::MPI::Vector system_rhs;
++ LA::MPI::SparseMatrix system_matrix;
++ LA::MPI::Vector locally_relevant_solution;
++ LA::MPI::Vector system_rhs;
- ConditionalOStream pcout;
+ ConditionalOStream pcout;
};
locally_relevant_dofs);
locally_relevant_solution = 0;
system_rhs.reinit (mpi_communicator,
- dof_handler.n_dofs(),
- dof_handler.n_locally_owned_dofs());
+ locally_owned_dofs);
-
++
system_rhs = 0;
- // The next step is to compute hanging node
- // and boundary value constraints, which we
- // combine into a single object storing all
- // constraints.
- //
- // As with all other things in %parallel,
- // the mantra must be that no processor can
- // store all information about the entire
- // universe. As a consequence, we need to
- // tell the constraints object for which
- // degrees of freedom it can store
- // constraints and for which it may not
- // expect any information to store. In our
- // case, as explained in the @ref
- // distributed module, the degrees of
- // freedom we need to care about on each
- // processor are the locally relevant ones,
- // so we pass this to the
- // ConstraintMatrix::reinit function. As a
- // side note, if you forget to pass this
- // argument, the ConstraintMatrix class
- // will allocate an array with length equal
- // to the largest DoF index it has seen so
- // far. For processors with high MPI
- // process number, this may be very large
- // -- maybe on the order of billions. The
- // program would then allocate more memory
- // than for likely all other operations
- // combined for this single array.
+ // The next step is to compute hanging node
+ // and boundary value constraints, which we
+ // combine into a single object storing all
+ // constraints.
+ //
+ // As with all other things in %parallel,
+ // the mantra must be that no processor can
+ // store all information about the entire
+ // universe. As a consequence, we need to
+ // tell the constraints object for which
+ // degrees of freedom it can store
+ // constraints and for which it may not
+ // expect any information to store. In our
+ // case, as explained in the @ref
+ // distributed module, the degrees of
+ // freedom we need to care about on each
+ // processor are the locally relevant ones,
+ // so we pass this to the
+ // ConstraintMatrix::reinit function. As a
+ // side note, if you forget to pass this
+ // argument, the ConstraintMatrix class
+ // will allocate an array with length equal
+ // to the largest DoF index it has seen so
+ // far. For processors with high MPI
+ // process number, this may be very large
 + // -- maybe on the order of billions. The
 + // program would then likely allocate more
 + // memory for this single array than for
 + // all other operations combined.
constraints.clear ();
constraints.reinit (locally_relevant_dofs);
DoFTools::make_hanging_node_constraints (dof_handler, constraints);
template <int dim>
void LaplaceProblem<dim>::solve ()
{
- PETScWrappers::MPI::Vector
+ LA::MPI::Vector
- completely_distributed_solution (mpi_communicator,
- locally_owned_dofs);
+ completely_distributed_solution (mpi_communicator,
- dof_handler.n_dofs(),
- dof_handler.n_locally_owned_dofs());
++ locally_owned_dofs);
SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
+#ifdef USE_PETSC_LA
+
PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
- // Ask for a symmetric preconditioner by
- // setting the first parameter in
- // AdditionalData to true.
+ // Ask for a symmetric preconditioner by
+ // setting the first parameter in
+ // AdditionalData to true.
- PETScWrappers::PreconditionBoomerAMG
+ LA::MPI::PreconditionAMG
- preconditioner(system_matrix,
- PETScWrappers::PreconditionBoomerAMG::AdditionalData(true));
+ preconditioner(system_matrix,
+ PETScWrappers::PreconditionBoomerAMG::AdditionalData(true));
solver.solve (system_matrix, completely_distributed_solution, system_rhs,
preconditioner);
pcout << " Solved in " << solver_control.last_step()
<< " iterations." << std::endl;
- // Ask for a symmetric preconditioner by
- // setting the first parameter in
- // AdditionalData to true.
+#else
+
+ TrilinosWrappers::SolverCG solver(solver_control);
+
- preconditioner;
++ // Ask for a symmetric preconditioner by
++ // setting the first parameter in
++ // AdditionalData to true.
+ LA::MPI::PreconditionAMG
-
++ preconditioner;
+ preconditioner.initialize(system_matrix);
++
+
+ solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+ preconditioner);
+
+ pcout << " Solved in " << solver_control.last_step()
+ << " iterations." << std::endl;
+
+#endif
+
constraints.distribute (completely_distributed_solution);
locally_relevant_solution = completely_distributed_solution;
MPI_Comm _mpi_communicator,
ConditionalOStream _pcout);
- void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor,
+ void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
+ SymmetricTensor<2,dim> &strain_tensor,
- unsigned int &elast_points,
- unsigned int &plast_points,
+ unsigned int &elast_points,
+ unsigned int &plast_points,
double &yield);
- void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor);
+ void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
+ SymmetricTensor<4,dim> &stress_strain_tensor,
+ SymmetricTensor<2,dim> &strain_tensor);
inline SymmetricTensor<2,dim> get_strain (const FEValues<dim> &fe_values,
const unsigned int shape_func,
const unsigned int q_point) const;
}
template <int dim>
- void ConstitutiveLaw<dim>::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor)
+ void ConstitutiveLaw<dim>::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor)
++ SymmetricTensor<4,dim> &stress_strain_tensor,
++ SymmetricTensor<2,dim> &strain_tensor)
{
if (dim == 3)
- {
- SymmetricTensor<2,dim> stress_tensor;
- stress_tensor = (stress_strain_tensor_kappa + stress_strain_tensor_mu)*strain_tensor;
- double tmp = E/((1+nu)*(1-2*nu));
+ {
+ SymmetricTensor<2,dim> stress_tensor;
+ stress_tensor = (stress_strain_tensor_kappa + stress_strain_tensor_mu)*strain_tensor;
+ double tmp = E/((1+nu)*(1-2*nu));
- stress_strain_tensor = stress_strain_tensor_mu;
- stress_strain_tensor_linearized = stress_strain_tensor_mu;
+ stress_strain_tensor = stress_strain_tensor_mu;
+ stress_strain_tensor_linearized = stress_strain_tensor_mu;
- SymmetricTensor<2,dim> deviator_stress_tensor = deviator(stress_tensor);
+ SymmetricTensor<2,dim> deviator_stress_tensor = deviator(stress_tensor);
- double deviator_stress_tensor_norm = deviator_stress_tensor.norm ();
+ double deviator_stress_tensor_norm = deviator_stress_tensor.norm ();
- double beta = 1.0;
- if (deviator_stress_tensor_norm >= sigma_0)
- {
- beta = (sigma_0 + gamma)/deviator_stress_tensor_norm;
- stress_strain_tensor *= beta;
- stress_strain_tensor_linearized *= beta;
- deviator_stress_tensor /= deviator_stress_tensor_norm;
- stress_strain_tensor_linearized -= beta*2*mu*outer_product(deviator_stress_tensor, deviator_stress_tensor);
- }
+ double beta = 1.0;
+ if (deviator_stress_tensor_norm >= sigma_0)
+ {
+ beta = (sigma_0 + gamma)/deviator_stress_tensor_norm;
+ stress_strain_tensor *= beta;
+ stress_strain_tensor_linearized *= beta;
+ deviator_stress_tensor /= deviator_stress_tensor_norm;
+ stress_strain_tensor_linearized -= beta*2*mu*outer_product(deviator_stress_tensor, deviator_stress_tensor);
+ }
- stress_strain_tensor += stress_strain_tensor_kappa;
- stress_strain_tensor_linearized += stress_strain_tensor_kappa;
- }
+ stress_strain_tensor += stress_strain_tensor_kappa;
+ stress_strain_tensor_linearized += stress_strain_tensor_kappa;
+ }
}
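// In formulas, the branch above is a sketch of the radial scaling used
// for linear isotropic hardening: with the trial stress
// $\sigma = (C_\kappa + C_\mu)\,\varepsilon$ and its deviator
// $\mathrm{dev}\,\sigma$, if $\|\mathrm{dev}\,\sigma\| \ge \sigma_0$ one
// sets $\beta = (\sigma_0+\gamma)/\|\mathrm{dev}\,\sigma\|$ and uses the
// scaled tensor $C = \beta C_\mu + C_\kappa$; the consistent
// linearization additionally subtracts the rank-one term
// $2\mu\beta\,\hat n \otimes \hat n$ with
// $\hat n = \mathrm{dev}\,\sigma / \|\mathrm{dev}\,\sigma\|$.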
namespace EquationData
template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::
- BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
+ BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
const InverseMatrix<TrilinosWrappers::SparseMatrix,
- PreconditionerMp> &Mpinv,
+ PreconditionerMp> &Mpinv,
const PreconditionerA &Apreconditioner)
- :
- darcy_matrix (&S),
- m_inverse (&Mpinv),
- a_preconditioner (Apreconditioner),
- tmp (darcy_matrix->block(1,1).m())
+ :
+ darcy_matrix (&S),
+ m_inverse (&Mpinv),
+ a_preconditioner (Apreconditioner),
+ tmp (darcy_matrix->block(1,1).m())
{}
template <int dim>
class TwoPhaseFlowProblem
{
- public:
- TwoPhaseFlowProblem (const unsigned int degree);
- void run ();
-
- private:
- void setup_dofs ();
- void assemble_darcy_preconditioner ();
- void build_darcy_preconditioner ();
- void assemble_darcy_system ();
- void assemble_saturation_system ();
- void assemble_saturation_matrix ();
- void assemble_saturation_rhs ();
- void assemble_saturation_rhs_cell_term (const FEValues<dim> &saturation_fe_values,
- const FEValues<dim> &darcy_fe_values,
- const double global_max_u_F_prime,
- const double global_S_variation,
- const std::vector<unsigned int> &local_dof_indices);
- void assemble_saturation_rhs_boundary_term (const FEFaceValues<dim> &saturation_fe_face_values,
- const FEFaceValues<dim> &darcy_fe_face_values,
- const std::vector<unsigned int> &local_dof_indices);
- void solve ();
- void refine_mesh (const unsigned int min_grid_level,
- const unsigned int max_grid_level);
- void output_results () const;
-
- // We follow with a number of
- // helper functions that are
- // used in a variety of places
- // throughout the program:
- double get_max_u_F_prime () const;
- std::pair<double,double> get_extrapolated_saturation_range () const;
- bool determine_whether_to_solve_for_pressure_and_velocity () const;
- void project_back_saturation ();
- double compute_viscosity (const std::vector<double> &old_saturation,
- const std::vector<double> &old_old_saturation,
- const std::vector<Tensor<1,dim> > &old_saturation_grads,
- const std::vector<Tensor<1,dim> > &old_old_saturation_grads,
- const std::vector<Vector<double> > &present_darcy_values,
- const double global_max_u_F_prime,
- const double global_S_variation,
- const double cell_diameter) const;
-
-
- // This all is followed by the
- // member variables, most of
- // which are similar to the
- // ones in step-31, with the
- // exception of the ones that
- // pertain to the macro time
- // stepping for the
- // velocity/pressure system:
- Triangulation<dim> triangulation;
- double global_Omega_diameter;
-
- const unsigned int degree;
-
- const unsigned int darcy_degree;
- FESystem<dim> darcy_fe;
- DoFHandler<dim> darcy_dof_handler;
- ConstraintMatrix darcy_constraints;
-
- ConstraintMatrix darcy_preconditioner_constraints;
-
- TrilinosWrappers::BlockSparseMatrix darcy_matrix;
- TrilinosWrappers::BlockSparseMatrix darcy_preconditioner_matrix;
-
- TrilinosWrappers::BlockVector darcy_solution;
- TrilinosWrappers::BlockVector darcy_rhs;
-
- TrilinosWrappers::BlockVector last_computed_darcy_solution;
- TrilinosWrappers::BlockVector second_last_computed_darcy_solution;
-
-
- const unsigned int saturation_degree;
- FE_Q<dim> saturation_fe;
- DoFHandler<dim> saturation_dof_handler;
- ConstraintMatrix saturation_constraints;
-
- TrilinosWrappers::SparseMatrix saturation_matrix;
-
-
- TrilinosWrappers::Vector saturation_solution;
- TrilinosWrappers::Vector old_saturation_solution;
- TrilinosWrappers::Vector old_old_saturation_solution;
- TrilinosWrappers::Vector saturation_rhs;
-
- TrilinosWrappers::Vector saturation_matching_last_computed_darcy_solution;
-
- const double saturation_refinement_threshold;
-
- double time;
- const double end_time;
-
- double current_macro_time_step;
- double old_macro_time_step;
-
- double time_step;
- double old_time_step;
- unsigned int timestep_number;
-
- const double viscosity;
- const double porosity;
- const double AOS_threshold;
-
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Amg_preconditioner;
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Mp_preconditioner;
-
- bool rebuild_saturation_matrix;
-
- // At the very end we declare a
- // variable that denotes the
- // material model. Compared to
- // step-21, we do this here as
- // a member variable since we
- // will want to use it in a
- // variety of places and so
- // having a central place where
- // such a variable is declared
- // will make it simpler to
- // replace one class by another
- // (e.g. replace
- // RandomMedium::KInverse by
- // SingleCurvingCrack::KInverse).
- const RandomMedium::KInverse<dim> k_inverse;
+ public:
+ TwoPhaseFlowProblem (const unsigned int degree);
+ void run ();
+
+ private:
+ void setup_dofs ();
+ void assemble_darcy_preconditioner ();
+ void build_darcy_preconditioner ();
+ void assemble_darcy_system ();
+ void assemble_saturation_system ();
+ void assemble_saturation_matrix ();
+ void assemble_saturation_rhs ();
+ void assemble_saturation_rhs_cell_term (const FEValues<dim> &saturation_fe_values,
+ const FEValues<dim> &darcy_fe_values,
+ const double global_max_u_F_prime,
+ const double global_S_variation,
+ const std::vector<unsigned int> &local_dof_indices);
+ void assemble_saturation_rhs_boundary_term (const FEFaceValues<dim> &saturation_fe_face_values,
+ const FEFaceValues<dim> &darcy_fe_face_values,
+ const std::vector<unsigned int> &local_dof_indices);
+ void solve ();
+ void refine_mesh (const unsigned int min_grid_level,
+ const unsigned int max_grid_level);
+ void output_results () const;
+
+ // We follow with a number of
+ // helper functions that are
+ // used in a variety of places
+ // throughout the program:
+ double get_max_u_F_prime () const;
+ std::pair<double,double> get_extrapolated_saturation_range () const;
+ bool determine_whether_to_solve_for_pressure_and_velocity () const;
+ void project_back_saturation ();
+ double compute_viscosity (const std::vector<double> &old_saturation,
+ const std::vector<double> &old_old_saturation,
- const std::vector<Tensor<1,dim> > &old_saturation_grads,
- const std::vector<Tensor<1,dim> > &old_old_saturation_grads,
++ const std::vector<Tensor<1,dim> > &old_saturation_grads,
++ const std::vector<Tensor<1,dim> > &old_old_saturation_grads,
+ const std::vector<Vector<double> > &present_darcy_values,
+ const double global_max_u_F_prime,
+ const double global_S_variation,
+ const double cell_diameter) const;
+
+
+ // This all is followed by the
+ // member variables, most of
+ // which are similar to the
+ // ones in step-31, with the
+ // exception of the ones that
+ // pertain to the macro time
+ // stepping for the
+ // velocity/pressure system:
+ Triangulation<dim> triangulation;
+ double global_Omega_diameter;
+
+ const unsigned int degree;
+
+ const unsigned int darcy_degree;
+ FESystem<dim> darcy_fe;
+ DoFHandler<dim> darcy_dof_handler;
+ ConstraintMatrix darcy_constraints;
+
+ ConstraintMatrix darcy_preconditioner_constraints;
+
+ TrilinosWrappers::BlockSparseMatrix darcy_matrix;
+ TrilinosWrappers::BlockSparseMatrix darcy_preconditioner_matrix;
+
+ TrilinosWrappers::BlockVector darcy_solution;
+ TrilinosWrappers::BlockVector darcy_rhs;
+
+ TrilinosWrappers::BlockVector last_computed_darcy_solution;
+ TrilinosWrappers::BlockVector second_last_computed_darcy_solution;
+
+
+ const unsigned int saturation_degree;
+ FE_Q<dim> saturation_fe;
+ DoFHandler<dim> saturation_dof_handler;
+ ConstraintMatrix saturation_constraints;
+
+ TrilinosWrappers::SparseMatrix saturation_matrix;
+
+
+ TrilinosWrappers::Vector saturation_solution;
+ TrilinosWrappers::Vector old_saturation_solution;
+ TrilinosWrappers::Vector old_old_saturation_solution;
+ TrilinosWrappers::Vector saturation_rhs;
+
+ TrilinosWrappers::Vector saturation_matching_last_computed_darcy_solution;
+
+ const double saturation_refinement_threshold;
+
+ double time;
+ const double end_time;
+
+ double current_macro_time_step;
+ double old_macro_time_step;
+
+ double time_step;
+ double old_time_step;
+ unsigned int timestep_number;
+
+ const double viscosity;
+ const double porosity;
+ const double AOS_threshold;
+
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Amg_preconditioner;
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Mp_preconditioner;
+
+ bool rebuild_saturation_matrix;
+
+ // At the very end we declare a
+ // variable that denotes the
+ // material model. Compared to
+ // step-21, we do this here as
+ // a member variable since we
+ // will want to use it in a
+ // variety of places and so
+ // having a central place where
+ // such a variable is declared
+ // will make it simpler to
+ // replace one class by another
+ // (e.g. replace
+ // RandomMedium::KInverse by
+ // SingleCurvingCrack::KInverse).
+ const RandomMedium::KInverse<dim> k_inverse;
};
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/data_out.h>
- #endif
+#include <deal.II/lac/abstract_linear_algebra.h>
+
+#define USE_PETSC_LA
+
+namespace LA
+{
+#ifdef USE_PETSC_LA
+ using namespace dealii::LinearAlgebraPETSc;
+#else
+ using namespace dealii::LinearAlgebraDealII;
++#endif
+// using namespace dealii::LinearAlgebraTrilinos;
+}
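+// With this alias in place, the program below can refer to matrix and
+// vector types simply as LA::SparseMatrix and LA::Vector, and the
+// linear algebra backend is switched by toggling the USE_PETSC_LA
+// define above (or by uncommenting the Trilinos alternative).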
+
#include <fstream>
#include <iostream>
template <int dim>
class Step6
{
- public:
- Step6 ();
- ~Step6 ();
+ public:
+ Step6 ();
+ ~Step6 ();
- void run ();
+ void run ();
- private:
- void setup_system ();
- void assemble_system ();
- void solve ();
- void refine_grid ();
- void output_results (const unsigned int cycle) const;
+ private:
+ void setup_system ();
+ void assemble_system ();
+ void solve ();
+ void refine_grid ();
+ void output_results (const unsigned int cycle) const;
- Triangulation<dim> triangulation;
+ Triangulation<dim> triangulation;
- DoFHandler<dim> dof_handler;
- FE_Q<dim> fe;
+ DoFHandler<dim> dof_handler;
+ FE_Q<dim> fe;
- // This is the new variable in
- // the main class. We need an
- // object which holds a list of
- // constraints to hold the
- // hanging nodes and the
- // boundary conditions.
- ConstraintMatrix constraints;
+ // This is the new variable in
+ // the main class. We need an
+ // object which holds a list of
+ // constraints arising from the
+ // hanging nodes and the
+ // boundary conditions.
+ ConstraintMatrix constraints;
- SparsityPattern sparsity_pattern;
- LA::SparseMatrix system_matrix;
+ SparsityPattern sparsity_pattern;
- SparseMatrix<double> system_matrix;
++ LA::SparseMatrix system_matrix;
- LA::Vector solution;
- LA::Vector system_rhs;
- Vector<double> solution;
- Vector<double> system_rhs;
++ LA::Vector solution;
++ LA::Vector system_rhs;
};
// transfer the contributions from @p cell_matrix and @p cell_rhs into the global objects.
constraints.distribute_local_to_global(cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
}
- // Now we are done assembling the linear
+
- #ifdef USE_PETSC_LA
++#ifdef USE_PETSC_LA
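+ // PETSc requires matrices and vectors to be "compressed" once
+ // assembly is finished: the entries added above are flushed and the
+ // objects are put into a state in which they can actually be used.
+ // The built-in deal.II objects do not need this step, hence the
+ // preprocessor guard.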
+ system_matrix.compress();
+ system_rhs.compress();
- #endif
++#endif
+
+ // Now we are done assembling the linear
- // system. The constrained nodes are still
- // in the linear system (there is a one on
- // the diagonal of the matrix and all other
- // entries for this line are set to zero)
- // but the computed values are invalid. We
- // compute the correct values for these
- // nodes at the end of the
- // <code>solve</code> function.
+ // system. The constrained nodes are still
+ // in the linear system (there is a one on
+ // the diagonal of the matrix and all other
+ // entries for this line are set to zero)
+ // but the computed values are invalid. We
+ // compute the correct values for these
+ // nodes at the end of the
+ // <code>solve</code> function.
}
void Step6<dim>::solve ()
{
SolverControl solver_control (1000, 1e-12);
- SolverCG<> solver (solver_control);
+ SolverCG<LA::Vector> solver (solver_control);
- PreconditionSSOR<> preconditioner;
+ LA::PreconditionSSOR preconditioner;
preconditioner.initialize(system_matrix, 1.2);
-
+
solver.solve (system_matrix, solution, system_rhs,
preconditioner);
}
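// The constrained degrees of freedom still carry meaningless values
// after the CG solve; as announced at the end of assemble_system(),
// they are given their correct values at the end of solve(),
// presumably with the usual call
//
//   constraints.distribute (solution);
//
// which copies each constrained value from the degrees of freedom it
// depends on.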
- // @sect3{The <code>main</code> function}
-
- // The main function is unaltered in
- // its functionality from the
- // previous example, but we have
- // taken a step of additional
- // caution. Sometimes, something goes
- // wrong (such as insufficient disk
- // space upon writing an output file,
- // not enough memory when trying to
- // allocate a vector or a matrix, or
- // if we can't read from or write to
- // a file for whatever reason), and
- // in these cases the library will
- // throw exceptions. Since these are
- // run-time problems, not programming
- // errors that can be fixed once and
- // for all, this kind of exceptions
- // is not switched off in optimized
- // mode, in contrast to the
- // <code>Assert</code> macro which we have
- // used to test against programming
- // errors. If uncaught, these
- // exceptions propagate the call tree
- // up to the <code>main</code> function, and
- // if they are not caught there
- // either, the program is aborted. In
- // many cases, like if there is not
- // enough memory or disk space, we
- // can't do anything but we can at
- // least print some text trying to
- // explain the reason why the program
- // failed. A way to do so is shown in
- // the following. It is certainly
- // useful to write any larger program
- // in this way, and you can do so by
- // more or less copying this function
- // except for the <code>try</code> block that
- // actually encodes the functionality
- // particular to the present
- // application.
+ // @sect3{The <code>main</code> function}
+
+ // The main function is unaltered in
+ // its functionality from the
+ // previous example, but we have
+ // taken a step of additional
+ // caution. Sometimes, something goes
+ // wrong (such as insufficient disk
+ // space upon writing an output file,
+ // not enough memory when trying to
+ // allocate a vector or a matrix, or
+ // if we can't read from or write to
+ // a file for whatever reason), and
+ // in these cases the library will
+ // throw exceptions. Since these are
+ // run-time problems, not programming
+ // errors that can be fixed once and
+ // for all, this kind of exception
+ // is not switched off in optimized
+ // mode, in contrast to the
+ // <code>Assert</code> macro which we have
+ // used to test against programming
+ // errors. If uncaught, these
+ // exceptions propagate the call tree
+ // up to the <code>main</code> function, and
+ // if they are not caught there
+ // either, the program is aborted. In
+ // many cases, like if there is not
+ // enough memory or disk space, we
+ // can't do anything but we can at
+ // least print some text trying to
+ // explain the reason why the program
+ // failed. A way to do so is shown in
+ // the following. It is certainly
+ // useful to write any larger program
+ // in this way, and you can do so by
+ // more or less copying this function
+ // except for the <code>try</code> block that
+ // actually encodes the functionality
+ // particular to the present
+ // application.
-int main ()
+int main (int argc, char *argv[])
{
- // The general idea behind the
- // layout of this function is as
- // follows: let's try to run the
- // program as we did before...
+ // The general idea behind the
+ // layout of this function is as
+ // follows: let's try to run the
+ // program as we did before...
try
{
deallog.depth_console (0);
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
- Step6<2> laplace_problem_2d;
- laplace_problem_2d.run ();
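+      // The Step6 object is placed into its own scope so that it, and
+      // with it all PETSc vectors and matrices it owns, is destroyed
+      // before mpi_initialization goes out of scope and finalizes MPI
+      // (and PETSc).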
+ {
- Step6<2> laplace_problem_2d;
- laplace_problem_2d.run ();
++ Step6<2> laplace_problem_2d;
++ laplace_problem_2d.run ();
+ }
}
- // ...and if this should fail, try
- // to gather as much information as
- // possible. Specifically, if the
- // exception that was thrown is an
- // object of a class that is
- // derived from the C++ standard
- // class <code>exception</code>, then we can
- // use the <code>what</code> member function
- // to get a string which describes
- // the reason why the exception was
- // thrown.
- //
- // The deal.II exception classes
- // are all derived from the
- // standard class, and in
- // particular, the <code>exc.what()</code>
- // function will return
- // approximately the same string as
- // would be generated if the
- // exception was thrown using the
- // <code>Assert</code> macro. You have seen
- // the output of such an exception
- // in the previous example, and you
- // then know that it contains the
- // file and line number of where
- // the exception occured, and some
- // other information. This is also
- // what the following statements
- // would print.
- //
- // Apart from this, there isn't
- // much that we can do except
- // exiting the program with an
- // error code (this is what the
- // <code>return 1;</code> does):
+ // ...and if this should fail, try
+ // to gather as much information as
+ // possible. Specifically, if the
+ // exception that was thrown is an
+ // object of a class that is
+ // derived from the C++ standard
+ // class <code>exception</code>, then we can
+ // use the <code>what</code> member function
+ // to get a string which describes
+ // the reason why the exception was
+ // thrown.
+ //
+ // The deal.II exception classes
+ // are all derived from the
+ // standard class, and in
+ // particular, the <code>exc.what()</code>
+ // function will return
+ // approximately the same string as
+ // would be generated if the
+ // exception was thrown using the
+ // <code>Assert</code> macro. You have seen
+ // the output of such an exception
+ // in the previous example, and you
+ // then know that it contains the
+ // file and line number of where
+ // the exception occurred, and some
+ // other information. This is also
+ // what the following statements
+ // would print.
+ //
+ // Apart from this, there isn't
+ // much that we can do except
+ // exiting the program with an
+ // error code (this is what the
+ // <code>return 1;</code> does):
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
- // @sect3{GradientEstimation class declaration}
-
- // Now, finally, here comes the class
- // that will compute the difference
- // approximation of the gradient on
- // each cell and weighs that with a
- // power of the mesh size, as
- // described in the introduction.
- // This class is a simple version of
- // the <code>DerivativeApproximation</code>
- // class in the library, that uses
- // similar techniques to obtain
- // finite difference approximations
- // of the gradient of a finite
- // element field, or if higher
- // derivatives.
- //
- // The
- // class has one public static
- // function <code>estimate</code> that is
- // called to compute a vector of
- // error indicators, and one private
- // function that does the actual work
- // on an interval of all active
- // cells. The latter is called by the
- // first one in order to be able to
- // do the computations in parallel if
- // your computer has more than one
- // processor. While the first
- // function accepts as parameter a
- // vector into which the error
- // indicator is written for each
- // cell. This vector is passed on to
- // the second function that actually
- // computes the error indicators on
- // some cells, and the respective
- // elements of the vector are
- // written. By the way, we made it
- // somewhat of a convention to use
- // vectors of floats for error
- // indicators rather than the common
- // vectors of doubles, as the
- // additional accuracy is not
- // necessary for estimated values.
- //
- // In addition to these two
- // functions, the class declares to
- // exceptions which are raised when a
- // cell has no neighbors in each of
- // the space directions (in which
- // case the matrix described in the
- // introduction would be singular and
- // can't be inverted), while the
- // other one is used in the more
- // common case of invalid parameters
- // to a function, namely a vector of
- // wrong size.
- //
- // Two annotations to this class are
- // still in order: the first is that
- // the class has no non-static member
- // functions or variables, so this is
- // not really a class, but rather
- // serves the purpose of a
- // <code>namespace</code> in C++. The reason
- // that we chose a class over a
- // namespace is that this way we can
- // declare functions that are
- // private, i.e. visible to the
- // outside world but not
- // callable. This can be done with
- // namespaces as well, if one
- // declares some functions in header
- // files in the namespace and
- // implements these and other
- // functions in the implementation
- // file. The functions not declared
- // in the header file are still in
- // the namespace but are not callable
- // from outside. However, as we have
- // only one file here, it is not
- // possible to hide functions in the
- // present case.
- //
- // The second is that the dimension
- // template parameter is attached to
- // the function rather than to the
- // class itself. This way, you don't
- // have to specify the template
- // parameter yourself as in most
- // other cases, but the compiler can
- // figure its value out itself from
- // the dimension of the DoF handler
- // object that one passes as first
- // argument.
- //
- // Finally note that the
- // <code>IndexInterval</code> typedef is
- // introduced as a convenient
- // abbreviation for an otherwise
- // lengthy type name.
+ // @sect3{GradientEstimation class declaration}
+
+ // Now, finally, here comes the class
+ // that will compute the difference
+ // approximation of the gradient on
+ // each cell and weighs that with a
+ // power of the mesh size, as
+ // described in the introduction.
+ // This class is a simple version of
+ // the <code>DerivativeApproximation</code>
+ // class in the library, that uses
+ // similar techniques to obtain
+ // finite difference approximations
+ // of the gradient of a finite
+ // element field, or of higher
+ // derivatives.
+ //
+ // The
+ // class has one public static
+ // function <code>estimate</code> that is
+ // called to compute a vector of
+ // error indicators, and one private
+ // function that does the actual work
+ // on an interval of all active
+ // cells. The latter is called by the
+ // first one in order to be able to
+ // do the computations in parallel if
+ // your computer has more than one
+ // processor. While the first
+ // function accepts as parameter a
+ // vector into which the error
+ // indicator is written for each
+ // cell. This vector is passed on to
+ // the second function that actually
+ // computes the error indicators on
+ // some cells, and the respective
+ // elements of the vector are
+ // written. By the way, we made it
+ // somewhat of a convention to use
+ // vectors of floats for error
+ // indicators rather than the common
+ // vectors of doubles, as the
+ // additional accuracy is not
+ // necessary for estimated values.
+ //
+ // In addition to these two
+ // functions, the class declares two
+ // exceptions: the first is raised when a
+ // cell has no neighbors in each of
+ // the space directions (in which
+ // case the matrix described in the
+ // introduction would be singular and
+ // can't be inverted), while the
+ // other one is used in the more
+ // common case of invalid parameters
+ // to a function, namely a vector of
+ // wrong size.
+ //
+ // Two annotations to this class are
+ // still in order: the first is that
+ // the class has no non-static member
+ // functions or variables, so this is
+ // not really a class, but rather
+ // serves the purpose of a
+ // <code>namespace</code> in C++. The reason
+ // that we chose a class over a
+ // namespace is that this way we can
+ // declare functions that are
+ // private, i.e. visible to the
+ // outside world but not
+ // callable. This can be done with
+ // namespaces as well, if one
+ // declares some functions in header
+ // files in the namespace and
+ // implements these and other
+ // functions in the implementation
+ // file. The functions not declared
+ // in the header file are still in
+ // the namespace but are not callable
+ // from outside. However, as we have
+ // only one file here, it is not
+ // possible to hide functions in the
+ // present case.
+ //
+ // The second is that the dimension
+ // template parameter is attached to
+ // the function rather than to the
+ // class itself. This way, you don't
+ // have to specify the template
+ // parameter yourself as in most
+ // other cases, but the compiler can
+ // figure its value out itself from
+ // the dimension of the DoF handler
+ // object that one passes as first
+ // argument.
+ //
+ // Finally note that the
+ // <code>IndexInterval</code> typedef is
+ // introduced as a convenient
+ // abbreviation for an otherwise
+ // lengthy type name.
class GradientEstimation
{
- public:
- template <int dim>
- static void estimate (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
- Vector<float> &error_per_cell);
-
- DeclException2 (ExcInvalidVectorLength,
- int, int,
- << "Vector has length " << arg1 << ", but should have "
- << arg2);
- DeclException0 (ExcInsufficientDirections);
-
- private:
- typedef std::pair<unsigned int,unsigned int> IndexInterval;
-
- template <int dim>
- static void estimate_interval (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
- const IndexInterval &index_interval,
- Vector<float> &error_per_cell);
+ public:
+ template <int dim>
+ static void estimate (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
++ const Vector<double> &solution,
+ Vector<float> &error_per_cell);
+
+ DeclException2 (ExcInvalidVectorLength,
+ int, int,
+ << "Vector has length " << arg1 << ", but should have "
+ << arg2);
+ DeclException0 (ExcInsufficientDirections);
+
+ private:
+ typedef std::pair<unsigned int,unsigned int> IndexInterval;
+
+ template <int dim>
+ static void estimate_interval (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
++ const Vector<double> &solution,
+ const IndexInterval &index_interval,
+ Vector<float> &error_per_cell);
};
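// A minimal sketch of how this class is typically used during adaptive
// refinement; the names triangulation, dof_handler and solution are
// assumptions about the surrounding problem class, not part of the
// declaration above:
//
//   Vector<float> error_per_cell (triangulation.n_active_cells());
//   GradientEstimation::estimate (dof_handler, solution, error_per_cell);
//   GridRefinement::refine_and_coarsen_fixed_number (triangulation,
//                                                    error_per_cell,
//                                                    0.3, 0.03);
//   triangulation.execute_coarsening_and_refinement ();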
template <int dim>
void
GradientEstimation::estimate (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution,
+ const Vector<double> &solution,
Vector<float> &error_per_cell)
{
- // Before starting with the work,
- // we check that the vector into
- // which the results are written,
- // has the right size. It is a
- // common error that such
- // parameters have the wrong size,
- // but the resulting damage by not
- // catching these errors are very
- // subtle as they are usually
- // corruption of data somewhere in
- // memory. Often, the problems
- // emerging from this are not
- // reproducible, and we found that
- // it is well worth the effort to
- // check for such things.
+ // Before starting with the work,
+ // we check that the vector into
+ // which the results are written,
+ // has the right size. It is a
+ // common error that such
+ // parameters have the wrong size,
+ // but the resulting damage from not
+ // catching these errors is very
+ // subtle, as it usually consists of
+ // corruption of data somewhere in
+ // memory. Often, the problems
+ // emerging from this are not
+ // reproducible, and we found that
+ // it is well worth the effort to
+ // check for such things.
Assert (error_per_cell.size() == dof_handler.get_tria().n_active_cells(),
ExcInvalidVectorLength (error_per_cell.size(),
dof_handler.get_tria().n_active_cells()));
class ConditionalOStream
{
- public:
- /**
- * Constructor. Set the stream to which
- * we want to write, and the condition
- * based on which writes are actually
- * forwarded. Per default the condition
- * of an object is active.
- */
- ConditionalOStream (std::ostream &stream,
- const bool active = true);
-
- /**
- * Depending on the
- * <tt>active</tt> flag set the
- * condition of this stream to
- * active (true) or non-active
- * (false). An object of this
- * class prints to <tt>cout</tt>
- * if and only if its condition
- * is active.
- */
- void set_condition (const bool active);
-
- /**
- * Return the condition of the object.
- */
- bool is_active() const;
-
- /**
- * Return a reference to the stream
- * currently in use.
- */
- std::ostream & get_stream () const;
-
- /**
- * Output a constant something through
- * this stream. This function must be @p
- * const so that member objects of this
- * type can also be used from @p const
- * member functions of the surrounding
- * class.
- */
- template <typename T>
- const ConditionalOStream &
- operator << (const T &t) const;
-
- /**
- * Treat ostream manipulators. This
- * function must be @p const so that
- * member objects of this type can also
- * be used from @p const member functions
- * of the surrounding class.
- *
- * Note that compilers want to see this
- * treated differently from the general
- * template above since functions like @p
- * std::endl are actually overloaded and
- * can't be bound directly to a template
- * type.
- */
- const ConditionalOStream &
- operator<< (std::ostream& (*p) (std::ostream&)) const;
-
- private:
- /**
- * Reference to the stream we
- * want to write to.
- */
- std::ostream &output_stream;
-
- /**
- * Stores the actual condition
- * the object is in.
- */
- bool active_flag;
+ public:
+ /**
+ * Constructor. Set the stream to which
+ * we want to write, and the condition
+ * based on which writes are actually
+ * forwarded. Per default the condition
+ * of an object is active.
+ */
+ ConditionalOStream (std::ostream &stream,
+ const bool active = true);
+
+ /**
+ * Depending on the
+ * <tt>active</tt> flag set the
+ * condition of this stream to
+ * active (true) or non-active
+ * (false). An object of this
+ * class prints to <tt>cout</tt>
+ * if and only if its condition
+ * is active.
+ */
+ void set_condition (const bool active);
+
+ /**
+ * Return the condition of the object.
+ */
+ bool is_active() const;
+
+ /**
+ * Return a reference to the stream
+ * currently in use.
+ */
+ std::ostream &get_stream () const;
+
+ /**
+ * Output a constant something through
+ * this stream. This function must be @p
+ * const so that member objects of this
+ * type can also be used from @p const
+ * member functions of the surrounding
+ * class.
+ */
+ template <typename T>
+ const ConditionalOStream &
+ operator << (const T &t) const;
+
+ /**
+ * Treat ostream manipulators. This
+ * function must be @p const so that
+ * member objects of this type can also
+ * be used from @p const member functions
+ * of the surrounding class.
+ *
+ * Note that compilers want to see this
+ * treated differently from the general
+ * template above since functions like @p
+ * std::endl are actually overloaded and
+ * can't be bound directly to a template
+ * type.
+ */
+ const ConditionalOStream &
+ operator<< (std::ostream& (*p) (std::ostream &)) const;
+
+ private:
+ /**
+ * Reference to the stream we
+ * want to write to.
+ */
- std::ostream &output_stream;
++ std::ostream &output_stream;
+
+ /**
+ * Stores the actual condition
+ * the object is in.
+ */
+ bool active_flag;
};
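// A typical use of this class in a parallel program looks like the
// following sketch (the MPI query is the usual deal.II idiom and an
// assumption here, not something this header prescribes):
//
//   ConditionalOStream pcout (std::cout,
//                             Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0);
//   pcout << "Only the first process writes this line." << std::endl;
//
// set_condition() can later be used to switch output on or off again,
// and is_active() to query the current state.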
template <int dim, int spacedim=dim>
class DataOutInterface : private DataOutBase
{
- public:
- /*
- * Import a few names that were
- * previously in this class and have then
- * moved to the base class. Since the
- * base class is inherited from
- * privately, we need to re-import these
- * symbols to make sure that references
- * to DataOutInterface<dim,spacedim>::XXX
- * remain valid.
- */
- using DataOutBase::OutputFormat;
- using DataOutBase::default_format;
- using DataOutBase::dx;
- using DataOutBase::gnuplot;
- using DataOutBase::povray;
- using DataOutBase::eps;
- using DataOutBase::tecplot;
- using DataOutBase::tecplot_binary;
- using DataOutBase::vtk;
- using DataOutBase::vtu;
- using DataOutBase::deal_II_intermediate;
- using DataOutBase::parse_output_format;
- using DataOutBase::get_output_format_names;
- using DataOutBase::determine_intermediate_format_dimensions;
-
- /**
- * Constructor.
- */
- DataOutInterface ();
-
- /**
- * Destructor. Does nothing, but is
- * declared virtual since this class has
- * virtual functions.
- */
- virtual ~DataOutInterface ();
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in OpenDX format. See
- * DataOutBase::write_dx.
- */
- void write_dx (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in EPS format. See
- * DataOutBase::write_eps.
- */
- void write_eps (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in GMV format. See
- * DataOutBase::write_gmv.
- */
- void write_gmv (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in GNUPLOT format. See
- * DataOutBase::write_gnuplot.
- */
- void write_gnuplot (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in POVRAY format. See
- * DataOutBase::write_povray.
- */
- void write_povray (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in Tecplot format. See
- * DataOutBase::write_tecplot.
- */
- void write_tecplot (std::ostream &out) const;
-
- /**
- * Obtain data through
- * get_patches() and write it in
- * the Tecplot binary output
- * format. Note that the name of
- * the output file must be
- * specified through the
- * TecplotFlags interface.
- */
- void write_tecplot_binary (std::ostream &out) const;
-
- /**
- * Obtain data through
- * get_patches() and write it to
- * <tt>out</tt> in UCD format for
- * AVS. See
- * DataOutBase::write_ucd.
- */
- void write_ucd (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in Vtk format. See
- * DataOutBase::write_vtk.
- */
- void write_vtk (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in Vtu (VTK's XML) format. See
- * DataOutBase::write_vtu.
- *
- * Some visualization programs,
- * such as ParaView, can read
- * several separate VTU files to
- * parallelize visualization. In
- * that case, you need a
- * <code>.pvtu</code> file that
- * describes which VTU files form
- * a group. The
- * DataOutInterface::write_pvtu_record()
- * function can generate such a
- * master record. Likewise,
- * DataOutInterface::write_visit_record()
- * does the same for VisIt. Finally,
- * DataOutInterface::write_pvd_record()
- * can be used to group together
- * the files that jointly make up
- * a time dependent simulation.
- */
- void write_vtu (std::ostream &out) const;
-
- /**
- * Collective MPI call to write the
- * solution from all participating nodes
- * (those in the given communicator) to a
- * single compressed .vtu file on a
- * shared file system. The communicator
- * can be a sub communicator of the one
- * used by the computation. This routine
- * uses MPI I/O to achieve high
- * performance on parallel filesystems.
- * Also see
- * DataOutInterface::write_vtu().
- */
- void write_vtu_in_parallel (const char* filename, MPI_Comm comm) const;
-
- /**
- * Some visualization programs, such as
- * ParaView, can read several separate
- * VTU files to parallelize
- * visualization. In that case, you need
- * a <code>.pvtu</code> file that
- * describes which VTU files (written,
- * for example, through the write_vtu()
- * function) form a group. The current
- * function can generate such a master
- * record.
- *
- * The file so written contains a list of
- * (scalar or vector) fields whose values
- * are described by the individual files
- * that comprise the set of parallel VTU
- * files along with the names of these
- * files. This function gets the names
- * and types of fields through the
- * get_patches() function of this class
- * like all the other write_xxx()
- * functions. The second argument to this
- * function specifies the names of the
- * files that form the parallel set.
- *
- * @note See DataOutBase::write_vtu for
- * writing each piece. Also note that
- * only one parallel process needs to
- * call the current function, listing the
- * names of the files written by all
- * parallel processes.
- *
- * @note The use of this function is
- * explained in step-40.
- *
- * @note In order to tell Paraview to
- * group together multiple <code>pvtu</code>
- * files that each describe one time
- * step of a time dependent simulation,
- * see the
- * DataOutInterface::write_pvd_record()
- * function.
- *
- * @note At the time of writing,
- * the other big VTK-based
- * visualization program, VisIt,
- * can not read <code>pvtu</code>
- * records. However, it can read
- * visit records as written by
- * the write_visit_record()
- * function.
- */
- void write_pvtu_record (std::ostream &out,
- const std::vector<std::string> &piece_names) const;
-
- /**
- * In ParaView it is possible to visualize time-dependent
- * data tagged with the current
- * integration time of a time dependent simulation. To use this
- * feature you need a <code>.pvd</code>
- * file that describes which VTU or PVTU file
- * belongs to which timestep. This function writes a file that
- * provides this mapping, i.e., it takes a list of pairs each of
- * which indicates a particular time instant and the corresponding
- * file that contains the graphical data for this time instant.
- *
- * A typical use case, in program that computes a time dependent
- * solution, would be the following (<code>time</code> and
- * <code>time_step</code> are member variables of the class with types
- * <code>double</code> and <code>unsigned int</code>, respectively;
- * the variable <code>times_and_names</code> is of type
- * <code>std::vector@<std::pair@<double,std::string@> @></code>):
- *
- * @code
- * template <int dim>
- * void MyEquation<dim>::output_results () const
- * {
- * DataOut<dim> data_out;
- *
- * data_out.attach_dof_handler (dof_handler);
- * data_out.add_data_vector (solution, "U");
- * data_out.build_patches ();
- *
- * const std::string filename = "solution-" +
- * Utilities::int_to_string (timestep_number, 3) +
- * ".vtu";
- * std::ofstream output (filename.c_str());
- * data_out.write_vtu (output);
- *
- * times_and_names.push_back (std::pair<double,std::string> (time, filename));
- * std::ofstream pvd_output ("solution.pvd");
- * data_out.write_pvd_record (pvd_output, times_and_names);
- * }
- * @endcode
- *
- * @note See DataOutBase::write_vtu or
- * DataOutInterface::write_pvtu_record for
- * writing solutions at each timestep.
- *
- * @note The second element of each pair, i.e., the file in which
- * the graphical data for each time is stored, may itself be again
- * a file that references other files. For example, it could be
- * the name for a <code>.pvtu</code> file that references multiple
- * parts of a parallel computation.
- *
- * @author Marco Engelhard, 2012
- */
- void write_pvd_record (std::ostream &out,
- const std::vector<std::pair<double,std::string> > ×_and_names) const;
-
- /**
- * This function is the exact
- * equivalent of the
- * write_pvtu_record() function
- * but for the VisIt
- * visualization program. See
- * there for the purpose of this
- * function.
- *
- * This function is documented
- * in the "Creating a master file
- * for parallel" section (section 5.7)
- * of the "Getting data into VisIt"
- * report that can be found here:
- * https://wci.llnl.gov/codes/visit/2.0.0/GettingDataIntoVisIt2.0.0.pdf
- */
- void write_visit_record (std::ostream &out,
- const std::vector<std::string> &piece_names) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in deal.II intermediate
- * format. See
- * DataOutBase::write_deal_II_intermediate.
- *
- * Note that the intermediate
- * format is what its name
- * suggests: a direct
- * representation of internal
- * data. It isn't standardized
- * and will change whenever we
- * change our internal
- * representation. You can only
- * expect to process files
- * written in this format using
- * the same version of deal.II
- * that was used for writing.
- */
- void write_deal_II_intermediate (std::ostream &out) const;
-
- XDMFEntry create_xdmf_entry (const char *h5_filename,
- const double cur_time,
- MPI_Comm comm) const;
-
- void write_xdmf_file (const std::vector<XDMFEntry> &entries,
- const char *filename,
- MPI_Comm comm) const;
-
- void write_hdf5_parallel (const char* filename, MPI_Comm comm) const;
- /**
- * Write data and grid to <tt>out</tt>
- * according to the given data
- * format. This function simply
- * calls the appropriate
- * <tt>write_*</tt> function. If no
- * output format is requested,
- * the <tt>default_format</tt> is
- * written.
- *
- * An error occurs if no format
- * is provided and the default
- * format is <tt>default_format</tt>.
- */
- void write (std::ostream &out,
- const OutputFormat output_format = default_format) const;
-
- /**
- * Set the default format. The
- * value set here is used
- * anytime, output for format
- * <tt>default_format</tt> is
- * requested.
- */
- void set_default_format (const OutputFormat default_format);
-
- /**
- * Set the flags to be used for
- * output in OpenDX format.
- */
- void set_flags (const DXFlags &dx_flags);
-
- /**
- * Set the flags to be used for
- * output in UCD format.
- */
- void set_flags (const UcdFlags &ucd_flags);
-
- /**
- * Set the flags to be used for
- * output in GNUPLOT format.
- */
- void set_flags (const GnuplotFlags &gnuplot_flags);
-
- /**
- * Set the flags to be used for
- * output in POVRAY format.
- */
- void set_flags (const PovrayFlags &povray_flags);
-
- /**
- * Set the flags to be used for
- * output in EPS output.
- */
- void set_flags (const EpsFlags &eps_flags);
-
- /**
- * Set the flags to be used for
- * output in GMV format.
- */
- void set_flags (const GmvFlags &gmv_flags);
-
- /**
- * Set the flags to be used for
- * output in Tecplot format.
- */
- void set_flags (const TecplotFlags &tecplot_flags);
-
- /**
- * Set the flags to be used for
- * output in VTK format.
- */
- void set_flags (const VtkFlags &vtk_flags);
-
- /**
- * Set the flags to be used for output in
- * deal.II intermediate format.
- */
- void set_flags (const Deal_II_IntermediateFlags &deal_II_intermediate_flags);
-
- /**
- * A function that returns the same
- * string as the respective function in
- * the base class does; the only
- * exception being that if the parameter
- * is omitted, then the value for the
- * present default format is returned,
- * i.e. the correct suffix for the format
- * that was set through
- * set_default_format() or
- * parse_parameters() before calling this
- * function.
- */
- std::string
- default_suffix (const OutputFormat output_format = default_format) const;
-
- /**
- * Declare parameters for all
- * output formats by declaring
- * subsections within the
- * parameter file for each output
- * format and call the respective
- * <tt>declare_parameters</tt>
- * functions of the flag classes
- * for each output format.
- *
- * Some of the declared
- * subsections may not contain
- * entries, if the respective
- * format does not export any
- * flags.
- *
- * Note that the top-level
- * parameters denoting the number
- * of subdivisions per patch and
- * the output format are not
- * declared, since they are only
- * passed to virtual functions
- * and are not stored inside
- * objects of this type. You have
- * to declare them yourself.
- */
- static void declare_parameters (ParameterHandler &prm);
-
- /**
- * Read the parameters declared
- * in <tt>declare_parameters</tt> and
- * set the flags for the output
- * formats accordingly.
- *
- * The flags thus obtained
- * overwrite all previous
- * contents of the flag objects
- * as default-constructed or set
- * by the set_flags() function.
- */
- void parse_parameters (ParameterHandler &prm);
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this
- * object. Since sometimes
- * the size of objects can
- * not be determined exactly
- * (for example: what is the
- * memory consumption of an
- * STL <tt>std::map</tt> type with a
- * certain number of
- * elements?), this is only
- * an estimate. however often
- * quite close to the true
- * value.
- */
- std::size_t memory_consumption () const;
-
- protected:
- /**
- * This is the abstract function
- * through which derived classes
- * propagate preprocessed data in
- * the form of Patch
- * structures (declared in the
- * base class DataOutBase) to
- * the actual output
- * function. You need to overload
- * this function to allow the
- * output functions to know what
- * they shall print.
- */
- virtual
- const std::vector<typename DataOutBase::Patch<dim,spacedim> > &
- get_patches () const = 0;
-
- /**
- * Abstract virtual function
- * through which the names of
- * data sets are obtained by the
- * output functions of the base
- * class.
- */
- virtual
- std::vector<std::string>
- get_dataset_names () const = 0;
-
- /**
- * This functions returns
- * information about how the
- * individual components of
- * output files that consist of
- * more than one data set are to
- * be interpreted.
- *
- * It returns a list of index
- * pairs and corresponding name
- * indicating which components of
- * the output are to be
- * considered vector-valued
- * rather than just a collection
- * of scalar data. The index
- * pairs are inclusive; for
- * example, if we have a Stokes
- * problem in 2d with components
- * (u,v,p), then the
- * corresponding vector data
- * range should be (0,1), and the
- * returned list would consist of
- * only a single element with a
- * tuple such as (0,1,"velocity").
- *
- * Since some of the derived
- * classes do not know about
- * vector data, this function has
- * a default implementation that
- * simply returns an empty
- * string, meaning that all data
- * is to be considered a
- * collection of scalar fields.
- */
- virtual
- std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
- get_vector_data_ranges () const;
-
- /**
- * The default number of
- * subdivisions for patches. This
- * is filled by parse_parameters()
- * and should be obeyed by
- * build_patches() in derived
- * classes.
- */
- unsigned int default_subdivisions;
+ public:
+ /*
+ * Import a few names that were
+ * previously in this class and have then
+ * moved to the base class. Since the
+ * base class is inherited from
+ * privately, we need to re-import these
+ * symbols to make sure that references
+ * to DataOutInterface<dim,spacedim>::XXX
+ * remain valid.
+ */
+ using DataOutBase::OutputFormat;
+ using DataOutBase::default_format;
+ using DataOutBase::dx;
+ using DataOutBase::gnuplot;
+ using DataOutBase::povray;
+ using DataOutBase::eps;
+ using DataOutBase::tecplot;
+ using DataOutBase::tecplot_binary;
+ using DataOutBase::vtk;
+ using DataOutBase::vtu;
+ using DataOutBase::deal_II_intermediate;
+ using DataOutBase::parse_output_format;
+ using DataOutBase::get_output_format_names;
+ using DataOutBase::determine_intermediate_format_dimensions;
+
+ /**
+ * Constructor.
+ */
+ DataOutInterface ();
+
+ /**
+ * Destructor. Does nothing, but is
+ * declared virtual since this class has
+ * virtual functions.
+ */
+ virtual ~DataOutInterface ();
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in OpenDX format. See
+ * DataOutBase::write_dx.
+ */
+ void write_dx (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in EPS format. See
+ * DataOutBase::write_eps.
+ */
+ void write_eps (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in GMV format. See
+ * DataOutBase::write_gmv.
+ */
+ void write_gmv (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in GNUPLOT format. See
+ * DataOutBase::write_gnuplot.
+ */
+ void write_gnuplot (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in POVRAY format. See
+ * DataOutBase::write_povray.
+ */
+ void write_povray (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in Tecplot format. See
+ * DataOutBase::write_tecplot.
+ */
+ void write_tecplot (std::ostream &out) const;
+
+ /**
+ * Obtain data through
+ * get_patches() and write it in
+ * the Tecplot binary output
+ * format. Note that the name of
+ * the output file must be
+ * specified through the
+ * TecplotFlags interface.
+ */
+ void write_tecplot_binary (std::ostream &out) const;
+
+ /**
+ * Obtain data through
+ * get_patches() and write it to
+ * <tt>out</tt> in UCD format for
+ * AVS. See
+ * DataOutBase::write_ucd.
+ */
+ void write_ucd (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in Vtk format. See
+ * DataOutBase::write_vtk.
+ */
+ void write_vtk (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in Vtu (VTK's XML) format. See
+ * DataOutBase::write_vtu.
+ *
+ * Some visualization programs,
+ * such as ParaView, can read
+ * several separate VTU files to
+ * parallelize visualization. In
+ * that case, you need a
+ * <code>.pvtu</code> file that
+ * describes which VTU files form
+ * a group. The
+ * DataOutInterface::write_pvtu_record()
+ * function can generate such a
+ * master record. Likewise,
+ * DataOutInterface::write_visit_record()
+ * does the same for VisIt. Finally,
+ * DataOutInterface::write_pvd_record()
+ * can be used to group together
+ * the files that jointly make up
+ * a time dependent simulation.
+ */
+ void write_vtu (std::ostream &out) const;
+
+ /**
+ * Collective MPI call to write the
+ * solution from all participating nodes
+ * (those in the given communicator) to a
+ * single compressed .vtu file on a
+ * shared file system. The communicator
+ * can be a sub communicator of the one
+ * used by the computation. This routine
+ * uses MPI I/O to achieve high
+ * performance on parallel filesystems.
+ * Also see
+ * DataOutInterface::write_vtu().
+ */
+ void write_vtu_in_parallel (const char *filename, MPI_Comm comm) const;
+
+ /**
+ * Some visualization programs, such as
+ * ParaView, can read several separate
+ * VTU files to parallelize
+ * visualization. In that case, you need
+ * a <code>.pvtu</code> file that
+ * describes which VTU files (written,
+ * for example, through the write_vtu()
+ * function) form a group. The current
+ * function can generate such a master
+ * record.
+ *
+ * The file so written contains a list of
+ * (scalar or vector) fields whose values
+ * are described by the individual files
+ * that comprise the set of parallel VTU
+ * files along with the names of these
+ * files. This function gets the names
+ * and types of fields through the
+ * get_patches() function of this class
+ * like all the other write_xxx()
+ * functions. The second argument to this
+ * function specifies the names of the
+ * files that form the parallel set.
+ *
+ * @note See DataOutBase::write_vtu for
+ * writing each piece. Also note that
+ * only one parallel process needs to
+ * call the current function, listing the
+ * names of the files written by all
+ * parallel processes.
+ *
+ * @note The use of this function is
+ * explained in step-40.
+ *
+ * @note In order to tell ParaView to
+ * group together multiple <code>pvtu</code>
+ * files that each describe one time
+ * step of a time dependent simulation,
+ * see the
+ * DataOutInterface::write_pvd_record()
+ * function.
+ *
+ * @note At the time of writing,
+ * the other big VTK-based
+ * visualization program, VisIt,
+ * can not read <code>pvtu</code>
+ * records. However, it can read
+ * visit records as written by
+ * the write_visit_record()
+ * function.
+ */
+ void write_pvtu_record (std::ostream &out,
+ const std::vector<std::string> &piece_names) const;
+
+ /**
+ * In ParaView it is possible to visualize time-dependent
+ * data tagged with the current
+ * integration time of a time dependent simulation. To use this
+ * feature you need a <code>.pvd</code>
+ * file that describes which VTU or PVTU file
+ * belongs to which timestep. This function writes a file that
+ * provides this mapping, i.e., it takes a list of pairs each of
+ * which indicates a particular time instant and the corresponding
+ * file that contains the graphical data for this time instant.
+ *
+ * A typical use case, in a program that computes a time dependent
+ * solution, would be the following (<code>time</code> and
+ * <code>time_step</code> are member variables of the class with types
+ * <code>double</code> and <code>unsigned int</code>, respectively;
+ * the variable <code>times_and_names</code> is of type
+ * <code>std::vector@<std::pair@<double,std::string@> @></code>):
+ *
+ * @code
+ * template <int dim>
+ * void MyEquation<dim>::output_results () const
+ * {
+ * DataOut<dim> data_out;
+ *
+ * data_out.attach_dof_handler (dof_handler);
+ * data_out.add_data_vector (solution, "U");
+ * data_out.build_patches ();
+ *
+ * const std::string filename = "solution-" +
+ * Utilities::int_to_string (timestep_number, 3) +
+ * ".vtu";
+ * std::ofstream output (filename.c_str());
+ * data_out.write_vtu (output);
+ *
+ * times_and_names.push_back (std::pair<double,std::string> (time, filename));
+ * std::ofstream pvd_output ("solution.pvd");
+ * data_out.write_pvd_record (pvd_output, times_and_names);
+ * }
+ * @endcode
+ *
+ * @note See DataOutBase::write_vtu or
+ * DataOutInterface::write_pvtu_record for
+ * writing solutions at each timestep.
+ *
+ * @note The second element of each pair, i.e., the file in which
+ * the graphical data for each time is stored, may itself be again
+ * a file that references other files. For example, it could be
+ * the name for a <code>.pvtu</code> file that references multiple
+ * parts of a parallel computation.
+ *
+ * @author Marco Engelhard, 2012
+ */
+ void write_pvd_record (std::ostream &out,
- const std::vector<std::pair<double,std::string> > ×_and_names) const;
++ const std::vector<std::pair<double,std::string> > ×_and_names) const;
+
+ /**
+ * This function is the exact
+ * equivalent of the
+ * write_pvtu_record() function
+ * but for the VisIt
+ * visualization program. See
+ * there for the purpose of this
+ * function.
+ *
+ * This function is documented
+ * in the "Creating a master file
+ * for parallel" section (section 5.7)
+ * of the "Getting data into VisIt"
+ * report that can be found here:
+ * https://wci.llnl.gov/codes/visit/2.0.0/GettingDataIntoVisIt2.0.0.pdf
+ */
+ void write_visit_record (std::ostream &out,
+ const std::vector<std::string> &piece_names) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in deal.II intermediate
+ * format. See
+ * DataOutBase::write_deal_II_intermediate.
+ *
+ * Note that the intermediate
+ * format is what its name
+ * suggests: a direct
+ * representation of internal
+ * data. It isn't standardized
+ * and will change whenever we
+ * change our internal
+ * representation. You can only
+ * expect to process files
+ * written in this format using
+ * the same version of deal.II
+ * that was used for writing.
+ */
+ void write_deal_II_intermediate (std::ostream &out) const;
+
+ XDMFEntry create_xdmf_entry (const char *h5_filename,
+ const double cur_time,
+ MPI_Comm comm) const;
+
+ void write_xdmf_file (const std::vector<XDMFEntry> &entries,
+ const char *filename,
+ MPI_Comm comm) const;
+
+ void write_hdf5_parallel (const char *filename, MPI_Comm comm) const;
+ /**
+ * Write data and grid to <tt>out</tt>
+ * according to the given data
+ * format. This function simply
+ * calls the appropriate
+ * <tt>write_*</tt> function. If no
+ * output format is requested,
+ * the <tt>default_format</tt> is
+ * written.
+ *
+ * An error occurs if no format
+ * is provided and the default
+ * format is <tt>default_format</tt>.
+ */
+ void write (std::ostream &out,
+ const OutputFormat output_format = default_format) const;
+
+ /**
+ * Set the default format. The
+ * value set here is used
+ * anytime output for format
+ * <tt>default_format</tt> is
+ * requested.
+ */
+ void set_default_format (const OutputFormat default_format);
+
+ /**
+ * Set the flags to be used for
+ * output in OpenDX format.
+ */
+ void set_flags (const DXFlags &dx_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in UCD format.
+ */
+ void set_flags (const UcdFlags &ucd_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in GNUPLOT format.
+ */
+ void set_flags (const GnuplotFlags &gnuplot_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in POVRAY format.
+ */
+ void set_flags (const PovrayFlags &povray_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in EPS output.
+ */
+ void set_flags (const EpsFlags &eps_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in GMV format.
+ */
+ void set_flags (const GmvFlags &gmv_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in Tecplot format.
+ */
+ void set_flags (const TecplotFlags &tecplot_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in VTK format.
+ */
+ void set_flags (const VtkFlags &vtk_flags);
+
+ /**
+ * Set the flags to be used for output in
+ * deal.II intermediate format.
+ */
+ void set_flags (const Deal_II_IntermediateFlags &deal_II_intermediate_flags);
+
+ /**
+ * A function that returns the same
+ * string as the respective function in
+ * the base class does; the only
+ * exception being that if the parameter
+ * is omitted, then the value for the
+ * present default format is returned,
+ * i.e. the correct suffix for the format
+ * that was set through
+ * set_default_format() or
+ * parse_parameters() before calling this
+ * function.
+ */
+ std::string
+ default_suffix (const OutputFormat output_format = default_format) const;
+
+ /**
+ * Declare parameters for all
+ * output formats by declaring
+ * subsections within the
+ * parameter file for each output
+ * format and call the respective
+ * <tt>declare_parameters</tt>
+ * functions of the flag classes
+ * for each output format.
+ *
+ * Some of the declared
+ * subsections may not contain
+ * entries, if the respective
+ * format does not export any
+ * flags.
+ *
+ * Note that the top-level
+ * parameters denoting the number
+ * of subdivisions per patch and
+ * the output format are not
+ * declared, since they are only
+ * passed to virtual functions
+ * and are not stored inside
+ * objects of this type. You have
+ * to declare them yourself.
+ */
+ static void declare_parameters (ParameterHandler &prm);
+
+ /**
+ * Read the parameters declared
+ * in <tt>declare_parameters</tt> and
+ * set the flags for the output
+ * formats accordingly.
+ *
+ * The flags thus obtained
+ * overwrite all previous
+ * contents of the flag objects
+ * as default-constructed or set
+ * by the set_flags() function.
+ */
+ void parse_parameters (ParameterHandler &prm);
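
A sketch of the declare/read/parse cycle described above, written here for DataOutInterface<dim> for concreteness; the parameter file name is an assumption, and read_input() stands for whichever ParameterHandler input routine the installed deal.II version provides:

    ParameterHandler prm;
    DataOutInterface<dim>::declare_parameters (prm);   // static: declare all subsections
    prm.read_input ("output.prm");                     // read the user's choices
    data_out.parse_parameters (prm);                   // copy them into the flag objects
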
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this
+ * object. Since sometimes
+ * the size of objects can
+ * not be determined exactly
+ * (for example: what is the
+ * memory consumption of an
+ * STL <tt>std::map</tt> type with a
+ * certain number of
+ * elements?), this is only
+ * an estimate, though often
+ * quite close to the true
+ * value.
+ */
+ std::size_t memory_consumption () const;
+
+ protected:
+ /**
+ * This is the abstract function
+ * through which derived classes
+ * propagate preprocessed data in
+ * the form of Patch
+ * structures (declared in the
+ * base class DataOutBase) to
+ * the actual output
+ * function. You need to overload
+ * this function to allow the
+ * output functions to know what
+ * they shall print.
+ */
+ virtual
+ const std::vector<typename DataOutBase::Patch<dim,spacedim> > &
+ get_patches () const = 0;
+
+ /**
+ * Abstract virtual function
+ * through which the names of
+ * data sets are obtained by the
+ * output functions of the base
+ * class.
+ */
+ virtual
+ std::vector<std::string>
+ get_dataset_names () const = 0;
+
+ /**
+ * This function returns
+ * information about how the
+ * individual components of
+ * output files that consist of
+ * more than one data set are to
+ * be interpreted.
+ *
+ * It returns a list of index
+ * pairs, each with a corresponding
+ * name, indicating which components of
+ * the output are to be
+ * considered vector-valued
+ * rather than just a collection
+ * of scalar data. The index
+ * pairs are inclusive; for
+ * example, if we have a Stokes
+ * problem in 2d with components
+ * (u,v,p), then the
+ * corresponding vector data
+ * range should be (0,1), and the
+ * returned list would consist of
+ * only a single element with a
+ * tuple such as (0,1,"velocity").
+ *
+ * Since some of the derived
+ * classes do not know about
+ * vector data, this function has
+ * a default implementation that
+ * simply returns an empty
+ * list, meaning that all data
+ * is to be considered a
+ * collection of scalar fields.
+ */
+ virtual
+ std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
+ get_vector_data_ranges () const;
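
As a sketch of the Stokes example in the comment above, a derived class (the name StokesDataOut is hypothetical) could implement the function like this:

    std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
    StokesDataOut::get_vector_data_ranges () const
    {
      std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> > ranges;
      // components 0 and 1 (u,v) form one vector field called "velocity";
      // the pressure (component 2) is not listed and so remains a scalar
      ranges.push_back (std_cxx1x::tuple<unsigned int, unsigned int, std::string>
                        (0, 1, "velocity"));
      return ranges;
    }
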
+
+ /**
+ * The default number of
+ * subdivisions for patches. This
+ * is filled by parse_parameters()
+ * and should be obeyed by
+ * build_patches() in derived
+ * classes.
+ */
+ unsigned int default_subdivisions;
- private:
- /**
- * Standard output format. Use
- * this format, if output format
- * default_format is
- * requested. It can be changed
- * by the <tt>set_format</tt> function
- * or in a parameter file.
- */
- OutputFormat default_fmt;
-
- /**
- * Flags to be used upon output
- * of OpenDX data. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- DXFlags dx_flags;
-
- /**
- * Flags to be used upon output
- * of UCD data. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- UcdFlags ucd_flags;
-
- /**
- * Flags to be used upon output
- * of GNUPLOT data. Can be
- * changed by using the
- * <tt>set_flags</tt> function.
- */
- GnuplotFlags gnuplot_flags;
-
- /**
- * Flags to be used upon output
- * of POVRAY data. Can be changed
- * by using the <tt>set_flags</tt>
- * function.
- */
- PovrayFlags povray_flags;
-
- /**
- * Flags to be used upon output
- * of EPS data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- EpsFlags eps_flags;
-
- /**
- * Flags to be used upon output
- * of gmv data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- GmvFlags gmv_flags;
-
- /**
- * Flags to be used upon output
- * of Tecplot data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- TecplotFlags tecplot_flags;
-
- /**
- * Flags to be used upon output
- * of vtk data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- VtkFlags vtk_flags;
-
- /**
- * Flags to be used upon output of
- * deal.II intermediate data in one space
- * dimension. Can be changed by using the
- * <tt>set_flags</tt> function.
- */
- Deal_II_IntermediateFlags deal_II_intermediate_flags;
+ private:
+ /**
+ * Standard output format. Use
+ * this format, if output format
+ * default_format is
+ * requested. It can be changed
+ * by the <tt>set_format</tt> function
+ * or in a parameter file.
+ */
+ OutputFormat default_fmt;
+
+ /**
+ * Flags to be used upon output
+ * of OpenDX data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ DXFlags dx_flags;
+
+ /**
+ * Flags to be used upon output
+ * of UCD data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ UcdFlags ucd_flags;
+
+ /**
+ * Flags to be used upon output
+ * of GNUPLOT data. Can be
+ * changed by using the
+ * <tt>set_flags</tt> function.
+ */
+ GnuplotFlags gnuplot_flags;
+
+ /**
+ * Flags to be used upon output
+ * of POVRAY data. Can be changed
+ * by using the <tt>set_flags</tt>
+ * function.
+ */
+ PovrayFlags povray_flags;
+
+ /**
+ * Flags to be used upon output
+ * of EPS data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ EpsFlags eps_flags;
+
+ /**
+ * Flags to be used upon output
+ * of GMV data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ GmvFlags gmv_flags;
+
+ /**
+ * Flags to be used upon output
+ * of Tecplot data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ TecplotFlags tecplot_flags;
+
+ /**
+ * Flags to be used upon output
+ * of VTK data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ VtkFlags vtk_flags;
+
+ /**
+ * Flags to be used upon output of
+ * deal.II intermediate data. Can be changed by using the
+ * <tt>set_flags</tt> function.
+ */
+ Deal_II_IntermediateFlags deal_II_intermediate_flags;
};
*/
class ExceptionBase : public std::exception
{
- public:
- /**
- * Default constructor.
- */
- ExceptionBase ();
-
- /**
- * The constructor takes the file in which the
- * error happened, the line and the violated
- * condition as well as the name of the
- * exception class as a <tt>char*</tt> as arguments.
- */
- ExceptionBase (const char* f, const int l, const char *func,
- const char* c, const char *e);
-
- /**
- * Copy constructor.
- */
- ExceptionBase (const ExceptionBase &exc);
-
- /**
- * Destructor. Empty, but needed
- * for the sake of exception
- * specification, since the base
- * class has this exception
- * specification and the
- * automatically generated
- * destructor would have a
- * different one due to member
- * objects.
- */
- virtual ~ExceptionBase () throw();
-
- /**
- * Set the file name and line of where the
- * exception appeared as well as the violated
- * condition and the name of the exception as
- * a char pointer.
- */
- void set_fields (const char *f,
- const int l,
- const char *func,
- const char *c,
- const char *e);
-
- /**
- * Print out the general part of the error
- * information.
- */
- void print_exc_data (std::ostream &out) const;
-
- /**
- * Print more specific information about the
- * exception which occured. Overload this
- * function in your own exception classes.
- */
- virtual void print_info (std::ostream &out) const;
-
-
- /**
- * Function derived from the base class
- * which allows to pass information like
- * the line and name of the file where the
- * exception occurred as well as user
- * information.
- *
- * This function is mainly used
- * when using exceptions
- * declared by the
- * <tt>DeclException*</tt>
- * macros with the
- * <tt>throw</tt> mechanism or
- * the <tt>AssertThrow</tt>
- * macro.
- */
- virtual const char * what () const throw ();
-
- /**
- * Print a stacktrace, if one has
- * been recorded previously, to
- * the given stream.
- */
- void print_stack_trace (std::ostream &out) const;
-
- protected:
- /**
- * Name of the file this exception happen in.
- */
- const char *file;
-
- /**
- * Line number in this file.
- */
- unsigned int line;
-
- /**
- * Name of the function, pretty printed.
- */
- const char *function;
-
- /**
- * The violated condition, as a string.
- */
- const char *cond;
-
- /**
- * Name of the exception and call sequence.
- */
- const char *exc;
-
- /**
- * A backtrace to the position
- * where the problem happened, if
- * the system supports this.
- */
- char ** stacktrace;
-
- /**
- * The number of stacktrace
- * frames that are stored in the
- * previous variable. Zero if the
- * system does not support stack
- * traces.
- */
- int n_stacktrace_frames;
+ public:
+ /**
+ * Default constructor.
+ */
+ ExceptionBase ();
+
+ /**
+ * The constructor takes the file in which the
+ * error happened, the line and the violated
+ * condition as well as the name of the
+ * exception class as a <tt>char*</tt> as arguments.
+ */
+ ExceptionBase (const char *f, const int l, const char *func,
+ const char *c, const char *e);
+
+ /**
+ * Copy constructor.
+ */
+ ExceptionBase (const ExceptionBase &exc);
+
+ /**
+ * Destructor. Empty, but needed
+ * for the sake of exception
+ * specification, since the base
+ * class has this exception
+ * specification and the
+ * automatically generated
+ * destructor would have a
+ * different one due to member
+ * objects.
+ */
+ virtual ~ExceptionBase () throw();
+
+ /**
+ * Set the file name and line of where the
+ * exception appeared as well as the violated
+ * condition and the name of the exception as
+ * a char pointer.
+ */
+ void set_fields (const char *f,
+ const int l,
+ const char *func,
+ const char *c,
+ const char *e);
+
+ /**
+ * Print out the general part of the error
+ * information.
+ */
+ void print_exc_data (std::ostream &out) const;
+
+ /**
+ * Print more specific information about the
+ * exception which occurred. Overload this
+ * function in your own exception classes.
+ */
+ virtual void print_info (std::ostream &out) const;
+
+
+ /**
+ * Function derived from the base class
+ * which allows passing information such as
+ * the line and name of the file where the
+ * exception occurred as well as user
+ * information.
+ *
+ * This function is mainly used
+ * when using exceptions
+ * declared by the
+ * <tt>DeclException*</tt>
+ * macros with the
+ * <tt>throw</tt> mechanism or
+ * the <tt>AssertThrow</tt>
+ * macro.
+ */
+ virtual const char *what () const throw ();
+
+ /**
+ * Print a stacktrace, if one has
+ * been recorded previously, to
+ * the given stream.
+ */
+ void print_stack_trace (std::ostream &out) const;
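
A sketch of how these members are usually reached from user code: exceptions thrown with the AssertThrow macro derive from ExceptionBase, so the assembled message is available through what(); the condition and the exception object used here are illustrative:

    try
      {
        AssertThrow (cycle < n_cycles,
                     ExcIndexRange (cycle, 0, n_cycles));
      }
    catch (const ExceptionBase &exc)
      {
        exc.print_exc_data (std::cerr);        // file, line, condition
        std::cerr << exc.what () << std::endl; // full message
      }
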
+
+ protected:
+ /**
+ * Name of the file this exception happened in.
+ */
- const char *file;
++ const char *file;
+
+ /**
+ * Line number in this file.
+ */
+ unsigned int line;
+
+ /**
+ * Name of the function, pretty printed.
+ */
- const char *function;
++ const char *function;
+
+ /**
+ * The violated condition, as a string.
+ */
- const char *cond;
++ const char *cond;
+
+ /**
+ * Name of the exception and call sequence.
+ */
- const char *exc;
++ const char *exc;
+
+ /**
+ * A backtrace to the position
+ * where the problem happened, if
+ * the system supports this.
+ */
+ char **stacktrace;
+
+ /**
+ * The number of stacktrace
+ * frames that are stored in the
+ * previous variable. Zero if the
+ * system does not support stack
+ * traces.
+ */
+ int n_stacktrace_frames;
};
*/
class LogStream : public Subscriptor
{
+ public:
+ /**
+ * A subclass allowing for the
+ * safe generation and removal of
+ * prefixes.
+ *
+ * Somewhere at the beginning of
+ * a block, create one of these
+ * objects, and it will appear as
+ * a prefix in LogStream output
+ * like @p deallog. At the end of
+ * the block, the prefix will
+ * automatically be removed, when
+ * this object is destroyed.
+ */
+ class Prefix
+ {
public:
- /**
- * A subclass allowing for the
- * safe generation and removal of
- * prefices.
- *
- * Somewhere at the beginning of
- * a block, create one of these
- * objects, and it will appear as
- * a prefix in LogStream output
- * like @p deallog. At the end of
- * the block, the prefix will
- * automatically be removed, when
- * this object is destroyed.
- */
- class Prefix
- {
- public:
- /**
- * Set a new prefix for
- * @p deallog, which will be
- * removed when the variable
- * is destroyed .
- */
- Prefix(const std::string& text);
-
- /**
- * Set a new prefix for the
- * given stream, which will
- * be removed when the
- * variable is destroyed .
- */
- Prefix(const std::string& text, LogStream& stream);
-
- /**
- * Remove the prefix
- * associated with this
- * variable.
- */
- ~Prefix ();
-
- private:
- SmartPointer<LogStream,LogStream::Prefix> stream;
- };
-
- /**
- * Standard constructor, since we
- * intend to provide an object
- * <tt>deallog</tt> in the library. Set the
- * standard output stream to <tt>std::cerr</tt>.
- */
- LogStream ();
-
- /**
- * Destructor.
- */
- ~LogStream();
-
- /**
- * Enable output to a second
- * stream <tt>o</tt>.
- */
- void attach (std::ostream& o);
-
- /**
- * Disable output to the second
- * stream. You may want to call
- * <tt>close</tt> on the stream that was
- * previously attached to this object.
- */
- void detach ();
-
- /**
- * Setup the logstream for
- * regression test mode.
- *
- * This sets the parameters
- * #double_threshold,
- * #float_threshold, and #offset
- * to nonzero values. The exact
- * values being used have been
- * determined experimentally and
- * can be found in the source
- * code.
- *
- * Called with an argument
- * <tt>false</tt>, switches off
- * test mode and sets all
- * involved parameters to zero.
- */
- void test_mode (bool on=true);
-
- /**
- * Gives the default stream (<tt>std_out</tt>).
- */
- std::ostream& get_console ();
-
- /**
- * Gives the file stream.
- */
- std::ostream& get_file_stream ();
-
- /**
- * @return true, if file stream
- * has already been attached.
- */
- bool has_file () const;
-
- /**
- * Reroutes cerr to LogStream.
- * Works as a switch, turning
- * logging of <tt>cerr</tt> on
- * and off alternatingly with
- * every call.
- */
- void log_cerr ();
-
- /**
- * Return the prefix string.
- */
- const std::string& get_prefix () const;
-
- /**
- * @deprecated Use Prefix instead
- *
- * Push another prefix on the
- * stack. Prefixes are
- * automatically separated by a
- * colon and there is a double
- * colon after the last prefix.
- */
- void push (const std::string& text);
-
- /**
- * @deprecated Use Prefix instead
- *
- * Remove the last prefix.
- */
- void pop ();
-
- /**
- * Maximum number of levels to be
- * printed on the console. This
- * function allows to restrict
- * console output to the upmost
- * levels of iterations. Only
- * output with less than <tt>n</tt>
- * prefixes is printed. By calling
- * this function with <tt>n=0</tt>, no
- * console output will be written.
- *
- * The previous value of this
- * parameter is returned.
- */
- unsigned int depth_console (const unsigned int n);
-
- /**
- * Maximum number of levels to be
- * written to the log file. The
- * functionality is the same as
- * <tt>depth_console</tt>, nevertheless,
- * this function should be used
- * with care, since it may spoile
- * the value of a log file.
- *
- * The previous value of this
- * parameter is returned.
- */
- unsigned int depth_file (const unsigned int n);
-
- /**
- * Set time printing flag. If this flag
- * is true, each output line will
- * be prepended by the user time used
- * by the running program so far.
- *
- * The previous value of this
- * parameter is returned.
- */
- bool log_execution_time (const bool flag);
-
- /**
- * Output time differences
- * between consecutive logs. If
- * this function is invoked with
- * <tt>true</tt>, the time difference
- * between the previous log line
- * and the recent one is
- * printed. If it is invoked with
- * <tt>false</tt>, the accumulated
- * time since start of the
- * program is printed (default
- * behavior).
- *
- * The measurement of times is
- * not changed by this function,
- * just the output.
- *
- * The previous value of this
- * parameter is returned.
- */
- bool log_time_differences (const bool flag);
-
- /**
- * Write detailed timing
- * information.
- *
- *
- */
- void timestamp();
-
- /**
- * Log the thread id.
- */
- bool log_thread_id (const bool flag);
-
- /**
- * Set a threshold for the
- * minimal absolute value of
- * double values. All numbers
- * with a smaller absolute value
- * will be printed as zero.
- *
- * The default value for this
- * threshold is zero,
- * i.e. numbers are printed
- * according to their real value.
- *
- * This feature is mostly useful
- * for automated tests: there,
- * one would like to reproduce
- * the exact same solution in
- * each run of a
- * testsuite. However, subtle
- * difference in processor,
- * operating system, or compiler
- * version can lead to
- * differences in the last few
- * digits of numbers, due to
- * different rounding. While one
- * can avoid trouble for most
- * numbers when comparing with
- * stored results by simply
- * limiting the accuracy of
- * output, this does not hold for
- * numbers very close to zero,
- * i.e. zero plus accumulated
- * round-off. For these numbers,
- * already the first digit is
- * tainted by round-off. Using
- * the present function, it is
- * possible to eliminate this
- * source of problems, by simply
- * writing zero to the output in
- * this case.
- */
- void threshold_double(const double t);
- /**
- * The same as
- * threshold_double(), but for
- * float values.
- */
- void threshold_float(const float t);
-
- /**
- * Output a constant something
- * through this stream.
- */
- template <typename T>
- LogStream & operator << (const T &t);
-
- /**
- * Output double precision
- * numbers through this
- * stream.
- *
- * If they are set, this function
- * applies the methods for making
- * floating point output
- * reproducible as discussed in
- * the introduction.
- */
- LogStream & operator << (const double t);
-
- /**
- * Output single precision
- * numbers through this
- * stream.
- *
- * If they are set, this function
- * applies the methods for making
- * floating point output
- * reproducible as discussed in
- * the introduction.
- */
- LogStream & operator << (const float t);
-
- /**
- * Treat ostream
- * manipulators. This passes on
- * the whole thing to the
- * template function with the
- * exception of the
- * <tt>std::endl</tt>
- * manipulator, for which special
- * action is performed: write the
- * temporary stream buffer
- * including a header to the file
- * and <tt>std::cout</tt> and
- * empty the buffer.
- *
- * An overload of this function is needed
- * anyway, since the compiler can't bind
- * manipulators like @p std::endl
- * directly to template arguments @p T
- * like in the previous general
- * template. This is due to the fact that
- * @p std::endl is actually an overloaded
- * set of functions for @p std::ostream,
- * @p std::wostream, and potentially more
- * of this kind. This function is
- * therefore necessary to pick one
- * element from this overload set.
- */
- LogStream & operator<< (std::ostream& (*p) (std::ostream&));
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this
- * object. Since sometimes
- * the size of objects can
- * not be determined exactly
- * (for example: what is the
- * memory consumption of an
- * STL <tt>std::map</tt> type with a
- * certain number of
- * elements?), this is only
- * an estimate. however often
- * quite close to the true
- * value.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Exception.
- */
- DeclException0(ExcNoFileStreamGiven);
+ /**
+ * Set a new prefix for
+ * @p deallog, which will be
+ * removed when the variable
+ * is destroyed.
+ */
+ Prefix(const std::string &text);
+
+ /**
+ * Set a new prefix for the
+ * given stream, which will
+ * be removed when the
+ * variable is destroyed.
+ */
+ Prefix(const std::string &text, LogStream &stream);
+
+ /**
+ * Remove the prefix
+ * associated with this
+ * variable.
+ */
+ ~Prefix ();
private:
-
- /**
- * Stack of strings which are printed
- * at the beginning of each line to
- * allow identification where the
- * output was generated.
- */
- std::stack<std::string> prefixes;
-
- /**
- * Default stream, where the output
- * is to go to. This stream defaults
- * to <tt>std::cerr</tt>, but can be set to another
- * stream through the constructor.
- */
- std::ostream *std_out;
-
- /**
- * Pointer to a stream, where a copy of
- * the output is to go to. Usually, this
- * will be a file stream.
- *
- * You can set and reset this stream
- * by the <tt>attach</tt> function.
- */
- std::ostream *file;
-
- /**
- * Value denoting the number of
- * prefixes to be printed to the
- * standard output. If more than
- * this number of prefixes is
- * pushed to the stack, then no
- * output will be generated until
- * the number of prefixes shrinks
- * back below this number.
- */
- unsigned int std_depth;
-
- /**
- * Same for the maximum depth of
- * prefixes for output to a file.
- */
- unsigned int file_depth;
-
- /**
- * Flag for printing execution time.
- */
- bool print_utime;
-
- /**
- * Flag for printing time differences.
- */
- bool diff_utime;
-
- /**
- * Time of last output line.
- */
- double last_time;
-
- /**
- * Threshold for printing double
- * values. Every number with
- * absolute value less than this
- * is printed as zero.
- */
- double double_threshold;
-
- /**
- * Threshold for printing float
- * values. Every number with
- * absolute value less than this
- * is printed as zero.
- */
- float float_threshold;
-
- /**
- * An offset added to every float
- * or double number upon
- * output. This is done after the
- * number is compared to
- * #double_threshold or #float_threshold,
- * but before rounding.
- *
- * This functionality was
- * introduced to produce more
- * reproducible floating point
- * output for regression
- * tests. The rationale is, that
- * an exact output value is much
- * more likely to be 1/8 than
- * 0.124997. If we round to two
- * digits though, 1/8 becomes
- * unreliably either .12 or .13
- * due to machine accuracy. On
- * the other hand, if we add a
- * something above machine
- * accuracy first, we will always
- * get .13.
- *
- * It is safe to leave this
- * value equal to zero. For
- * regression tests, the function
- * test_mode() sets it to a
- * reasonable value.
- *
- * The offset is relative to the
- * magnitude of the number.
- */
- double offset;
-
- /**
- * Flag for printing thread id.
- */
- bool print_thread_id;
-
- /**
- * The value times() returned
- * on initialization.
- */
- double reference_time_val;
-
- /**
- * The tms structure times()
- * filled on initialization.
- */
- struct tms reference_tms;
-
- /**
- * Original buffer of
- * <tt>std::cerr</tt>. We store
- * the address of that buffer
- * when #log_cerr is called, and
- * reset it to this value if
- * #log_cerr is called a second
- * time, or when the destructor
- * of this class is run.
- */
- std::streambuf *old_cerr;
-
- /**
- * Print head of line. This prints
- * optional time information and
- * the contents of the prefix stack.
- */
- void print_line_head ();
-
- /**
- * Actually do the work of
- * writing output. This function
- * unifies the work that is
- * common to the two
- * <tt>operator<<</tt> functions.
- */
- template <typename T>
- void print (const T &t);
- /**
- * Check if we are on a new line
- * and print the header before
- * the data.
- */
- std::ostringstream& get_stream();
-
- /**
- * Type of the stream map
- */
- typedef std::map<unsigned int, std_cxx1x::shared_ptr<std::ostringstream> > stream_map_type;
-
- /**
- * We generate a stringstream for
- * every process that sends log
- * messages.
- */
- stream_map_type outstreams;
+ SmartPointer<LogStream,LogStream::Prefix> stream;
+ };
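
A sketch of the scoped use described in the comment above; deallog is the library's global LogStream object:

    {
      LogStream::Prefix prefix ("solve");
      deallog << "starting CG iteration" << std::endl;
    }   // the "solve" prefix is popped automatically here
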
+
+ /**
+ * Standard constructor, since we
+ * intend to provide an object
+ * <tt>deallog</tt> in the library. Set the
+ * standard output stream to <tt>std::cerr</tt>.
+ */
+ LogStream ();
+
+ /**
+ * Destructor.
+ */
+ ~LogStream();
+
+ /**
+ * Enable output to a second
+ * stream <tt>o</tt>.
+ */
+ void attach (std::ostream &o);
+
+ /**
+ * Disable output to the second
+ * stream. You may want to call
+ * <tt>close</tt> on the stream that was
+ * previously attached to this object.
+ */
+ void detach ();
+
+ /**
+ * Set up the logstream for
+ * regression test mode.
+ *
+ * This sets the parameters
+ * #double_threshold,
+ * #float_threshold, and #offset
+ * to nonzero values. The exact
+ * values being used have been
+ * determined experimentally and
+ * can be found in the source
+ * code.
+ *
+ * Called with an argument
+ * <tt>false</tt>, switches off
+ * test mode and sets all
+ * involved parameters to zero.
+ */
+ void test_mode (bool on=true);
+
+ /**
+ * Return the default stream (<tt>std_out</tt>).
+ */
+ std::ostream &get_console ();
+
+ /**
+ * Return the file stream.
+ */
+ std::ostream &get_file_stream ();
+
+ /**
+ * @return true if a file stream
+ * has already been attached.
+ */
+ bool has_file () const;
+
+ /**
+ * Reroutes cerr to LogStream.
+ * Works as a switch, turning
+ * logging of <tt>cerr</tt> on
+ * and off alternately with
+ * every call.
+ */
+ void log_cerr ();
+
+ /**
+ * Return the prefix string.
+ */
+ const std::string &get_prefix () const;
+
+ /**
+ * @deprecated Use Prefix instead
+ *
+ * Push another prefix on the
+ * stack. Prefixes are
+ * automatically separated by a
+ * colon and there is a double
+ * colon after the last prefix.
+ */
+ void push (const std::string &text);
+
+ /**
+ * @deprecated Use Prefix instead
+ *
+ * Remove the last prefix.
+ */
+ void pop ();
+
+ /**
+ * Maximum number of levels to be
+ * printed on the console. This
+ * function allows restricting
+ * console output to the uppermost
+ * levels of iterations. Only
+ * output with less than <tt>n</tt>
+ * prefixes is printed. By calling
+ * this function with <tt>n=0</tt>, no
+ * console output will be written.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ unsigned int depth_console (const unsigned int n);
+
+ /**
+ * Maximum number of levels to be
+ * written to the log file. The
+ * functionality is the same as
+ * <tt>depth_console</tt>, nevertheless,
+ * this function should be used
+ * with care, since it may spoil
+ * the value of a log file.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ unsigned int depth_file (const unsigned int n);
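
A sketch tying attach(), depth_console() and depth_file() together; the file name and the depth values are illustrative:

    std::ofstream logfile ("run.log");
    deallog.attach (logfile);
    deallog.depth_console (2);    // console: only the two outermost prefix levels
    deallog.depth_file (10);      // log file: keep much more detail
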
+
+ /**
+ * Set time printing flag. If this flag
+ * is true, each output line will
+ * be prepended by the user time used
+ * by the running program so far.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ bool log_execution_time (const bool flag);
+
+ /**
+ * Output time differences
+ * between consecutive logs. If
+ * this function is invoked with
+ * <tt>true</tt>, the time difference
+ * between the previous log line
+ * and the recent one is
+ * printed. If it is invoked with
+ * <tt>false</tt>, the accumulated
+ * time since start of the
+ * program is printed (default
+ * behavior).
+ *
+ * The measurement of times is
+ * not changed by this function,
+ * just the output.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ bool log_time_differences (const bool flag);
+
+ /**
+ * Write detailed timing
+ * information.
+ */
+ void timestamp();
+
+ /**
+ * Log the thread id.
+ */
+ bool log_thread_id (const bool flag);
+
+ /**
+ * Set a threshold for the
+ * minimal absolute value of
+ * double values. All numbers
+ * with a smaller absolute value
+ * will be printed as zero.
+ *
+ * The default value for this
+ * threshold is zero,
+ * i.e. numbers are printed
+ * according to their real value.
+ *
+ * This feature is mostly useful
+ * for automated tests: there,
+ * one would like to reproduce
+ * the exact same solution in
+ * each run of a
+ * testsuite. However, subtle
+ * differences in processor,
+ * operating system, or compiler
+ * version can lead to
+ * differences in the last few
+ * digits of numbers, due to
+ * different rounding. While one
+ * can avoid trouble for most
+ * numbers when comparing with
+ * stored results by simply
+ * limiting the accuracy of
+ * output, this does not hold for
+ * numbers very close to zero,
+ * i.e. zero plus accumulated
+ * round-off. For these numbers,
+ * already the first digit is
+ * tainted by round-off. Using
+ * the present function, it is
+ * possible to eliminate this
+ * source of problems, by simply
+ * writing zero to the output in
+ * this case.
+ */
+ void threshold_double(const double t);
+ /**
+ * The same as
+ * threshold_double(), but for
+ * float values.
+ */
+ void threshold_float(const float t);
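
A sketch of how reproducible test output can be requested, either with the canned settings of test_mode() or by setting the thresholds by hand (the numerical values are illustrative):

    deallog.test_mode ();               // preset thresholds and offset

    // or, explicitly:
    deallog.threshold_double (1e-10);   // |x| < 1e-10 is printed as 0
    deallog.threshold_float  (1e-7f);
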
+
+ /**
+ * Output a constant something
+ * through this stream.
+ */
+ template <typename T>
+ LogStream &operator << (const T &t);
+
+ /**
+ * Output double precision
+ * numbers through this
+ * stream.
+ *
+ * If they are set, this function
+ * applies the methods for making
+ * floating point output
+ * reproducible as discussed in
+ * the introduction.
+ */
+ LogStream &operator << (const double t);
+
+ /**
+ * Output single precision
+ * numbers through this
+ * stream.
+ *
+ * If they are set, this function
+ * applies the methods for making
+ * floating point output
+ * reproducible as discussed in
+ * the introduction.
+ */
+ LogStream &operator << (const float t);
+
+ /**
+ * Treat ostream
+ * manipulators. This passes on
+ * the whole thing to the
+ * template function with the
+ * exception of the
+ * <tt>std::endl</tt>
+ * manipulator, for which special
+ * action is performed: write the
+ * temporary stream buffer
+ * including a header to the file
+ * and <tt>std::cout</tt> and
+ * empty the buffer.
+ *
+ * An overload of this function is needed
+ * anyway, since the compiler can't bind
+ * manipulators like @p std::endl
+ * directly to template arguments @p T
+ * like in the previous general
+ * template. This is due to the fact that
+ * @p std::endl is actually an overloaded
+ * set of functions for @p std::ostream,
+ * @p std::wostream, and potentially more
+ * of this kind. This function is
+ * therefore necessary to pick one
+ * element from this overload set.
+ */
+ LogStream &operator<< (std::ostream& (*p) (std::ostream &));
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this
+ * object. Since sometimes
+ * the size of objects can
+ * not be determined exactly
+ * (for example: what is the
+ * memory consumption of an
+ * STL <tt>std::map</tt> type with a
+ * certain number of
+ * elements?), this is only
+ * an estimate, though often
+ * quite close to the true
+ * value.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Exception.
+ */
+ DeclException0(ExcNoFileStreamGiven);
+
+ private:
+
+ /**
+ * Stack of strings which are printed
+ * at the beginning of each line to
+ * allow identification where the
+ * output was generated.
+ */
+ std::stack<std::string> prefixes;
+
+ /**
+ * Default stream, where the output
+ * is to go to. This stream defaults
+ * to <tt>std::cerr</tt>, but can be set to another
+ * stream through the constructor.
+ */
- std::ostream *std_out;
++ std::ostream *std_out;
+
+ /**
+ * Pointer to a stream, where a copy of
+ * the output is to go to. Usually, this
+ * will be a file stream.
+ *
+ * You can set and reset this stream
+ * by the <tt>attach</tt> function.
+ */
- std::ostream *file;
++ std::ostream *file;
+
+ /**
+ * Value denoting the number of
+ * prefixes to be printed to the
+ * standard output. If more than
+ * this number of prefixes is
+ * pushed to the stack, then no
+ * output will be generated until
+ * the number of prefixes shrinks
+ * back below this number.
+ */
+ unsigned int std_depth;
+
+ /**
+ * Same for the maximum depth of
+ * prefixes for output to a file.
+ */
+ unsigned int file_depth;
+
+ /**
+ * Flag for printing execution time.
+ */
+ bool print_utime;
+
+ /**
+ * Flag for printing time differences.
+ */
+ bool diff_utime;
+
+ /**
+ * Time of last output line.
+ */
+ double last_time;
+
+ /**
+ * Threshold for printing double
+ * values. Every number with
+ * absolute value less than this
+ * is printed as zero.
+ */
+ double double_threshold;
+
+ /**
+ * Threshold for printing float
+ * values. Every number with
+ * absolute value less than this
+ * is printed as zero.
+ */
+ float float_threshold;
+
+ /**
+ * An offset added to every float
+ * or double number upon
+ * output. This is done after the
+ * number is compared to
+ * #double_threshold or #float_threshold,
+ * but before rounding.
+ *
+ * This functionality was
+ * introduced to produce more
+ * reproducible floating point
+ * output for regression
+ * tests. The rationale is that
+ * an exact output value is much
+ * more likely to be 1/8 than
+ * 0.124997. If we round to two
+ * digits though, 1/8 becomes
+ * unreliably either .12 or .13
+ * due to machine accuracy. On
+ * the other hand, if we add
+ * something above machine
+ * accuracy first, we will always
+ * get .13.
+ *
+ * It is safe to leave this
+ * value equal to zero. For
+ * regression tests, the function
+ * test_mode() sets it to a
+ * reasonable value.
+ *
+ * The offset is relative to the
+ * magnitude of the number.
+ */
+ double offset;
+
+ /**
+ * Flag for printing thread id.
+ */
+ bool print_thread_id;
+
+ /**
+ * The value times() returned
+ * on initialization.
+ */
+ double reference_time_val;
+
+ /**
+ * The tms structure times()
+ * filled on initialization.
+ */
+ struct tms reference_tms;
+
+ /**
+ * Original buffer of
+ * <tt>std::cerr</tt>. We store
+ * the address of that buffer
+ * when #log_cerr is called, and
+ * reset it to this value if
+ * #log_cerr is called a second
+ * time, or when the destructor
+ * of this class is run.
+ */
+ std::streambuf *old_cerr;
+
+ /**
+ * Print head of line. This prints
+ * optional time information and
+ * the contents of the prefix stack.
+ */
+ void print_line_head ();
+
+ /**
+ * Actually do the work of
+ * writing output. This function
+ * unifies the work that is
+ * common to the two
+ * <tt>operator<<</tt> functions.
+ */
+ template <typename T>
+ void print (const T &t);
+ /**
+ * Check if we are on a new line
+ * and print the header before
+ * the data.
+ */
+ std::ostringstream &get_stream();
+
+ /**
+ * Type of the stream map.
+ */
+ typedef std::map<unsigned int, std_cxx1x::shared_ptr<std::ostringstream> > stream_map_type;
+
+ /**
+ * We generate a stringstream for
+ * every process that sends log
+ * messages.
+ */
+ stream_map_type outstreams;
};
namespace internal
{
#if DEAL_II_USE_MT == 1
- /**
- * Take a range argument and call the
- * given function with its begin and end.
- */
+ /**
+ * Take a range argument and call the
+ * given function with its begin and end.
+ */
template <typename RangeType, typename Function>
void apply_to_subranges (const tbb::blocked_range<RangeType> &range,
- const Function &f)
+ const Function &f)
{
f (range.begin(), range.end());
}
namespace Patterns
{
- /**
- * Base class to declare common
- * interface. The purpose of this
- * class is mostly to define the
- * interface of patterns, and to
- * force derived classes to have a
- * <tt>clone</tt> function. It is thus,
- * in the languages of the "Design
- * Patterns" book (Gamma et al.), a
- * "prototype".
- */
+ /**
+ * Base class to declare common
+ * interface. The purpose of this
+ * class is mostly to define the
+ * interface of patterns, and to
+ * force derived classes to have a
+ * <tt>clone</tt> function. It is thus,
+ * in the language of the "Design
+ * Patterns" book (Gamma et al.), a
+ * "prototype".
+ */
class PatternBase
{
- public:
- /**
- * Make destructor of this and all
- * derived classes virtual.
- */
- virtual ~PatternBase ();
-
- /**
- * Return <tt>true</tt> if the given string
- * matches the pattern.
- */
- virtual bool match (const std::string &test_string) const = 0;
-
- /**
- * Return a string describing the
- * pattern.
- */
- virtual std::string description () const = 0;
-
- /**
- * Return a pointer to an
- * exact copy of the
- * object. This is necessary
- * since we want to store
- * objects of this type in
- * containers, were we need
- * to copy objects without
- * knowledge of their actual
- * data type (we only have
- * pointers to the base
- * class).
- *
- * Ownership of the objects
- * returned by this function
- * is passed to the caller of
- * this function.
- */
- virtual PatternBase * clone () const = 0;
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this object. To
- * avoid unnecessary
- * overhead, we do not force
- * derived classes to provide
- * this function as a virtual
- * overloaded one, but rather
- * try to cast the present
- * object to one of the known
- * derived classes and if
- * that fails then take the
- * size of this base class
- * instead and add 32 byte
- * (this value is arbitrary,
- * it should account for
- * virtual function tables,
- * and some possible data
- * elements). Since there are
- * usually not many thousands
- * of objects of this type
- * around, and since the
- * memory_consumption
- * mechanism is used to find
- * out where memory in the
- * range of many megabytes
- * is, this seems like a
- * reasonable approximation.
- *
- * On the other hand, if you
- * know that your class
- * deviates from this
- * assumption significantly,
- * you can still overload
- * this function.
- */
- virtual std::size_t memory_consumption () const;
+ public:
+ /**
+ * Make destructor of this and all
+ * derived classes virtual.
+ */
+ virtual ~PatternBase ();
+
+ /**
+ * Return <tt>true</tt> if the given string
+ * matches the pattern.
+ */
+ virtual bool match (const std::string &test_string) const = 0;
+
+ /**
+ * Return a string describing the
+ * pattern.
+ */
+ virtual std::string description () const = 0;
+
+ /**
+ * Return a pointer to an
+ * exact copy of the
+ * object. This is necessary
+ * since we want to store
+ * objects of this type in
+ * containers, where we need
+ * to copy objects without
+ * knowledge of their actual
+ * data type (we only have
+ * pointers to the base
+ * class).
+ *
+ * Ownership of the objects
+ * returned by this function
+ * is passed to the caller of
+ * this function.
+ */
+ virtual PatternBase *clone () const = 0;
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this object. To
+ * avoid unnecessary
+ * overhead, we do not force
+ * derived classes to provide
+ * this function as a virtual
+ * overloaded one, but rather
+ * try to cast the present
+ * object to one of the known
+ * derived classes and if
+ * that fails then take the
+ * size of this base class
+ * instead and add 32 bytes
+ * (this value is arbitrary,
+ * it should account for
+ * virtual function tables,
+ * and some possible data
+ * elements). Since there are
+ * usually not many thousands
+ * of objects of this type
+ * around, and since the
+ * memory_consumption
+ * mechanism is used to find
+ * out where memory in the
+ * range of many megabytes
+ * is, this seems like a
+ * reasonable approximation.
+ *
+ * On the other hand, if you
+ * know that your class
+ * deviates from this
+ * assumption significantly,
+ * you can still overload
+ * this function.
+ */
+ virtual std::size_t memory_consumption () const;
};
- /**
- * Returns pointer to the correct
- * derived class based on description.
- */
- PatternBase * pattern_factory (const std::string& description);
-
- /**
- * Test for the string being an
- * integer. If bounds are given
- * to the constructor, then the
- * integer given also needs to be
- * within the interval specified
- * by these bounds. Note that
- * unlike common convention in
- * the C++ standard library, both
- * bounds of this interval are
- * inclusive; the reason is that
- * in practice in most cases, one
- * needs closed intervals, but
- * these can only be realized
- * with inclusive bounds for
- * non-integer values. We thus
- * stay consistent by always
- * using closed intervals.
- *
- * If the upper bound given to
- * the constructor is smaller
- * than the lower bound, then the
- * infinite interval is implied,
- * i.e. every integer is allowed.
- *
- * Giving bounds may be useful if
- * for example a value can only
- * be positive and less than a
- * reasonable upper bound (for
- * example the number of
- * refinement steps to be
- * performed), or in many other
- * cases.
- */
+ /**
+ * Return a pointer to an object of the
+ * correct derived class, based on the given description.
+ */
+ PatternBase *pattern_factory (const std::string &description);
+
+ /**
+ * Test for the string being an
+ * integer. If bounds are given
+ * to the constructor, then the
+ * integer given also needs to be
+ * within the interval specified
+ * by these bounds. Note that
+ * unlike common convention in
+ * the C++ standard library, both
+ * bounds of this interval are
+ * inclusive; the reason is that
+ * in practice in most cases, one
+ * needs closed intervals, but
+ * these can only be realized
+ * with inclusive bounds for
+ * non-integer values. We thus
+ * stay consistent by always
+ * using closed intervals.
+ *
+ * If the upper bound given to
+ * the constructor is smaller
+ * than the lower bound, then the
+ * infinite interval is implied,
+ * i.e. every integer is allowed.
+ *
+ * Giving bounds may be useful if
+ * for example a value can only
+ * be positive and less than a
+ * reasonable upper bound (for
+ * example the number of
+ * refinement steps to be
+ * performed), or in many other
+ * cases.
+ */
class Integer : public PatternBase
{
- public:
- /**
- * Minimal integer value. If
- * the numeric_limits class
- * is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const int min_int_value;
-
- /**
- * Maximal integer value. If
- * the numeric_limits class
- * is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const int max_int_value;
-
- /**
- * Constructor. Bounds can be
- * specified within which a
- * valid parameter has to
- * be. If the upper bound is
- * smaller than the lower
- * bound, then the infinite
- * interval is meant. The
- * default values are chosen
- * such that no bounds are
- * enforced on parameters.
- */
- Integer (const int lower_bound = min_int_value,
- const int upper_bound = max_int_value);
-
- /**
- * Return <tt>true</tt> if the
- * string is an integer and
- * its value is within the
- * specified range.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match. If bounds were
- * specified to the
- * constructor, then include
- * them into this
- * description.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static Integer* create (const std::string& description);
-
- private:
- /**
- * Value of the lower
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or larger, if the
- * bounds of the interval for
- * a valid range.
- */
- const int lower_bound;
-
- /**
- * Value of the upper
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or less, if the
- * bounds of the interval for
- * a valid range.
- */
- const int upper_bound;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
+ public:
+ /**
+ * Minimal integer value. If
+ * the numeric_limits class
+ * is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const int min_int_value;
+
+ /**
+ * Maximal integer value. If
+ * the numeric_limits class
+ * is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const int max_int_value;
+
+ /**
+ * Constructor. Bounds can be
+ * specified within which a
+ * valid parameter has to
+ * be. If the upper bound is
+ * smaller than the lower
+ * bound, then the infinite
+ * interval is meant. The
+ * default values are chosen
+ * such that no bounds are
+ * enforced on parameters.
+ */
+ Integer (const int lower_bound = min_int_value,
+ const int upper_bound = max_int_value);
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is an integer and
+ * its value is within the
+ * specified range.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match. If bounds were
+ * specified to the
+ * constructor, then include
+ * them into this
+ * description.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Create a new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static Integer *create (const std::string &description);
+
+ private:
+ /**
+ * Value of the lower
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or larger, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const int lower_bound;
+
+ /**
+ * Value of the upper
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or less, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const int upper_bound;
+
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
};
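
A sketch of the class above in action; the bounds are illustrative:

    Patterns::Integer refinement_steps (1, 10);   // closed interval [1,10]

    const bool ok        = refinement_steps.match ("7");    // true
    const bool too_large = refinement_steps.match ("42");   // false, out of bounds
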
- /**
- * Test for the string being a
- * <tt>double</tt>. If bounds are
- * given to the constructor, then
- * the integer given also needs
- * to be within the interval
- * specified by these
- * bounds. Note that unlike
- * common convention in the C++
- * standard library, both bounds
- * of this interval are
- * inclusive; the reason is that
- * in practice in most cases, one
- * needs closed intervals, but
- * these can only be realized
- * with inclusive bounds for
- * non-integer values. We thus
- * stay consistent by always
- * using closed intervals.
- *
- * If the upper bound given to
- * the constructor is smaller
- * than the lower bound, then the
- * infinite interval is implied,
- * i.e. every integer is allowed.
- *
- * Giving bounds may be useful if
- * for example a value can only
- * be positive and less than a
- * reasonable upper bound (for
- * example damping parameters are
- * frequently only reasonable if
- * between zero and one), or in
- * many other cases.
- */
+ /**
+ * Test for the string being a
+ * <tt>double</tt>. If bounds are
+ * given to the constructor, then
+ * the value given also needs
+ * to be within the interval
+ * specified by these
+ * bounds. Note that unlike
+ * common convention in the C++
+ * standard library, both bounds
+ * of this interval are
+ * inclusive; the reason is that
+ * in practice in most cases, one
+ * needs closed intervals, but
+ * these can only be realized
+ * with inclusive bounds for
+ * non-integer values. We thus
+ * stay consistent by always
+ * using closed intervals.
+ *
+ * If the upper bound given to
+ * the constructor is smaller
+ * than the lower bound, then the
+ * infinite interval is implied,
+ * i.e. every value is allowed.
+ *
+ * Giving bounds may be useful if
+ * for example a value can only
+ * be positive and less than a
+ * reasonable upper bound (for
+ * example damping parameters are
+ * frequently only reasonable if
+ * between zero and one), or in
+ * many other cases.
+ */
class Double : public PatternBase
{
- public:
- /**
- * Minimal double value. If the
- * <tt>std::numeric_limits</tt>
- * class is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const double min_double_value;
-
- /**
- * Maximal double value. If the
- * numeric_limits class is
- * available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const double max_double_value;
-
- /**
- * Constructor. Bounds can be
- * specified within which a
- * valid parameter has to
- * be. If the upper bound is
- * smaller than the lower
- * bound, then the infinite
- * interval is meant. The
- * default values are chosen
- * such that no bounds are
- * enforced on parameters.
- */
- Double (const double lower_bound = min_double_value,
- const double upper_bound = max_double_value);
-
- /**
- * Return <tt>true</tt> if the
- * string is a number and its
- * value is within the
- * specified range.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match. If bounds were
- * specified to the
- * constructor, then include
- * them into this
- * description.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static Double* create (const std::string& description);
-
- private:
- /**
- * Value of the lower
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or larger, if the
- * bounds of the interval for
- * a valid range.
- */
- const double lower_bound;
-
- /**
- * Value of the upper
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or less, if the
- * bounds of the interval for
- * a valid range.
- */
- const double upper_bound;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
+ public:
+ /**
+ * Minimal double value. If the
+ * <tt>std::numeric_limits</tt>
+ * class is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const double min_double_value;
+
+ /**
+ * Maximal double value. If the
+ * numeric_limits class is
+ * available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const double max_double_value;
+
+ /**
+ * Constructor. Bounds can be
+ * specified within which a
+ * valid parameter has to
+ * be. If the upper bound is
+ * smaller than the lower
+ * bound, then the infinite
+ * interval is meant. The
+ * default values are chosen
+ * such that no bounds are
+ * enforced on parameters.
+ */
+ Double (const double lower_bound = min_double_value,
+ const double upper_bound = max_double_value);
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is a number and its
+ * value is within the
+ * specified range.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match. If bounds were
+ * specified to the
+ * constructor, then include
+ * them into this
+ * description.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Create a new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static Double *create (const std::string &description);
+
+ private:
+ /**
+ * Value of the lower
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or larger, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const double lower_bound;
+
+ /**
+ * Value of the upper
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or less, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const double upper_bound;
+
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
+ };
+
+ /**
+ * Test for the string being one
+ * of a sequence of values given
+ * like a regular expression. For
+ * example, if the string given
+ * to the constructor is
+ * <tt>"red|blue|black"</tt>, then the
+ * @ref match function returns
+ * <tt>true</tt> exactly if the string
+ * is either "red" or "blue" or
+ * "black". Spaces around the
+ * pipe signs do not matter and
+ * are eliminated.
+ */
+ class Selection : public PatternBase
+ {
+ public:
+ /**
+ * Constructor. Take the
+ * given parameter as the
+ * specification of valid
+ * strings.
+ */
+ Selection (const std::string &seq);
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is an element of
+ * the description list
+ * passed to the constructor.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match. Here, this is the
+ * list of valid strings
+ * passed to the constructor.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Create a new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static Selection *create (const std::string &description);
+
+ private:
+ /**
+ * List of valid strings as
+ * passed to the
+ * constructor. We don't make
+ * this string constant, as
+ * we process it somewhat in
+ * the constructor.
+ */
+ std::string sequence;
+
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
};
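
A sketch of the Selection pattern documented above; spaces around the pipe signs in the constructor argument are ignored:

    Patterns::Selection color ("red|blue|black");

    const bool valid   = color.match ("blue");    // true
    const bool invalid = color.match ("green");   // false
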
- /**
- * Test for the string being one
- * of a sequence of values given
- * like a regular expression. For
- * example, if the string given
- * to the constructor is
- * <tt>"red|blue|black"</tt>, then the
- * @ref match function returns
- * <tt>true</tt> exactly if the string
- * is either "red" or "blue" or
- * "black". Spaces around the
- * pipe signs do not matter and
- * are eliminated.
- */
- class Selection : public PatternBase
- {
- public:
- /**
- * Constructor. Take the
- * given parameter as the
- * specification of valid
- * strings.
- */
- Selection (const std::string &seq);
-
- /**
- * Return <tt>true</tt> if the
- * string is an element of
- * the description list
- * passed to the constructor.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match. Here, this is the
- * list of valid strings
- * passed to the constructor.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static Selection* create (const std::string& description);
-
- private:
- /**
- * List of valid strings as
- * passed to the
- * constructor. We don't make
- * this string constant, as
- * we process it somewhat in
- * the constructor.
- */
- std::string sequence;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
- };
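// Usage sketch (not part of this patch): a Selection pattern is typically
// handed to ParameterHandler::declare_entry() to restrict a parameter to a
// fixed set of strings. The include path, namespace, and the
// declare_entry()/get() interface are the usual deal.II ones and are assumed
// here; they may differ between library versions.
#include <deal.II/base/parameter_handler.h>
#include <string>

std::string get_output_color ()
{
  using namespace dealii;

  ParameterHandler prm;
  prm.declare_entry ("Output color", "red",
                     Patterns::Selection ("red|blue|black"),
                     "Color used for graphical output");
  // prm.read_input ("parameters.prm");        // optionally read user values
  return prm.get ("Output color");             // "red" unless overridden
}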
- List (const PatternBase &base_pattern,
+ /**
+ * This pattern matches a list of
+ * comma-separated values each of which
+ * have to match a pattern given to the
+ * constructor. With two additional
+ * parameters, the number of elements this
+ * list has to have can be specified. If
+ * none is specified, the list may have
+ * zero or more entries.
+ */
+ class List : public PatternBase
+ {
+ public:
+ /**
+ * Maximal integer value. If
+ * the numeric_limits class
+ * is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const unsigned int max_int_value;
+
+ /**
+ * Constructor. Take the
+ * given parameter as the
+ * specification of valid
+ * elements of the list.
+ *
+ * The two other arguments can
+ * be used to denote minimal
+ * and maximal allowable
+ * lengths of the list.
+ */
++ List (const PatternBase &base_pattern,
+ const unsigned int min_elements = 0,
+ const unsigned int max_elements = max_int_value);
+
+ /**
+ * Destructor.
+ */
+ virtual ~List ();
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is a comma-separated
+ * list of strings each of
+ * which match the pattern
+ * given to the constructor.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Creates a new object if the start of
+ * the description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static List *create (const std::string &description);
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception.
+ */
+ DeclException2 (ExcInvalidRange,
+ int, int,
+ << "The values " << arg1 << " and " << arg2
+ << " do not form a valid range.");
+ //@}
+ private:
+ /**
+ * Copy of the pattern that
+ * each element of the list has
+ * to satisfy.
+ */
+ PatternBase *pattern;
+
+ /**
+ * Minimum number of elements
+ * the list must have.
+ */
+ const unsigned int min_elements;
+
+ /**
+ * Maximum number of elements
+ * the list must have.
+ */
+ const unsigned int max_elements;
- /**
- * This pattern matches a list of
- * comma-separated values each of which
- * have to match a pattern given to the
- * constructor. With two additional
- * parameters, the number of elements this
- * list has to have can be specified. If
- * none is specified, the list may have
- * zero or more entries.
- */
- class List : public PatternBase
- {
- public:
- /**
- * Maximal integer value. If
- * the numeric_limits class
- * is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const unsigned int max_int_value;
-
- /**
- * Constructor. Take the
- * given parameter as the
- * specification of valid
- * elements of the list.
- *
- * The two other arguments can
- * be used to denote minimal
- * and maximal allowable
- * lengths of the list.
- */
- List (const PatternBase &base_pattern,
- const unsigned int min_elements = 0,
- const unsigned int max_elements = max_int_value);
-
- /**
- * Destructor.
- */
- virtual ~List ();
-
- /**
- * Return <tt>true</tt> if the
- * string is a comma-separated
- * list of strings each of
- * which match the pattern
- * given to the constructor.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static List* create (const std::string& description);
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this object.
- */
- std::size_t memory_consumption () const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception.
- */
- DeclException2 (ExcInvalidRange,
- int, int,
- << "The values " << arg1 << " and " << arg2
- << " do not form a valid range.");
- //@}
- private:
- /**
- * Copy of the pattern that
- * each element of the list has
- * to satisfy.
- */
- PatternBase *pattern;
-
- /**
- * Minimum number of elements
- * the list must have.
- */
- const unsigned int min_elements;
-
- /**
- * Maximum number of elements
- * the list must have.
- */
- const unsigned int max_elements;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
};
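// Usage sketch (not part of this patch): a List pattern that accepts between
// two and four comma-separated doubles, each restricted to [0,1]. The
// Patterns::Double(lower,upper) constructor is assumed to exist as suggested
// by the bounds members declared above.
#include <deal.II/base/parameter_handler.h>

void list_pattern_example ()
{
  using namespace dealii;

  const Patterns::List list_pattern (Patterns::Double (0., 1.),
                                     2,    // min_elements
                                     4);   // max_elements

  const bool a = list_pattern.match ("0.25, 0.75");      // true
  const bool b = list_pattern.match ("0.1");             // false: too few entries
  const bool c = list_pattern.match ("0.1, 0.2, 7.0");   // false: 7.0 out of bounds
  (void)a; (void)b; (void)c;
}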
inline
- const IndexSet& Partitioner::ghost_indices() const
- const IndexSet &Partitioner::ghost_indices() const
++ const IndexSet &Partitioner::ghost_indices() const
{
return ghost_indices_data;
}
template <int dim>
class QProjector
{
- std::vector<Point<dim> > &q_points,
+ public:
+ /**
+ * Define a typedef for a
+ * quadrature that acts on an
+ * object of one dimension
+ * less. For cells, this would
+ * then be a face quadrature.
+ */
+ typedef Quadrature<dim-1> SubQuadrature;
+
+ /**
+ * Compute the quadrature points
+ * on the cell if the given
+ * quadrature formula is used on
+ * face <tt>face_no</tt>. For further
+ * details, see the general doc
+ * for this class.
+ */
+ static void project_to_face (const SubQuadrature &quadrature,
+ const unsigned int face_no,
+ std::vector<Point<dim> > &q_points);
+
+ /**
+ * Compute the cell quadrature
+ * formula corresponding to using
+ * <tt>quadrature</tt> on face
+ * <tt>face_no</tt>. For further
+ * details, see the general doc
+ * for this class.
+ */
+ static Quadrature<dim>
+ project_to_face (const SubQuadrature &quadrature,
+ const unsigned int face_no);
+
+ /**
+ * Compute the quadrature points on the
+ * cell if the given quadrature formula is
+ * used on face <tt>face_no</tt>, subface
+ * number <tt>subface_no</tt> corresponding
+ * to RefinementCase<dim-1>
+ * <tt>ref_case</tt>. The last argument is
+ * only used in 3D.
+ *
+ * @note Only the points are
+ * transformed. The quadrature
+ * weights are the same as those
+ * of the original rule.
+ */
+ static void project_to_subface (const SubQuadrature &quadrature,
+ const unsigned int face_no,
+ const unsigned int subface_no,
- project_to_child (const Quadrature<dim> &quadrature,
++ std::vector<Point<dim> > &q_points,
+ const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
+
+ /**
+ * Compute the cell quadrature formula
+ * corresponding to using
+ * <tt>quadrature</tt> on subface
+ * <tt>subface_no</tt> of face
+ * <tt>face_no</tt> with
+ * RefinementCase<dim-1>
+ * <tt>ref_case</tt>. The last argument is
+ * only used in 3D.
+ *
+ * @note Only the points are
+ * transformed. The quadrature
+ * weights are the same as those
+ * of the original rule.
+ */
+ static Quadrature<dim>
+ project_to_subface (const SubQuadrature &quadrature,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
+
+ /**
+ * Take a face quadrature formula
+ * and generate a cell quadrature
+ * formula from it where the
+ * quadrature points of the given
+ * argument are projected on all
+ * faces.
+ *
+ * The weights of the new rule
+ * are replications of the
+ * original weights. Thus, the
+ * sum of the weights is not one,
+ * but the number of faces, which
+ * is the surface of the
+ * reference cell.
+ *
+ * This in particular allows us
+ * to extract a subset of points
+ * corresponding to a single face
+ * and use it as a quadrature on
+ * this face, as is done in
+ * FEFaceValues.
+ *
+ * @note In 3D, this function
+ * produces eight sets of
+ * quadrature points for each
+ face, in order to cope with
+ * possibly different
+ * orientations of the mesh.
+ */
+ static Quadrature<dim>
+ project_to_all_faces (const SubQuadrature &quadrature);
+
+ /**
+ * Take a face quadrature formula
+ * and generate a cell quadrature
+ * formula from it where the
+ * quadrature points of the given
+ * argument are projected on all
+ * subfaces.
+ *
+ * Like in project_to_all_faces(),
+ * the weights of the new rule
+ * sum up to the number of faces
+ * (not subfaces), which
+ * is the surface of the
+ * reference cell.
+ *
+ * This in particular allows us
+ * to extract a subset of points
+ * corresponding to a single subface
+ * and use it as a quadrature on
+ * this face, as is done in
+ * FESubfaceValues.
+ */
+ static Quadrature<dim>
+ project_to_all_subfaces (const SubQuadrature &quadrature);
+
+ /**
+ * Project a given quadrature
+ * formula to a child of a
+ * cell. You may want to use this
+ * function in case you want to
+ * extend an integral only over
+ * the area which a potential
+ * child would occupy. The child
+ * numbering is the same as the
+ * children would be numbered
+ * upon refinement of the cell.
+ *
+ * As integration using this
+ * quadrature formula now only
+ * extends over a fraction of the
+ * cell, the weights of the
+ * resulting object are divided by
+ * GeometryInfo<dim>::children_per_cell.
+ */
+ static
+ Quadrature<dim>
- project_to_all_children (const Quadrature<dim> &quadrature);
++ project_to_child (const Quadrature<dim> &quadrature,
+ const unsigned int child_no);
+
+ /**
+ * Project a quadrature rule to
+ * all children of a
+ * cell. Similarly to
+ * project_to_all_subfaces(),
+ * this function replicates the
+ * formula generated by
+ * project_to_child() for all
+ * children, such that the
+ * weights sum up to one, the
+ * volume of the total cell
+ * again.
+ *
+ * The child
+ * numbering is the same as the
+ * children would be numbered
+ * upon refinement of the cell.
+ */
+ static
+ Quadrature<dim>
++ project_to_all_children (const Quadrature<dim> &quadrature);
+
+ /**
+ * Project the one-dimensional
+ * rule <tt>quadrature</tt> to
+ * the straight line connecting
+ * the points <tt>p1</tt> and
+ * <tt>p2</tt>.
+ */
+ static
+ Quadrature<dim>
+ project_to_line(const Quadrature<1> &quadrature,
+ const Point<dim> &p1,
+ const Point<dim> &p2);
+
+ /**
+ * Since the
+ * project_to_all_faces() and
+ * project_to_all_subfaces()
+ * functions chain together the
+ * quadrature points and weights
+ * of all projections of a face
+ * quadrature formula to the
+ * faces or subfaces of a cell,
+ * we need a way to identify
+ * where the starting index of
+ * the points and weights for a
+ * particular face or subface
+ * is. This class provides this:
+ * there are static member
+ * functions that generate
+ * objects of this type, given
+ * face or subface indices, and
+ * you can then use the generated
+ * object in place of an integer
+ * that denotes the offset of a
+ * given dataset.
+ *
+ * @author Wolfgang Bangerth, 2003
+ */
+ class DataSetDescriptor
+ {
public:
- /**
- * Define a typedef for a
- * quadrature that acts on an
- * object of one dimension
- * less. For cells, this would
- * then be a face quadrature.
- */
- typedef Quadrature<dim-1> SubQuadrature;
-
- /**
- * Compute the quadrature points
- * on the cell if the given
- * quadrature formula is used on
- * face <tt>face_no</tt>. For further
- * details, see the general doc
- * for this class.
- */
- static void project_to_face (const SubQuadrature &quadrature,
- const unsigned int face_no,
- std::vector<Point<dim> > &q_points);
-
- /**
- * Compute the cell quadrature
- * formula corresponding to using
- * <tt>quadrature</tt> on face
- * <tt>face_no</tt>. For further
- * details, see the general doc
- * for this class.
- */
- static Quadrature<dim>
- project_to_face (const SubQuadrature &quadrature,
- const unsigned int face_no);
-
- /**
- * Compute the quadrature points on the
- * cell if the given quadrature formula is
- * used on face <tt>face_no</tt>, subface
- * number <tt>subface_no</tt> corresponding
- * to RefinementCase<dim-1>
- * <tt>ref_case</tt>. The last argument is
- * only used in 3D.
- *
- * @note Only the points are
- * transformed. The quadrature
- * weights are the same as those
- * of the original rule.
- */
- static void project_to_subface (const SubQuadrature &quadrature,
- const unsigned int face_no,
- const unsigned int subface_no,
- std::vector<Point<dim> > &q_points,
- const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
-
- /**
- * Compute the cell quadrature formula
- * corresponding to using
- * <tt>quadrature</tt> on subface
- * <tt>subface_no</tt> of face
- * <tt>face_no</tt> with
- * RefinementCase<dim-1>
- * <tt>ref_case</tt>. The last argument is
- * only used in 3D.
- *
- * @note Only the points are
- * transformed. The quadrature
- * weights are the same as those
- * of the original rule.
- */
- static Quadrature<dim>
- project_to_subface (const SubQuadrature &quadrature,
- const unsigned int face_no,
- const unsigned int subface_no,
- const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
-
- /**
- * Take a face quadrature formula
- * and generate a cell quadrature
- * formula from it where the
- * quadrature points of the given
- * argument are projected on all
- * faces.
- *
- * The weights of the new rule
- * are replications of the
- * original weights. Thus, the
- * sum of the weights is not one,
- * but the number of faces, which
- * is the surface of the
- * reference cell.
- *
- * This in particular allows us
- * to extract a subset of points
- * corresponding to a single face
- * and use it as a quadrature on
- * this face, as is done in
- * FEFaceValues.
- *
- * @note In 3D, this function
- * produces eight sets of
- * quadrature points for each
- face, in order to cope with
- * possibly different
- * orientations of the mesh.
- */
- static Quadrature<dim>
- project_to_all_faces (const SubQuadrature &quadrature);
-
- /**
- * Take a face quadrature formula
- * and generate a cell quadrature
- * formula from it where the
- * quadrature points of the given
- * argument are projected on all
- * subfaces.
- *
- * Like in project_to_all_faces(),
- * the weights of the new rule
- * sum up to the number of faces
- * (not subfaces), which
- * is the surface of the
- * reference cell.
- *
- * This in particular allows us
- * to extract a subset of points
- * corresponding to a single subface
- * and use it as a quadrature on
- * this face, as is done in
- * FESubfaceValues.
- */
- static Quadrature<dim>
- project_to_all_subfaces (const SubQuadrature &quadrature);
-
- /**
- * Project a given quadrature
- * formula to a child of a
- * cell. You may want to use this
- * function in case you want to
- * extend an integral only over
- * the area which a potential
- * child would occupy. The child
- * numbering is the same as the
- * children would be numbered
- * upon refinement of the cell.
- *
- * As integration using this
- * quadrature formula now only
- * extends over a fraction of the
- * cell, the weights of the
- * resulting object are divided by
- * GeometryInfo<dim>::children_per_cell.
- */
+ /**
+ * Default constructor. This
+ * doesn't do much except
+ * generating an invalid
+ * index, since you didn't
+ * give a valid descriptor of
+ * the cell, face, or subface
+ * you wanted.
+ */
+ DataSetDescriptor ();
+
+ /**
+ * Static function to
+ * generate the offset of a
+ * cell. Since we only have
+ * one cell per quadrature
+ * object, this offset is of
+ * course zero, but we carry
+ * this function around for
+ * consistency with the other
+ * static functions.
+ */
+ static DataSetDescriptor cell ();
+
+ /**
+ * Static function to generate an
+ * offset object for a given face of a
+ * cell with the given face
+ * orientation, flip and rotation. This
+ * function of course is only allowed
+ * if <tt>dim>=2</tt>, and the face
+ * orientation, flip and rotation are
+ * ignored if the space dimension
+ * equals 2.
+ *
+ * The last argument denotes
+ * the number of quadrature
+ * points the
+ * lower-dimensional face
+ * quadrature formula (the
+ * one that has been
+ * projected onto the faces)
+ * has.
+ */
static
- Quadrature<dim>
- project_to_child (const Quadrature<dim> &quadrature,
- const unsigned int child_no);
-
- /**
- * Project a quadrature rule to
- * all children of a
- * cell. Similarly to
- * project_to_all_subfaces(),
- * this function replicates the
- * formula generated by
- * project_to_child() for all
- * children, such that the
- * weights sum up to one, the
- * volume of the total cell
- * again.
- *
- * The child
- * numbering is the same as the
- * children would be numbered
- * upon refinement of the cell.
- */
+ DataSetDescriptor
+ face (const unsigned int face_no,
+ const bool face_orientation,
+ const bool face_flip,
+ const bool face_rotation,
+ const unsigned int n_quadrature_points);
+
+ /**
+ * Static function to generate an
+ * offset object for a given subface of
+ * a cell with the given face
+ * orientation, flip and rotation. This
+ * function of course is only allowed
+ * if <tt>dim>=2</tt>, and the face
+ * orientation, flip and rotation are
+ * ignored if the space dimension
+ * equals 2.
+ *
+ * The last but one argument denotes
+ * the number of quadrature
+ * points the
+ * lower-dimensional face
+ * quadrature formula (the
+ * one that has been
+ * projected onto the faces)
+ * has.
+ *
+ * Through the last argument
+ * anisotropic refinement can be
+ * respected.
+ */
static
- Quadrature<dim>
- project_to_all_children (const Quadrature<dim> &quadrature);
-
- /**
- * Project the one-dimensional
- * rule <tt>quadrature</tt> to
- * the straight line connecting
- * the points <tt>p1</tt> and
- * <tt>p2</tt>.
- */
- static
- Quadrature<dim>
- project_to_line(const Quadrature<1>& quadrature,
- const Point<dim>& p1,
- const Point<dim>& p2);
-
- /**
- * Since the
- * project_to_all_faces() and
- * project_to_all_subfaces()
- * functions chain together the
- * quadrature points and weights
- * of all projections of a face
- * quadrature formula to the
- * faces or subfaces of a cell,
- * we need a way to identify
- * where the starting index of
- * the points and weights for a
- * particular face or subface
- * is. This class provides this:
- * there are static member
- * functions that generate
- * objects of this type, given
- * face or subface indices, and
- * you can then use the generated
- * object in place of an integer
- * that denotes the offset of a
- * given dataset.
- *
- * @author Wolfgang Bangerth, 2003
- */
- class DataSetDescriptor
- {
- public:
- /**
- * Default constructor. This
- * doesn't do much except
- * generating an invalid
- * index, since you didn't
- * give a valid descriptor of
- * the cell, face, or subface
- * you wanted.
- */
- DataSetDescriptor ();
-
- /**
- * Static function to
- * generate the offset of a
- * cell. Since we only have
- * one cell per quadrature
- * object, this offset is of
- * course zero, but we carry
- * this function around for
- * consistency with the other
- * static functions.
- */
- static DataSetDescriptor cell ();
-
- /**
- * Static function to generate an
- * offset object for a given face of a
- * cell with the given face
- * orientation, flip and rotation. This
- * function of course is only allowed
- * if <tt>dim>=2</tt>, and the face
- * orientation, flip and rotation are
- * ignored if the space dimension
- * equals 2.
- *
- * The last argument denotes
- * the number of quadrature
- * points the
- * lower-dimensional face
- * quadrature formula (the
- * one that has been
- * projected onto the faces)
- * has.
- */
- static
- DataSetDescriptor
- face (const unsigned int face_no,
- const bool face_orientation,
- const bool face_flip,
- const bool face_rotation,
- const unsigned int n_quadrature_points);
-
- /**
- * Static function to generate an
- * offset object for a given subface of
- * a cell with the given face
- * orientation, flip and rotation. This
- * function of course is only allowed
- * if <tt>dim>=2</tt>, and the face
- * orientation, flip and rotation are
- * ignored if the space dimension
- * equals 2.
- *
- * The last but one argument denotes
- * the number of quadrature
- * points the
- * lower-dimensional face
- * quadrature formula (the
- * one that has been
- * projected onto the faces)
- * has.
- *
- * Through the last argument
- * anisotropic refinement can be
- * respected.
- */
- static
- DataSetDescriptor
- subface (const unsigned int face_no,
- const unsigned int subface_no,
- const bool face_orientation,
- const bool face_flip,
- const bool face_rotation,
- const unsigned int n_quadrature_points,
- const internal::SubfaceCase<dim> ref_case=internal::SubfaceCase<dim>::case_isotropic);
-
- /**
- * Conversion operator to an
- * integer denoting the
- * offset of the first
- * element of this dataset in
- * the set of quadrature
- * formulas all projected
- * onto faces and
- * subfaces. This conversion
- * operator allows us to use
- * offset descriptor objects
- * in place of integer
- * offsets.
- */
- operator unsigned int () const;
-
- private:
- /**
- * Store the integer offset
- * for a given cell, face, or
- * subface.
- */
- const unsigned int dataset_offset;
-
- /**
- * This is the real
- * constructor, but it is
- * private and thus only
- * available to the static
- * member functions above.
- */
- DataSetDescriptor (const unsigned int dataset_offset);
- };
+ DataSetDescriptor
+ subface (const unsigned int face_no,
+ const unsigned int subface_no,
+ const bool face_orientation,
+ const bool face_flip,
+ const bool face_rotation,
+ const unsigned int n_quadrature_points,
+ const internal::SubfaceCase<dim> ref_case=internal::SubfaceCase<dim>::case_isotropic);
+
+ /**
+ * Conversion operator to an
+ * integer denoting the
+ * offset of the first
+ * element of this dataset in
+ * the set of quadrature
+ * formulas all projected
+ * onto faces and
+ * subfaces. This conversion
+ * operator allows us to use
+ * offset descriptor objects
+ * in place of integer
+ * offsets.
+ */
+ operator unsigned int () const;
private:
- /**
- * Given a quadrature object in
- * 2d, reflect all quadrature
- * points at the main diagonal
- * and return them with their
- * original weights.
- *
- * This function is necessary for
- * projecting a 2d quadrature
- * rule onto the faces of a 3d
- * cube, since there we need both
- * orientations.
- */
- static Quadrature<2> reflect (const Quadrature<2> &q);
-
- /**
- * Given a quadrature object in
- * 2d, rotate all quadrature
- * points by @p n_times * 90 degrees
- * counterclockwise
- * and return them with their
- * original weights.
- *
- * This function is necessary for
- * projecting a 2d quadrature
- * rule onto the faces of a 3d
- * cube, since there we need all
- * rotations to account for
- * face_flip and face_rotation
- * of non-standard faces.
- */
- static Quadrature<2> rotate (const Quadrature<2> &q,
- const unsigned int n_times);
+ /**
+ * Store the integer offset
+ * for a given cell, face, or
+ * subface.
+ */
+ const unsigned int dataset_offset;
+
+ /**
+ * This is the real
+ * constructor, but it is
+ * private and thus only
+ * available to the static
+ * member functions above.
+ */
+ DataSetDescriptor (const unsigned int dataset_offset);
+ };
+
+ private:
+ /**
+ * Given a quadrature object in
+ * 2d, reflect all quadrature
+ * points at the main diagonal
+ * and return them with their
+ * original weights.
+ *
+ * This function is necessary for
+ * projecting a 2d quadrature
+ * rule onto the faces of a 3d
+ * cube, since there we need both
+ * orientations.
+ */
+ static Quadrature<2> reflect (const Quadrature<2> &q);
+
+ /**
+ * Given a quadrature object in
+ * 2d, rotate all quadrature
+ * points by @p n_times * 90 degrees
+ * counterclockwise
+ * and return them with their
+ * original weights.
+ *
+ * This function is necessary for
+ * projecting a 2d quadrature
+ * rule onto the faces of a 3d
+ * cube, since there we need all
+ * rotations to account for
+ * face_flip and face_rotation
+ * of non-standard faces.
+ */
+ static Quadrature<2> rotate (const Quadrature<2> &q,
+ const unsigned int n_times);
};
/*@}*/
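// Usage sketch (not part of this patch): project a 1d Gauss rule to all four
// faces of a 2d reference cell and use a DataSetDescriptor to find the offset
// of one face inside the combined set of points and weights. The QGauss class
// and the header names are assumed.
#include <deal.II/base/quadrature_lib.h>   // QGauss
#include <deal.II/base/qprojector.h>       // QProjector (header name assumed)

void qprojector_example ()
{
  using namespace dealii;

  const QGauss<1>     face_quadrature (2);
  const Quadrature<2> all_faces
    = QProjector<2>::project_to_all_faces (face_quadrature);
  // all_faces now holds 4 faces x 2 points = 8 quadrature points.

  const unsigned int offset
    = QProjector<2>::DataSetDescriptor::face (1,      // face_no
                                              true,   // face_orientation
                                              false,  // face_flip
                                              false,  // face_rotation
                                              face_quadrature.size ());
  // quadrature point q of face 1 is all_faces.point (offset + q)
  (void)offset;
}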
template<typename T, typename P = void>
class SmartPointer
{
- public:
- /**
- * Standard constructor for null
- * pointer. The id of this
- * pointer is set to the name of
- * the class P.
- */
- SmartPointer ();
-
- /*
- * Copy constructor for
- * SmartPointer. We do not
- * copy the object subscribed to
- * from <tt>tt</tt>, but subscribe
- * ourselves to it again.
- */
- template <class Q>
- SmartPointer (const SmartPointer<T,Q> &tt);
-
- /*
- * Copy constructor for
- * SmartPointer. We do not
- * copy the object subscribed to
- * from <tt>tt</tt>, but subscribe
- * ourselves to it again.
- */
- SmartPointer (const SmartPointer<T,P> &tt);
-
- /**
- * Constructor taking a normal
- * pointer. If possible, i.e. if
- * the pointer is not a null
- * pointer, the constructor
- * subscribes to the given object
- * to lock it, i.e. to prevent
- * its destruction before the end
- * of its use.
- *
- * The <tt>id</tt> is used in the
- * call to
- * Subscriptor::subscribe(id) and
- * by ~SmartPointer() in the call
- * to Subscriptor::unsubscribe().
- */
- SmartPointer (T *t, const char* id);
-
- /**
- * Constructor taking a normal
- * pointer. If possible, i.e. if
- * the pointer is not a null
- * pointer, the constructor
- * subscribes to the given object
- * to lock it, i.e. to prevent
- * its destruction before the end
- * of its use. The id of this
- * pointer is set to the name of
- * the class P.
- */
- SmartPointer (T *t);
-
-
- /**
- * Destructor, removing the
- * subscription.
- */
- ~SmartPointer();
-
- /**
- * Assignment operator for normal
- * pointers. The pointer
- * subscribes to the new object
- * automatically and unsubscribes
- * to an old one if it exists. It
- * will not try to subscribe to a
- * null-pointer, but still
- * delete the old subscription.
- */
- SmartPointer<T,P> & operator= (T *tt);
-
- /**
- * Assignment operator for
- * SmartPointer. The pointer
- * subscribes to the new object
- * automatically and unsubscribes
- * to an old one if it exists.
- */
- template <class Q>
- SmartPointer<T,P> & operator= (const SmartPointer<T,Q> &tt);
-
- /**
- * Assignment operator for
- * SmartPointer. The pointer
- * subscribes to the new object
- * automatically and unsubscribes
- * to an old one if it exists.
- */
- SmartPointer<T,P> & operator= (const SmartPointer<T,P> &tt);
-
- /**
- * Delete the object pointed to
- * and set the pointer to zero.
- */
- void clear ();
-
- /**
- * Conversion to normal pointer.
- */
- operator T* () const;
-
- /**
- * Dereferencing operator. This
- * operator throws an
- * ExcNotInitialized if the
- * pointer is a null pointer.
- */
- T& operator * () const;
-
- /**
- * Dereferencing operator. This
- * operator throws an
- * ExcNotInitialized if the
- * pointer is a null pointer.
- */
- T * operator -> () const;
-
- /**
- * Exchange the pointers of this
- * object and the argument. Since
- * both the objects to which is
- * pointed are subscribed to
- * before and after, we do not
- * have to change their
- * subscription counters.
- *
- * Note that this function (with
- * two arguments) and the
- * respective functions where one
- * of the arguments is a pointer
- * and the other one is a C-style
- * pointer are implemented in
- * global namespace.
- */
- template <class Q>
- void swap (SmartPointer<T,Q> &tt);
-
- /**
- * Swap pointers between this
- * object and the pointer
- * given. As this releases the
- * object pointed to presently,
- * we reduce its subscription
- * count by one, and increase it
- * at the object which we will
- * point to in the future.
- *
- * Note that we indeed need a
- * reference of a pointer, as we
- * want to change the pointer
- * variable which we are given.
- */
- void swap (T *&tt);
-
- /**
- * Return an estimate of the
- * amount of memory (in bytes)
- * used by this class. Note in
- * particular, that this only
- * includes the amount of memory
- * used by <b>this</b> object, not
- * by the object pointed to.
- */
- std::size_t memory_consumption () const;
-
- private:
- /**
- * Pointer to the object we want
- * to subscribe to. Since it is
- * often necessary to follow this
- * pointer when debugging, we
- * have deliberately chosen a
- * short name.
- */
- T * t;
- /**
- * The identification for the
- * subscriptor.
- */
- const char* const id;
+ public:
+ /**
+ * Standard constructor for null
+ * pointer. The id of this
+ * pointer is set to the name of
+ * the class P.
+ */
+ SmartPointer ();
+
+ /*
+ * Copy constructor for
+ * SmartPointer. We do not
+ * copy the object subscribed to
+ * from <tt>tt</tt>, but subscribe
+ * ourselves to it again.
+ */
+ template <class Q>
+ SmartPointer (const SmartPointer<T,Q> &tt);
+
+ /*
+ * Copy constructor for
+ * SmartPointer. We do not
+ * copy the object subscribed to
+ * from <tt>tt</tt>, but subscribe
+ * ourselves to it again.
+ */
+ SmartPointer (const SmartPointer<T,P> &tt);
+
+ /**
+ * Constructor taking a normal
+ * pointer. If possible, i.e. if
+ * the pointer is not a null
+ * pointer, the constructor
+ * subscribes to the given object
+ * to lock it, i.e. to prevent
+ * its destruction before the end
+ * of its use.
+ *
+ * The <tt>id</tt> is used in the
+ * call to
+ * Subscriptor::subscribe(id) and
+ * by ~SmartPointer() in the call
+ * to Subscriptor::unsubscribe().
+ */
+ SmartPointer (T *t, const char *id);
+
+ /**
+ * Constructor taking a normal
+ * pointer. If possible, i.e. if
+ * the pointer is not a null
+ * pointer, the constructor
+ * subscribes to the given object
+ * to lock it, i.e. to prevent
+ * its destruction before the end
+ * of its use. The id of this
+ * pointer is set to the name of
+ * the class P.
+ */
+ SmartPointer (T *t);
+
+
+ /**
+ * Destructor, removing the
+ * subscription.
+ */
+ ~SmartPointer();
+
+ /**
+ * Assignment operator for normal
+ * pointers. The pointer
+ * subscribes to the new object
+ * automatically and unsubscribes
+ * to an old one if it exists. It
+ * will not try to subscribe to a
+ * null-pointer, but still
+ * delete the old subscription.
+ */
+ SmartPointer<T,P> &operator= (T *tt);
+
+ /**
+ * Assignment operator for
+ * SmartPointer. The pointer
+ * subscribes to the new object
+ * automatically and unsubscribes
+ * to an old one if it exists.
+ */
+ template <class Q>
+ SmartPointer<T,P> &operator= (const SmartPointer<T,Q> &tt);
+
+ /**
+ * Assignment operator for
+ * SmartPointer. The pointer
+ * subscribes to the new object
+ * automatically and unsubscribes
+ * to an old one if it exists.
+ */
+ SmartPointer<T,P> &operator= (const SmartPointer<T,P> &tt);
+
+ /**
+ * Delete the object pointed to
+ * and set the pointer to zero.
+ */
+ void clear ();
+
+ /**
+ * Conversion to normal pointer.
+ */
+ operator T *() const;
+
+ /**
+ * Dereferencing operator. This
+ * operator throws an
+ * ExcNotInitialized if the
+ * pointer is a null pointer.
+ */
+ T &operator * () const;
+
+ /**
+ * Dereferencing operator. This
+ * operator throws an
+ * ExcNotInitialized if the
+ * pointer is a null pointer.
+ */
+ T *operator -> () const;
+
+ /**
+ * Exchange the pointers of this
+ * object and the argument. Since
+ * both of the objects pointed to
+ * are subscribed to
+ * before and after, we do not
+ * have to change their
+ * subscription counters.
+ *
+ * Note that this function (with
+ * two arguments) and the
+ * respective functions where one
+ * of the arguments is a pointer
+ * and the other one is a C-style
+ * pointer are implemented in
+ * global namespace.
+ */
+ template <class Q>
+ void swap (SmartPointer<T,Q> &tt);
+
+ /**
+ * Swap pointers between this
+ * object and the pointer
+ * given. As this releases the
+ * object pointed to presently,
+ * we reduce its subscription
+ * count by one, and increase it
+ * at the object which we will
+ * point to in the future.
+ *
+ * Note that we indeed need a
+ * reference of a pointer, as we
+ * want to change the pointer
+ * variable which we are given.
+ */
- void swap (T *&tt);
++ void swap (T *&tt);
+
+ /**
+ * Return an estimate of the
+ * amount of memory (in bytes)
+ * used by this class. Note in
+ * particular, that this only
+ * includes the amount of memory
+ * used by <b>this</b> object, not
+ * by the object pointed to.
+ */
+ std::size_t memory_consumption () const;
+
+ private:
+ /**
+ * Pointer to the object we want
+ * to subscribe to. Since it is
+ * often necessary to follow this
+ * pointer when debugging, we
+ * have deliberately chosen a
+ * short name.
+ */
+ T *t;
+ /**
+ * The identification for the
+ * subscriptor.
+ */
+ const char *const id;
};
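// Usage sketch (not part of this patch): a class that stores a SmartPointer
// to a matrix it does not own. While the pointer exists, the Subscriptor
// mechanism keeps the matrix from being destroyed underneath us. Header names
// are the usual deal.II ones and are assumed here.
#include <deal.II/base/smartpointer.h>
#include <deal.II/lac/sparse_matrix.h>

class SimpleSolver
{
public:
  SimpleSolver (const dealii::SparseMatrix<double> &m)
    : matrix (&m, "SimpleSolver")   // subscribe to m under the id "SimpleSolver"
  {}

  double first_diagonal_entry () const
  {
    return matrix->diag_element (0);   // operator-> asserts a non-null pointer
  }

private:
  dealii::SmartPointer<const dealii::SparseMatrix<double>, SimpleSolver> matrix;
};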
Iterator
lower_bound (Iterator first,
Iterator last,
- const T &val);
+ const T &val);
- /**
- * The same function as above, but taking
- * an argument that is used to compare
- * individual elements of the sequence of
- * objects pointed to by the iterators.
- */
+ /**
+ * The same function as above, but taking
+ * an argument that is used to compare
+ * individual elements of the sequence of
+ * objects pointed to by the iterators.
+ */
template<typename Iterator, typename T, typename Comp>
Iterator
lower_bound (Iterator first,
void
destroy_communicator (Epetra_Comm &communicator);
- /**
- * Return the number of MPI processes
- * there exist in the given communicator
- * object. If this is a sequential job,
- * it returns 1.
- */
+ /**
+ * Return the number of MPI processes
+ * there exist in the given communicator
+ * object. If this is a sequential job,
+ * it returns 1.
+ */
unsigned int get_n_mpi_processes (const Epetra_Comm &mpi_communicator);
- /**
- * Return the number of the present MPI
- * process in the space of processes
- * described by the given
- * communicator. This will be a unique
- * value for each process between zero
- * and (less than) the number of all
- * processes (given by
- * get_n_mpi_processes()).
- */
+ /**
+ * Return the number of the present MPI
+ * process in the space of processes
+ * described by the given
+ * communicator. This will be a unique
+ * value for each process between zero
+ * and (less than) the number of all
+ * processes (given by
+ * get_n_mpi_processes()).
+ */
unsigned int get_this_mpi_process (const Epetra_Comm &mpi_communicator);
- /**
- * Given a Trilinos Epetra map, create a
- * new map that has the same subdivision
- * of elements to processors but uses the
- * given communicator object instead of
- * the one stored in the first
- * argument. In essence, this means that
- * we create a map that communicates
- * among the same processors in the same
- * way, but using a separate channel.
- *
- * This function is typically used with a
- * communicator that has been obtained by
- * the duplicate_communicator() function.
- */
+ /**
+ * Given a Trilinos Epetra map, create a
+ * new map that has the same subdivision
+ * of elements to processors but uses the
+ * given communicator object instead of
+ * the one stored in the first
+ * argument. In essence, this means that
+ * we create a map that communicates
+ * among the same processors in the same
+ * way, but using a separate channel.
+ *
+ * This function is typically used with a
+ * communicator that has been obtained by
+ * the duplicate_communicator() function.
+ */
Epetra_Map
- duplicate_map (const Epetra_BlockMap &map,
+ duplicate_map (const Epetra_BlockMap &map,
const Epetra_Comm &comm);
}
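// Usage sketch (not part of this patch): query the parallel environment
// through an Epetra communicator. The functions are called unqualified here;
// in deal.II they are assumed to live in the Utilities::Trilinos namespace,
// and an MPI-enabled Trilinos build is assumed.
#include <Epetra_MpiComm.h>
#include <mpi.h>

void report_rank ()
{
  Epetra_MpiComm communicator (MPI_COMM_WORLD);

  const unsigned int n_procs = get_n_mpi_processes (communicator);
  const unsigned int my_rank = get_this_mpi_process (communicator);
  // my_rank lies in [0, n_procs); for a sequential job n_procs == 1.
  (void)n_procs; (void)my_rank;
}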
template <int dim, int spacedim>
class PolicyBase
{
- public:
- /**
- * Destructor.
- */
- virtual ~PolicyBase ();
-
- /**
- * Distribute degrees of freedom on
- * the object given as last argument.
- */
- virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
-
- /**
- * Renumber degrees of freedom as
- * specified by the first argument.
- */
- virtual
- NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+ public:
+ /**
+ * Destructor.
+ */
+ virtual ~PolicyBase ();
+
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
};
template <int dim, int spacedim>
class Sequential : public PolicyBase<dim,spacedim>
{
- public:
- /**
- * Distribute degrees of freedom on
- * the object given as last argument.
- */
- virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
-
- /**
- * Renumber degrees of freedom as
- * specified by the first argument.
- */
- virtual
- NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ public:
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
};
template <int dim, int spacedim>
class ParallelDistributed : public PolicyBase<dim,spacedim>
{
- public:
- /**
- * Distribute degrees of freedom on
- * the object given as last argument.
- */
- virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
-
- /**
- * Renumber degrees of freedom as
- * specified by the first argument.
- */
- virtual
- NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ public:
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
};
}
}
void
block_wise (hp::DoFHandler<dim> &dof_handler);
- /**
- * Sort the degrees of freedom by
- * block. It does the same
- * thing as the above function,
- * only that it does this for one
- * single level of a multi-level
- * discretization. The
- * non-multigrid part of the
- * MGDoFHandler is not touched.
- */
+ /**
+ * Sort the degrees of freedom by
+ * block. It does the same
+ * thing as the above function,
+ * only that it does this for one
+ * single level of a multi-level
+ * discretization. The
+ * non-multigrid part of the
+ * MGDoFHandler is not touched.
+ */
template <int dim>
void
- block_wise (MGDoFHandler<dim> &dof_handler,
+ block_wise (MGDoFHandler<dim> &dof_handler,
const unsigned int level);
void
downstream (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim> &direction,
+ const Point<dim> &direction,
const bool dof_wise_renumbering = false);
- /**
- * @deprecated Use downstream()
- * instead.
- */
+ /**
+ * @deprecated Use downstream()
+ * instead.
+ */
template <int dim>
void
downstream_dg (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim> &direction);
+ const Point<dim> &direction);
- /**
- * @deprecated The new function
- * of this name computes the
- * renumbering and its inverse at
- * the same time. So, at least if
- * you need both, you should use
- * the other one.
- *
- * Computes the renumbering
- * vector needed by the
- * downstream_dg() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * @deprecated The new function
+ * of this name computes the
+ * renumbering and its inverse at
+ * the same time. So, at least if
+ * you need both, you should use
+ * the other one.
+ *
+ * Computes the renumbering
+ * vector needed by the
+ * downstream_dg() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH, int dim>
void
- compute_downstream_dg (std::vector<unsigned int>& new_dof_indices,
- const DH& dof_handler,
- const Point<dim>& direction);
-
- /**
- * Computes the renumbering
- * vector needed by the
- * downstream_dg() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ compute_downstream_dg (std::vector<unsigned int> &new_dof_indices,
+ const DH &dof_handler,
+ const Point<dim> &direction);
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * downstream_dg() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH, int dim>
void
- compute_downstream (std::vector<unsigned int>& new_dof_indices,
- std::vector<unsigned int>& reverse,
- const DH& dof_handler,
- const Point<dim>& direction,
+ compute_downstream (std::vector<unsigned int> &new_dof_indices,
+ std::vector<unsigned int> &reverse,
+ const DH &dof_handler,
+ const Point<dim> &direction,
const bool dof_wise_renumbering);
- /**
- * @deprecated Use
- * compute_downstream() instead
- */
+ /**
+ * @deprecated Use
+ * compute_downstream() instead
+ */
template <class DH, int dim>
void
- compute_downstream_dg (std::vector<unsigned int>& new_dof_indices,
- std::vector<unsigned int>& reverse,
- const DH& dof_handler,
- const Point<dim>& direction);
-
- /**
- * Computes the renumbering
- * vector needed by the
- * downstream_dg() function. Does
- * not perform the renumbering on
- * the MGDoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ compute_downstream_dg (std::vector<unsigned int> &new_dof_indices,
+ std::vector<unsigned int> &reverse,
+ const DH &dof_handler,
+ const Point<dim> &direction);
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * downstream_dg() function. Does
+ * not perform the renumbering on
+ * the MGDoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <int dim>
void
- compute_downstream (std::vector<unsigned int>& new_dof_indices,
- std::vector<unsigned int>& reverse,
- const MGDoFHandler<dim>& dof_handler,
+ compute_downstream (std::vector<unsigned int> &new_dof_indices,
+ std::vector<unsigned int> &reverse,
+ const MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim>& direction,
+ const Point<dim> &direction,
const bool dof_wise_renumbering);
- /**
- * @deprecated Use
- * compute_downstream() instead
- */
+ /**
+ * @deprecated Use
+ * compute_downstream() instead
+ */
template <int dim>
void
- compute_downstream_dg (std::vector<unsigned int>& new_dof_indices,
- std::vector<unsigned int>& reverse,
- const MGDoFHandler<dim>& dof_handler,
+ compute_downstream_dg (std::vector<unsigned int> &new_dof_indices,
+ std::vector<unsigned int> &reverse,
+ const MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim>& direction);
-
- /**
- * Cell-wise clockwise numbering.
- *
- * This function produces a
- * (counter)clockwise ordering of
- * the mesh cells with respect to
- * the hub @p center and calls
- * cell_wise_dg(). Therefore, it
- * only works with Discontinuous
- * Galerkin Finite Elements,
- * i.e. all degrees of freedom
- * have to be associated with the
- * interior of the cell.
- */
+ const Point<dim> &direction);
+
+ /**
+ * Cell-wise clockwise numbering.
+ *
+ * This function produces a
+ * (counter)clockwise ordering of
+ * the mesh cells with respect to
+ * the hub @p center and calls
+ * cell_wise_dg(). Therefore, it
+ * only works with Discontinuous
+ * Galerkin Finite Elements,
+ * i.e. all degrees of freedom
+ * have to be associated with the
+ * interior of the cell.
+ */
template <class DH, int dim>
void
- clockwise_dg (DH& dof_handler,
- const Point<dim>& center,
+ clockwise_dg (DH &dof_handler,
+ const Point<dim> &center,
const bool counter = false);
- /**
- * Cell-wise clockwise numbering
- * on one level. See the other
- * function with the same name.
- */
+ /**
+ * Cell-wise clockwise numbering
+ * on one level. See the other
+ * function with the same name.
+ */
template <int dim>
void
- clockwise_dg (MGDoFHandler<dim> &dof_handler,
+ clockwise_dg (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
const Point<dim> &center,
const bool counter = false);
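// Usage sketch (not part of this patch): renumber the DoFs of a DG
// discretization clockwise around a hub point. The DoFRenumbering namespace
// qualification and the header names are assumed; 'triangulation' is a
// previously filled Triangulation<2>.
#include <deal.II/grid/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/fe/fe_dgq.h>

void renumber_clockwise (dealii::Triangulation<2> &triangulation)
{
  using namespace dealii;

  FE_DGQ<2>     fe (1);
  DoFHandler<2> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  DoFRenumbering::clockwise_dg (dof_handler, Point<2>(0.5, 0.5));
  // set the last (defaulted) argument to true to reverse the orientation
}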
void
extract_subdomain_dofs (const DH &dof_handler,
const types::subdomain_id subdomain_id,
- std::vector<bool> &selected_dofs);
+ std::vector<bool> &selected_dofs);
- /**
- * Extract the set of global DoF
- * indices that are owned by the
- * current processor. For regular
- * DoFHandler objects, this set
- * is the complete set with all
- * DoF indices. In either case,
- * it equals what
- * DoFHandler::locally_owned_dofs()
- * returns.
- */
+ /**
+ * Extract the set of global DoF
+ * indices that are owned by the
+ * current processor. For regular
+ * DoFHandler objects, this set
+ * is the complete set with all
+ * DoF indices. In either case,
+ * it equals what
+ * DoFHandler::locally_owned_dofs()
+ * returns.
+ */
template <class DH>
void
- extract_locally_owned_dofs (const DH & dof_handler,
- IndexSet & dof_set);
-
-
- /**
- * Extract the set of global DoF
- * indices that are active on the
- * current DoFHandler. For
- * regular DoFHandlers, these are
- * all DoF indices, but for
- * DoFHandler objects built on
- * parallel::distributed::Triangulation
- * this set is a superset of
- * DoFHandler::locally_owned_dofs()
- * and contains all DoF indices
- * that live on all locally owned
- * cells (including on the
- * interface to ghost
- * cells). However, it does not
- * contain the DoF indices that
- * are exclusively defined on
- * ghost or artificial cells (see
- * @ref GlossArtificialCell "the
- * glossary").
- *
- * The degrees of freedom identified by
- * this function equal those obtained
- * from the
- * dof_indices_with_subdomain_association()
- * function when called with the locally
- * owned subdomain id.
- */
+ extract_locally_owned_dofs (const DH &dof_handler,
+ IndexSet &dof_set);
+
+
+ /**
+ * Extract the set of global DoF
+ * indices that are active on the
+ * current DoFHandler. For
+ * regular DoFHandlers, these are
+ * all DoF indices, but for
+ * DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * this set is a superset of
+ * DoFHandler::locally_owned_dofs()
+ * and contains all DoF indices
+ * that live on all locally owned
+ * cells (including on the
+ * interface to ghost
+ * cells). However, it does not
+ * contain the DoF indices that
+ * are exclusively defined on
+ * ghost or artificial cells (see
+ * @ref GlossArtificialCell "the
+ * glossary").
+ *
+ * The degrees of freedom identified by
+ * this function equal those obtained
+ * from the
+ * dof_indices_with_subdomain_association()
+ * function when called with the locally
+ * owned subdomain id.
+ */
template <class DH>
void
- extract_locally_active_dofs (const DH & dof_handler,
- IndexSet & dof_set);
-
- /**
- * Extract the set of global DoF
- * indices that are active on the
- * current DoFHandler. For
- * regular DoFHandlers, these are
- * all DoF indices, but for
- * DoFHandler objects built on
- * parallel::distributed::Triangulation
- * this set is the union of
- * DoFHandler::locally_owned_dofs()
- * and the DoF indices on all
- * ghost cells. In essence, it is
- * the DoF indices on all cells
- * that are not artificial (see
- * @ref GlossArtificialCell "the glossary").
- */
+ extract_locally_active_dofs (const DH &dof_handler,
+ IndexSet &dof_set);
+
+ /**
+ * Extract the set of global DoF
+ * indices that are active on the
+ * current DoFHandler. For
+ * regular DoFHandlers, these are
+ * all DoF indices, but for
+ * DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * this set is the union of
+ * DoFHandler::locally_owned_dofs()
+ * and the DoF indices on all
+ * ghost cells. In essence, it is
+ * the DoF indices on all cells
+ * that are not artificial (see
+ * @ref GlossArtificialCell "the glossary").
+ */
template <class DH>
void
- extract_locally_relevant_dofs (const DH & dof_handler,
- IndexSet & dof_set);
-
- /**
- * For each DoF, return in the output
- * array to which subdomain (as given by
- * the <tt>cell->subdomain_id()</tt> function)
- * it belongs. The output array is
- * supposed to have the right size
- * already when calling this function.
- *
- * Note that degrees of freedom
- * associated with faces, edges, and
- * vertices may be associated with
- * multiple subdomains if they are
- * sitting on partition boundaries. In
- * these cases, we put them into one of
- * the associated partitions in an
- * undefined way. This may sometimes lead
- * to different numbers of degrees of
- * freedom in partitions, even if the
- * number of cells is perfectly
- * equidistributed. While this is
- * regrettable, it is not a problem in
- * practice since the number of degrees
- * of freedom on partition boundaries is
- * asymptotically vanishing as we refine
- * the mesh as long as the number of
- * partitions is kept constant.
- *
- * This function returns the association
- * of each DoF with one subdomain. If you
- * are looking for the association of
- * each @em cell with a subdomain, either
- * query the
- * <tt>cell->subdomain_id()</tt>
- * function, or use the
- * <tt>GridTools::get_subdomain_association</tt>
- * function.
- *
- * Note that this function is of
- * questionable use for DoFHandler objects built on
- * parallel::distributed::Triangulation
- * since in that case ownership of
- * individual degrees of freedom by MPI
- * processes is controlled by the DoF
- * handler object, not based on some
- * geometric algorithm in conjunction
- * with subdomain id. In particular, the
- * degrees of freedom identified by the
- * functions in this namespace as
- * associated with a subdomain are not
- * the same the
- * DoFHandler class
- * identifies as those it owns.
- */
+ extract_locally_relevant_dofs (const DH &dof_handler,
+ IndexSet &dof_set);
+
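// Usage sketch (not part of this patch): the three extraction functions on a
// (possibly distributed) DoFHandler. In deal.II these functions are assumed
// to live in the DoFTools namespace.
template <class DH>
void gather_index_sets (const DH &dof_handler)
{
  using namespace dealii;

  IndexSet locally_owned_dofs;
  IndexSet locally_active_dofs;
  IndexSet locally_relevant_dofs;

  DoFTools::extract_locally_owned_dofs    (dof_handler, locally_owned_dofs);
  DoFTools::extract_locally_active_dofs   (dof_handler, locally_active_dofs);
  DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);

  // locally_owned_dofs is a subset of locally_active_dofs, which in turn is
  // a subset of locally_relevant_dofs.
}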
+ /**
+ * For each DoF, return in the output
+ * array to which subdomain (as given by
+ * the <tt>cell->subdomain_id()</tt> function)
+ * it belongs. The output array is
+ * supposed to have the right size
+ * already when calling this function.
+ *
+ * Note that degrees of freedom
+ * associated with faces, edges, and
+ * vertices may be associated with
+ * multiple subdomains if they are
+ * sitting on partition boundaries. In
+ * these cases, we put them into one of
+ * the associated partitions in an
+ * undefined way. This may sometimes lead
+ * to different numbers of degrees of
+ * freedom in partitions, even if the
+ * number of cells is perfectly
+ * equidistributed. While this is
+ * regrettable, it is not a problem in
+ * practice since the number of degrees
+ * of freedom on partition boundaries is
+ * asymptotically vanishing as we refine
+ * the mesh as long as the number of
+ * partitions is kept constant.
+ *
+ * This function returns the association
+ * of each DoF with one subdomain. If you
+ * are looking for the association of
+ * each @em cell with a subdomain, either
+ * query the
+ * <tt>cell->subdomain_id()</tt>
+ * function, or use the
+ * <tt>GridTools::get_subdomain_association</tt>
+ * function.
+ *
+ * Note that this function is of
+ * questionable use for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * since in that case ownership of
+ * individual degrees of freedom by MPI
+ * processes is controlled by the DoF
+ * handler object, not based on some
+ * geometric algorithm in conjunction
+ * with subdomain id. In particular, the
+ * degrees of freedom identified by the
+ * functions in this namespace as
+ * associated with a subdomain are not
+ * the same as the ones the
+ * DoFHandler class
+ * identifies as those it owns.
+ */
template <class DH>
void
get_subdomain_association (const DH &dof_handler,
void
count_dofs_per_block (const DH &dof,
std::vector<unsigned int> &dofs_per_block,
- const std::vector<unsigned int> &target_block
+ const std::vector<unsigned int> &target_block
- = std::vector<unsigned int>());
-
- /**
- * @deprecated See the previous
- * function with the same name
- * for a description. This
- * function exists for
- * compatibility with older
- * versions only.
- */
+ = std::vector<unsigned int>());
+
+ /**
+ * @deprecated See the previous
+ * function with the same name
+ * for a description. This
+ * function exists for
+ * compatibility with older
+ * versions only.
+ */
template <int dim, int spacedim>
void
- count_dofs_per_component (const DoFHandler<dim,spacedim>& dof_handler,
- std::vector<unsigned int>& dofs_per_component,
+ count_dofs_per_component (const DoFHandler<dim,spacedim> &dof_handler,
+ std::vector<unsigned int> &dofs_per_component,
std::vector<unsigned int> target_component);
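// Usage sketch (not part of this patch): count how many DoFs belong to each
// block of a two-block system (say velocity and pressure). The DoFTools
// namespace qualification is assumed, as is a dof_handler distributed with a
// two-block FESystem.
#include <vector>

template <class DH>
void query_block_sizes (const DH &dof_handler)
{
  std::vector<unsigned int> dofs_per_block (2);
  dealii::DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);

  // dofs_per_block[0] + dofs_per_block[1] == dof_handler.n_dofs()
}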
- /**
- * This function can be used when
- * different variables shall be
- * discretized on different
- * grids, where one grid is
- * coarser than the other. This
- * idea might seem nonsensical at
- * first, but has reasonable
- * applications in inverse
- * (parameter estimation)
- * problems, where there might
- * not be enough information to
- * recover the parameter on the
- * same grid as the state
- * variable; furthermore, the
- * smoothness properties of state
- * variable and parameter might
- * not be too much related, so
- * using different grids might be
- * an alternative to using
- * stronger regularization of the
- * problem.
- *
- * The basic idea of this
- * function is explained in the
- * following. Let us, for
- * convenience, denote by
- * ``parameter grid'' the coarser
- * of the two grids, and by
- * ``state grid'' the finer of
- * the two. We furthermore assume
- * that the finer grid can be
- * obtained by refinement of the
- * coarser one, i.e. the fine
- * grid is at least as much
- * refined as the coarse grid at
- * each point of the
- * domain. Then, each shape
- * function on the coarse grid
- * can be represented as a linear
- * combination of shape functions
- * on the fine grid (assuming
- * identical ansatz
- * spaces). Thus, if we
- * discretize as usual, using
- * shape functions on the fine
- * grid, we can consider the
- * restriction that the parameter
- * variable shall in fact be
- * discretized by shape functions
- * on the coarse grid as a
- * constraint. These constraints
- * are linear and happen to have
- * the form managed by the
- * ``ConstraintMatrix'' class.
- *
- * The construction of these
- * constraints is done as
- * follows: for each of the
- * degrees of freedom (i.e. shape
- * functions) on the coarse grid,
- * we compute its representation
- * on the fine grid, i.e. how the
- * linear combination of shape
- * functions on the fine grid
- * looks like that resembles the
- * shape function on the coarse
- * grid. From this information,
- * we can then compute the
- * constraints which have to hold
- * if a solution of a linear
- * equation on the fine grid
- * shall be representable on the
- * coarse grid. The exact
- * algorithm how these
- * constraints can be computed is
- * rather complicated and is best
- * understood by reading the
- * source code, which contains
- * many comments.
- *
- * Before explaining the use of
- * this function, we would like
- * to state that the total number
- * of degrees of freedom used for
- * the discretization is not
- * reduced by the use of this
- * function, i.e. even though we
- * discretize one variable on a
- * coarser grid, the total number
- * of degrees of freedom is that
- * of the fine grid. This seems
- * to be counter-productive,
- * since it does not give us a
- * benefit from using a coarser
- * grid. The reason why it may be
- * useful to choose this approach
- * nonetheless is three-fold:
- * first, as stated above, there
- * might not be enough
- * information to recover a
- * parameter on a fine grid,
- * i.e. we chose to discretize it
- * on the coarse grid not to save
- * DoFs, but for other
- * reasons. Second, the
- * ``ConstraintMatrix'' includes
- * the constraints into the
- * linear system of equations, by
- * which constrained nodes become
- * dummy nodes; we may therefore
- * exclude them from the linear
- * algebra, for example by
- * sorting them to the back of
- * the DoF numbers and simply
- * calling the solver for the
- * upper left block of the matrix
- * which works on the
- * non-constrained nodes only,
- * thus actually realizing the
- * savings in numerical effort
- * from the reduced number of
- * actual degrees of freedom. The
- * third reason is that for some
- * or other reason we have chosen
- * to use two different grids, it
- * may be actually quite
- * difficult to write a function
- * that assembles the system
- * matrix for finite element
- * spaces on different grids;
- * using the approach of
- * constraints as with this
- * function allows to use
- * standard techniques when
- * discretizing on only one grid
- * (the finer one) without having
- * to take care of the fact that
- * one or several of the variable
- * actually belong to different
- * grids.
- *
- * The use of this function is as
- * follows: it accepts as
- * parameters two DoF Handlers,
- * the first of which refers to
- * the coarse grid and the second
- * of which is the fine grid. On
- * both, a finite element is
- * represented by the DoF handler
- * objects, which will usually
- * have several components, which
- * may belong to different finite
- * elements. The second and
- * fourth parameter of this
- * function therefore state which
- * variable on the coarse grid
- * shall be used to restrict the
- * stated component on the fine
- * grid. Of course, the finite
- * elements used for the
- * respective components on the
- * two grids need to be the
- * same. An example may clarify
- * this: consider the parameter
- * estimation mentioned briefly
- * above; there, on the fine grid
- * the whole discretization is
- * done, thus the variables are
- * ``u'', ``q'', and the Lagrange
- * multiplier ``lambda'', which
- * are discretized using
- * continuous linear, piecewise
- * constant discontinuous, and
- * continuous linear elements,
- * respectively. Only the
- * parameter ``q'' shall be
- * represented on the coarse
- * grid, thus the DoFHandler
- * object on the coarse grid
- * represents only one variable,
- * discretized using piecewise
- * constant discontinuous
- * elements. Then, the parameter
- * denoting the component on the
- * coarse grid would be zero (the
- * only possible choice, since
- * the variable on the coarse
- * grid is scalar), and one on
- * the fine grid (corresponding
- * to the variable ``q''; zero
- * would be ``u'', two would be
- * ``lambda''). Furthermore, an
- * object of type IntergridMap
- * is needed; this could in
- * principle be generated by the
- * function itself from the two
- * DoFHandler objects, but since
- * it is probably available
- * anyway in programs that use
- * this function, we shall use it
- * instead of re-generating
- * it. Finally, the computed
- * constraints are entered into a
- * variable of type
- * ConstraintMatrix; the
- * constraints are added,
- * i.e. previous contents which
- * may have, for example, be
- * obtained from hanging nodes,
- * are not deleted, so that you
- * only need one object of this
- * type.
- */
+ /**
+ * This function can be used when
+ * different variables shall be
+ * discretized on different
+ * grids, where one grid is
+ * coarser than the other. This
+ * idea might seem nonsensical at
+ * first, but has reasonable
+ * applications in inverse
+ * (parameter estimation)
+ * problems, where there might
+ * not be enough information to
+ * recover the parameter on the
+ * same grid as the state
+ * variable; furthermore, the
+ * smoothness properties of state
+ * variable and parameter might
+ * not be too much related, so
+ * using different grids might be
+ * an alternative to using
+ * stronger regularization of the
+ * problem.
+ *
+ * The basic idea of this
+ * function is explained in the
+ * following. Let us, for
+ * convenience, denote by
+ * ``parameter grid'' the coarser
+ * of the two grids, and by
+ * ``state grid'' the finer of
+ * the two. We furthermore assume
+ * that the finer grid can be
+ * obtained by refinement of the
+ * coarser one, i.e. the fine
+ * grid is at least as much
+ * refined as the coarse grid at
+ * each point of the
+ * domain. Then, each shape
+ * function on the coarse grid
+ * can be represented as a linear
+ * combination of shape functions
+ * on the fine grid (assuming
+ * identical ansatz
+ * spaces). Thus, if we
+ * discretize as usual, using
+ * shape functions on the fine
+ * grid, we can consider the
+ * restriction that the parameter
+ * variable shall in fact be
+ * discretized by shape functions
+ * on the coarse grid as a
+ * constraint. These constraints
+ * are linear and happen to have
+ * the form managed by the
+ * ``ConstraintMatrix'' class.
+ *
+ * The construction of these
+ * constraints is done as
+ * follows: for each of the
+ * degrees of freedom (i.e. shape
+ * functions) on the coarse grid,
+ * we compute its representation
+ * on the fine grid, i.e. which
+ * linear combination of shape
+ * functions on the fine grid
+ * equals the
+ * shape function on the coarse
+ * grid. From this information,
+ * we can then compute the
+ * constraints which have to hold
+ * if a solution of a linear
+ * equation on the fine grid
+ * shall be representable on the
+ * coarse grid. The exact
+ * algorithm by which these
+ * constraints are computed is
+ * rather complicated and is best
+ * understood by reading the
+ * source code, which contains
+ * many comments.
+ *
+ * Before explaining the use of
+ * this function, we would like
+ * to state that the total number
+ * of degrees of freedom used for
+ * the discretization is not
+ * reduced by the use of this
+ * function, i.e. even though we
+ * discretize one variable on a
+ * coarser grid, the total number
+ * of degrees of freedom is that
+ * of the fine grid. This seems
+ * to be counter-productive,
+ * since it does not give us a
+ * benefit from using a coarser
+ * grid. The reason why it may be
+ * useful to choose this approach
+ * nonetheless is three-fold:
+ * first, as stated above, there
+ * might not be enough
+ * information to recover a
+ * parameter on a fine grid,
+ * i.e. we chose to discretize it
+ * on the coarse grid not to save
+ * DoFs, but for other
+ * reasons. Second, the
+ * ``ConstraintMatrix'' includes
+ * the constraints into the
+ * linear system of equations, by
+ * which constrained nodes become
+ * dummy nodes; we may therefore
+ * exclude them from the linear
+ * algebra, for example by
+ * sorting them to the back of
+ * the DoF numbers and simply
+ * calling the solver for the
+ * upper left block of the matrix
+ * which works on the
+ * non-constrained nodes only,
+ * thus actually realizing the
+ * savings in numerical effort
+ * from the reduced number of
+ * actual degrees of freedom. The
+ * third reason is that if, for one
+ * reason or another, we have chosen
+ * to use two different grids, it
+ * may actually be quite
+ * difficult to write a function
+ * that assembles the system
+ * matrix for finite element
+ * spaces on different grids;
+ * using the approach of
+ * constraints as with this
+ * function allows us to use
+ * standard techniques when
+ * discretizing on only one grid
+ * (the finer one) without having
+ * to take care of the fact that
+ * one or several of the variables
+ * actually belong to different
+ * grids.
+ *
+ * The use of this function is as
+ * follows: it accepts as
+ * parameters two DoF Handlers,
+ * the first of which refers to
+ * the coarse grid and the second
+ * of which is the fine grid. On
+ * both, a finite element is
+ * represented by the DoF handler
+ * objects, which will usually
+ * have several components, which
+ * may belong to different finite
+ * elements. The second and
+ * fourth parameter of this
+ * function therefore state which
+ * variable on the coarse grid
+ * shall be used to restrict the
+ * stated component on the fine
+ * grid. Of course, the finite
+ * elements used for the
+ * respective components on the
+ * two grids need to be the
+ * same. An example may clarify
+ * this: consider the parameter
+ * estimation mentioned briefly
+ * above; there, on the fine grid
+ * the whole discretization is
+ * done, thus the variables are
+ * ``u'', ``q'', and the Lagrange
+ * multiplier ``lambda'', which
+ * are discretized using
+ * continuous linear, piecewise
+ * constant discontinuous, and
+ * continuous linear elements,
+ * respectively. Only the
+ * parameter ``q'' shall be
+ * represented on the coarse
+ * grid, thus the DoFHandler
+ * object on the coarse grid
+ * represents only one variable,
+ * discretized using piecewise
+ * constant discontinuous
+ * elements. Then, the parameter
+ * denoting the component on the
+ * coarse grid would be zero (the
+ * only possible choice, since
+ * the variable on the coarse
+ * grid is scalar), and one on
+ * the fine grid (corresponding
+ * to the variable ``q''; zero
+ * would be ``u'', two would be
+ * ``lambda''). Furthermore, an
+ * object of type InterGridMap
+ * is needed; this could in
+ * principle be generated by the
+ * function itself from the two
+ * DoFHandler objects, but since
+ * it is probably available
+ * anyway in programs that use
+ * this function, we shall use it
+ * instead of re-generating
+ * it. Finally, the computed
+ * constraints are entered into a
+ * variable of type
+ * ConstraintMatrix; the
+ * constraints are added,
+ * i.e. previous contents which
+ * may, for example, have been
+ * obtained from hanging nodes,
+ * are not deleted, so that you
+ * only need one object of this
+ * type.
+ */
template <int dim, int spacedim>
void
compute_intergrid_constraints (const DoFHandler<dim,spacedim> &coarse_grid,
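Following the parameter estimation example described above, a hedged usage sketch (the names parameter_dof_handler, state_dof_handler and the component numbers are assumptions for this illustration; the remaining arguments of the declaration are abbreviated in this excerpt):

   // map each cell of the coarse (parameter) grid to the finer (state) grid
   InterGridMap<DoFHandler<dim> > coarse_to_fine_map;
   coarse_to_fine_map.make_mapping (parameter_dof_handler, state_dof_handler);

   ConstraintMatrix constraints;
   DoFTools::make_hanging_node_constraints (state_dof_handler, constraints);

   // constrain component 1 of the fine grid (the parameter "q" in the
   // example above) to be representable by component 0 of the coarse grid
   DoFTools::compute_intergrid_constraints (parameter_dof_handler, 0,
                                            state_dof_handler,     1,
                                            coarse_to_fine_map,
                                            constraints);
   constraints.close ();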
template <class DH>
void
map_dof_to_boundary_indices (const DH &dof_handler,
- std::vector<unsigned int> &mapping);
+ std::vector<unsigned int> &mapping);
- /**
- * Same as the previous function,
- * except that only those parts
- * of the boundary are considered
- * for which the boundary
- * indicator is listed in the
- * second argument.
- *
- * See the general doc of this
- * class for more information.
- */
+ /**
+ * Same as the previous function,
+ * except that only those parts
+ * of the boundary are considered
+ * for which the boundary
+ * indicator is listed in the
+ * second argument.
+ *
+ * See the general doc of this
+ * class for more information.
+ */
template <class DH>
void
map_dof_to_boundary_indices (const DH &dof_handler,
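A short sketch of the first variant (assuming a DoFHandler named dof_handler; the second variant would additionally receive the set of boundary indicators to consider):

   std::vector<unsigned int> dof_to_boundary_mapping (dof_handler.n_dofs());
   DoFTools::map_dof_to_boundary_indices (dof_handler, dof_to_boundary_mapping);

   // dof_to_boundary_mapping[i] now holds the index of global dof i in the
   // numbering of boundary dofs, or DoFHandler<dim>::invalid_dof_index if
   // dof i does not lie on the boundary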
template <int dim>
class FE_Nothing : public FiniteElement<dim>
{
- public:
-
- /**
- * Constructor. Argument denotes the
- * number of components to give this
- * finite element (default = 1).
- */
- FE_Nothing (unsigned int n_components = 1);
-
- /**
- * A sort of virtual copy
- * constructor. Some places in
- * the library, for example the
- * constructors of FESystem as
- * well as the hp::FECollection
- * class, need to make copied of
- * finite elements without
- * knowing their exact type. They
- * do so through this function.
- */
- virtual
- FiniteElement<dim> *
- clone() const;
-
- /**
- * Return a string that uniquely
- * identifies a finite
- * element. In this case it is
- * <code>FE_Nothing@<dim@></code>.
- */
- virtual
- std::string
- get_name() const;
-
- /**
- * Determine the values a finite
- * element should compute on
- * initialization of data for
- * FEValues.
- *
- * Given a set of flags
- * indicating what quantities are
- * requested from a FEValues
- * object, update_once() and
- * update_each() compute which
- * values must really be
- * computed. Then, the
- * <tt>fill_*_values</tt> functions
- * are called with the result of
- * these.
- *
- * In this case, since the element
- * has zero degrees of freedom and
- * no information can be computed on
- * it, this function simply returns
- * the default (empty) set of update
- * flags.
- */
-
- virtual
- UpdateFlags
- update_once (const UpdateFlags flags) const;
-
- /**
- * Complementary function for
- * update_once().
- *
- * While update_once() returns
- * the values to be computed on
- * the unit cell for yielding the
- * required data, this function
- * determines the values that
- * must be recomputed on each
- * cell.
- *
- * Refer to update_once() for
- * more details.
- */
- virtual
- UpdateFlags
- update_each (const UpdateFlags flags) const;
-
- /**
- * Return the value of the
- * @p ith shape function at the
- * point @p p. @p p is a point
- * on the reference element. Because the
- * current element has no degrees of freedom,
- * this function should obviously not be
- * called in practice. All this function
- * really does, therefore, is trigger an
- * exception.
- */
- virtual
- double
- shape_value (const unsigned int i, const Point<dim> &p) const;
-
- /**
- * Fill the fields of
- * FEValues. This function
- * performs all the operations
- * needed to compute the data of an
- * FEValues object.
- *
- * In the current case, this function
- * returns no meaningful information,
- * since the element has no degrees of
- * freedom.
- */
- virtual
- void
- fill_fe_values (const Mapping<dim> & mapping,
- const typename Triangulation<dim>::cell_iterator & cell,
- const Quadrature<dim> & quadrature,
- typename Mapping<dim>::InternalDataBase & mapping_data,
- typename Mapping<dim>::InternalDataBase & fedata,
- FEValuesData<dim,dim> & data,
- CellSimilarity::Similarity & cell_similarity) const;
-
- /**
- * Fill the fields of
- * FEFaceValues. This function
- * performs all the operations
- * needed to compute the data of an
- * FEFaceValues object.
- *
- * In the current case, this function
- * returns no meaningful information,
- * since the element has no degrees of
- * freedom.
- */
- virtual
- void
- fill_fe_face_values (const Mapping<dim> & mapping,
- const typename Triangulation<dim> :: cell_iterator & cell,
- const unsigned int face,
- const Quadrature<dim-1> & quadrature,
- typename Mapping<dim> :: InternalDataBase & mapping_data,
- typename Mapping<dim> :: InternalDataBase & fedata,
- FEValuesData<dim,dim> & data) const;
-
- /**
- * Fill the fields of
- * FESubFaceValues. This function
- * performs all the operations
- * needed to compute the data of an
- * FESubFaceValues object.
- *
- * In the current case, this function
- * returns no meaningful information,
- * since the element has no degrees of
- * freedom.
- */
- virtual
- void
- fill_fe_subface_values (const Mapping<dim> & mapping,
- const typename Triangulation<dim>::cell_iterator & cell,
- const unsigned int face,
- const unsigned int subface,
- const Quadrature<dim-1> & quadrature,
- typename Mapping<dim>::InternalDataBase & mapping_data,
- typename Mapping<dim>::InternalDataBase & fedata,
- FEValuesData<dim,dim> & data) const;
-
- /**
- * Prepare internal data
- * structures and fill in values
- * independent of the
- * cell. Returns a pointer to an
- * object of which the caller of
- * this function then has to
- * assume ownership (which
- * includes destruction when it
- * is no more needed).
- *
- * In the current case, this function
- * just returns a default pointer, since
- * no meaningful data exists for this
- * element.
- */
- virtual
- typename Mapping<dim>::InternalDataBase *
- get_data (const UpdateFlags update_flags,
- const Mapping<dim> & mapping,
- const Quadrature<dim> & quadrature) const;
-
- /**
- * Return whether this element dominates
- * the one given as argument when they
- * meet at a common face,
- * whether it is the other way around,
- * whether neither dominates, or if
- * either could dominate.
- *
- * For a definition of domination, see
- * FiniteElementBase::Domination and in
- * particular the @ref hp_paper "hp paper".
- *
- * In the current case, this element
- * is always assumed to dominate, unless
- * it is also of type FE_Nothing(). In
- * that situation, either element can
- * dominate.
- */
- virtual
- FiniteElementDomination::Domination
- compare_for_face_domination (const FiniteElement<dim> & fe_other) const;
-
-
-
- virtual
- std::vector<std::pair<unsigned int, unsigned int> >
- hp_vertex_dof_identities (const FiniteElement<dim> &fe_other) const;
-
- virtual
- std::vector<std::pair<unsigned int, unsigned int> >
- hp_line_dof_identities (const FiniteElement<dim> &fe_other) const;
-
- virtual
- std::vector<std::pair<unsigned int, unsigned int> >
- hp_quad_dof_identities (const FiniteElement<dim> &fe_other) const;
-
- virtual
- bool
- hp_constraints_are_implemented () const;
-
- /**
- * Return the matrix
- * interpolating from a face of
- * of one element to the face of
- * the neighboring element.
- * The size of the matrix is
- * then <tt>source.#dofs_per_face</tt> times
- * <tt>this->#dofs_per_face</tt>.
- *
- * Since the current finite element has no
- * degrees of freedom, the interpolation
- * matrix is necessarily empty.
- */
-
- virtual
- void
- get_face_interpolation_matrix (const FiniteElement<dim> &source_fe,
- FullMatrix<double> &interpolation_matrix) const;
-
-
- /**
- * Return the matrix
- * interpolating from a face of
- * of one element to the subface of
- * the neighboring element.
- * The size of the matrix is
- * then <tt>source.#dofs_per_face</tt> times
- * <tt>this->#dofs_per_face</tt>.
- *
- * Since the current finite element has no
- * degrees of freedom, the interpolation
- * matrix is necessarily empty.
- */
-
- virtual
- void
- get_subface_interpolation_matrix (const FiniteElement<dim> & source_fe,
- const unsigned int index,
- FullMatrix<double> &interpolation_matrix) const;
+ public:
+
+ /**
+ * Constructor. Argument denotes the
+ * number of components to give this
+ * finite element (default = 1).
+ */
+ FE_Nothing (unsigned int n_components = 1);
+
+ /**
+ * A sort of virtual copy
+ * constructor. Some places in
+ * the library, for example the
+ * constructors of FESystem as
+ * well as the hp::FECollection
+ * class, need to make copies of
+ * finite elements without
+ * knowing their exact type. They
+ * do so through this function.
+ */
+ virtual
+ FiniteElement<dim> *
+ clone() const;
+
+ /**
+ * Return a string that uniquely
+ * identifies a finite
+ * element. In this case it is
+ * <code>FE_Nothing@<dim@></code>.
+ */
+ virtual
+ std::string
+ get_name() const;
+
+ /**
+ * Determine the values a finite
+ * element should compute on
+ * initialization of data for
+ * FEValues.
+ *
+ * Given a set of flags
+ * indicating what quantities are
+ * requested from a FEValues
+ * object, update_once() and
+ * update_each() compute which
+ * values must really be
+ * computed. Then, the
+ * <tt>fill_*_values</tt> functions
+ * are called with the result of
+ * these.
+ *
+ * In this case, since the element
+ * has zero degrees of freedom and
+ * no information can be computed on
+ * it, this function simply returns
+ * the default (empty) set of update
+ * flags.
+ */
+
+ virtual
+ UpdateFlags
+ update_once (const UpdateFlags flags) const;
+
+ /**
+ * Complementary function for
+ * update_once().
+ *
+ * While update_once() returns
+ * the values to be computed on
+ * the unit cell for yielding the
+ * required data, this function
+ * determines the values that
+ * must be recomputed on each
+ * cell.
+ *
+ * Refer to update_once() for
+ * more details.
+ */
+ virtual
+ UpdateFlags
+ update_each (const UpdateFlags flags) const;
+
+ /**
+ * Return the value of the
+ * @p ith shape function at the
+ * point @p p. @p p is a point
+ * on the reference element. Because the
+ * current element has no degrees of freedom,
+ * this function should obviously not be
+ * called in practice. All this function
+ * really does, therefore, is trigger an
+ * exception.
+ */
+ virtual
+ double
+ shape_value (const unsigned int i, const Point<dim> &p) const;
+
+ /**
+ * Fill the fields of
+ * FEValues. This function
+ * performs all the operations
+ * needed to compute the data of an
+ * FEValues object.
+ *
+ * In the current case, this function
+ * returns no meaningful information,
+ * since the element has no degrees of
+ * freedom.
+ */
+ virtual
+ void
+ fill_fe_values (const Mapping<dim> &mapping,
+ const typename Triangulation<dim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
+ typename Mapping<dim>::InternalDataBase &mapping_data,
+ typename Mapping<dim>::InternalDataBase &fedata,
+ FEValuesData<dim,dim> &data,
+ CellSimilarity::Similarity &cell_similarity) const;
+
+ /**
+ * Fill the fields of
+ * FEFaceValues. This function
+ * performs all the operations
+ * needed to compute the data of an
+ * FEFaceValues object.
+ *
+ * In the current case, this function
+ * returns no meaningful information,
+ * since the element has no degrees of
+ * freedom.
+ */
+ virtual
+ void
+ fill_fe_face_values (const Mapping<dim> &mapping,
+ const typename Triangulation<dim> :: cell_iterator &cell,
+ const unsigned int face,
+ const Quadrature<dim-1> & quadrature,
+ typename Mapping<dim> :: InternalDataBase &mapping_data,
+ typename Mapping<dim> :: InternalDataBase &fedata,
+ FEValuesData<dim,dim> &data) const;
+
+ /**
+ * Fill the fields of
+ * FESubFaceValues. This function
+ * performs all the operations
+ * needed to compute the data of an
+ * FESubFaceValues object.
+ *
+ * In the current case, this function
+ * returns no meaningful information,
+ * since the element has no degrees of
+ * freedom.
+ */
+ virtual
+ void
+ fill_fe_subface_values (const Mapping<dim> &mapping,
+ const typename Triangulation<dim>::cell_iterator &cell,
+ const unsigned int face,
+ const unsigned int subface,
+ const Quadrature<dim-1> & quadrature,
+ typename Mapping<dim>::InternalDataBase &mapping_data,
+ typename Mapping<dim>::InternalDataBase &fedata,
+ FEValuesData<dim,dim> &data) const;
+
+ /**
+ * Prepare internal data
+ * structures and fill in values
+ * independent of the
+ * cell. Returns a pointer to an
+ * object of which the caller of
+ * this function then has to
+ * assume ownership (which
+ * includes destruction when it
+ * is no longer needed).
+ *
+ * In the current case, this function
+ * just returns a default pointer, since
+ * no meaningful data exists for this
+ * element.
+ */
+ virtual
+ typename Mapping<dim>::InternalDataBase *
+ get_data (const UpdateFlags update_flags,
+ const Mapping<dim> &mapping,
+ const Quadrature<dim> &quadrature) const;
+
+ /**
+ * Return whether this element dominates
+ * the one given as argument when they
+ * meet at a common face,
+ * whether it is the other way around,
+ * whether neither dominates, or if
+ * either could dominate.
+ *
+ * For a definition of domination, see
+ * FiniteElementBase::Domination and in
+ * particular the @ref hp_paper "hp paper".
+ *
+ * In the current case, this element
+ * is always assumed to dominate, unless
+ * it is also of type FE_Nothing(). In
+ * that situation, either element can
+ * dominate.
+ */
+ virtual
+ FiniteElementDomination::Domination
+ compare_for_face_domination (const FiniteElement<dim> &fe_other) const;
+
+
+
+ virtual
+ std::vector<std::pair<unsigned int, unsigned int> >
+ hp_vertex_dof_identities (const FiniteElement<dim> &fe_other) const;
+
+ virtual
+ std::vector<std::pair<unsigned int, unsigned int> >
+ hp_line_dof_identities (const FiniteElement<dim> &fe_other) const;
+
+ virtual
+ std::vector<std::pair<unsigned int, unsigned int> >
+ hp_quad_dof_identities (const FiniteElement<dim> &fe_other) const;
+
+ virtual
+ bool
+ hp_constraints_are_implemented () const;
+
+ /**
+ * Return the matrix
+ * interpolating from a face
+ * of one element to the face of
+ * the neighboring element.
+ * The size of the matrix is
+ * then <tt>source.#dofs_per_face</tt> times
+ * <tt>this->#dofs_per_face</tt>.
+ *
+ * Since the current finite element has no
+ * degrees of freedom, the interpolation
+ * matrix is necessarily empty.
+ */
+
+ virtual
+ void
+ get_face_interpolation_matrix (const FiniteElement<dim> &source_fe,
+ FullMatrix<double> &interpolation_matrix) const;
+
+
+ /**
+ * Return the matrix
+ * interpolating from a face
+ * of one element to the subface of
+ * the neighboring element.
+ * The size of the matrix is
+ * then <tt>source.#dofs_per_face</tt> times
+ * <tt>this->#dofs_per_face</tt>.
+ *
+ * Since the current finite element has no
+ * degrees of freedom, the interpolation
+ * matrix is necessarily empty.
+ */
+
+ virtual
+ void
+ get_subface_interpolation_matrix (const FiniteElement<dim> &source_fe,
+ const unsigned int index,
- FullMatrix<double> &interpolation_matrix) const;
++ FullMatrix<double> &interpolation_matrix) const;
};
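A common use of FE_Nothing, sketched here under the assumption that the usual hp framework and a Triangulation named triangulation are available (the predicate cell_is_inactive is hypothetical and only stands for whatever criterion selects cells without unknowns):

   hp::FECollection<dim> fe_collection;
   fe_collection.push_back (FE_Q<dim>(1));       // index 0: cells carrying unknowns
   fe_collection.push_back (FE_Nothing<dim>());  // index 1: cells without unknowns

   hp::DoFHandler<dim> dof_handler (triangulation);
   typename hp::DoFHandler<dim>::active_cell_iterator
     cell = dof_handler.begin_active(),
     endc = dof_handler.end();
   for (; cell != endc; ++cell)
     if (cell_is_inactive (cell))   // hypothetical predicate for this sketch
       cell->set_active_fe_index (1);
   dof_handler.distribute_dofs (fe_collection);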
template <class POLY, int dim, int spacedim=dim>
class FE_PolyTensor : public FiniteElement<dim,spacedim>
{
- public:
- /**
- * Constructor.
- *
- * @arg @c degree: constructor
- * argument for poly. May be
- * different from @p
- * fe_data.degree.
- */
- FE_PolyTensor (const unsigned int degree,
- const FiniteElementData<dim> &fe_data,
- const std::vector<bool> &restriction_is_additive_flags,
- const std::vector<ComponentMask> &nonzero_components);
+ public:
+ /**
+ * Constructor.
+ *
+ * @arg @c degree: constructor
+ * argument for poly. May be
+ * different from @p
+ * fe_data.degree.
+ */
+ FE_PolyTensor (const unsigned int degree,
+ const FiniteElementData<dim> &fe_data,
+ const std::vector<bool> &restriction_is_additive_flags,
+ const std::vector<ComponentMask> &nonzero_components);
- /**
- * Since these elements are
- * vector valued, an exception is
- * thrown.
- */
- virtual double shape_value (const unsigned int i,
- const Point<dim> &p) const;
+ /**
+ * Since these elements are
+ * vector valued, an exception is
+ * thrown.
+ */
+ virtual double shape_value (const unsigned int i,
+ const Point<dim> &p) const;
- virtual double shape_value_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const;
+ virtual double shape_value_component (const unsigned int i,
+ const Point<dim> &p,
+ const unsigned int component) const;
- /**
- * Since these elements are
- * vector valued, an exception is
- * thrown.
- */
- virtual Tensor<1,dim> shape_grad (const unsigned int i,
- const Point<dim> &p) const;
+ /**
+ * Since these elements are
+ * vector valued, an exception is
+ * thrown.
+ */
+ virtual Tensor<1,dim> shape_grad (const unsigned int i,
+ const Point<dim> &p) const;
- virtual Tensor<1,dim> shape_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const;
+ virtual Tensor<1,dim> shape_grad_component (const unsigned int i,
+ const Point<dim> &p,
+ const unsigned int component) const;
- /**
- * Since these elements are
- * vector valued, an exception is
- * thrown.
- */
- virtual Tensor<2,dim> shape_grad_grad (const unsigned int i,
- const Point<dim> &p) const;
+ /**
+ * Since these elements are
+ * vector valued, an exception is
+ * thrown.
+ */
+ virtual Tensor<2,dim> shape_grad_grad (const unsigned int i,
+ const Point<dim> &p) const;
- virtual Tensor<2,dim> shape_grad_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const;
+ virtual Tensor<2,dim> shape_grad_grad_component (const unsigned int i,
+ const Point<dim> &p,
+ const unsigned int component) const;
- /**
- * Given <tt>flags</tt>,
- * determines the values which
- * must be computed only for the
- * reference cell. Make sure,
- * that #mapping_type is set by
- * the derived class, such that
- * this function can operate
- * correctly.
- */
- virtual UpdateFlags update_once (const UpdateFlags flags) const;
- /**
- * Given <tt>flags</tt>,
- * determines the values which
- * must be computed in each cell
- * cell. Make sure, that
- * #mapping_type is set by the
- * derived class, such that this
- * function can operate
- * correctly.
- */
- virtual UpdateFlags update_each (const UpdateFlags flags) const;
+ /**
+ * Given <tt>flags</tt>,
+ * determines the values which
+ * must be computed only for the
+ * reference cell. Make sure
+ * that #mapping_type is set by
+ * the derived class, such that
+ * this function can operate
+ * correctly.
+ */
+ virtual UpdateFlags update_once (const UpdateFlags flags) const;
+ /**
+ * Given <tt>flags</tt>,
+ * determines the values which
+ * must be computed in each cell
+ * cell. Make sure, that
+ * #mapping_type is set by the
+ * derived class, such that this
+ * function can operate
+ * correctly.
+ */
+ virtual UpdateFlags update_each (const UpdateFlags flags) const;
- protected:
- /**
- * The mapping type to be used to
- * map shape functions from the
- * reference cell to the mesh
- * cell.
- */
- MappingType mapping_type;
+ protected:
+ /**
+ * The mapping type to be used to
+ * map shape functions from the
+ * reference cell to the mesh
+ * cell.
+ */
+ MappingType mapping_type;
- virtual
- typename Mapping<dim,spacedim>::InternalDataBase *
- get_data (const UpdateFlags,
- const Mapping<dim,spacedim>& mapping,
- const Quadrature<dim>& quadrature) const ;
+ virtual
+ typename Mapping<dim,spacedim>::InternalDataBase *
+ get_data (const UpdateFlags,
+ const Mapping<dim,spacedim> &mapping,
+ const Quadrature<dim> &quadrature) const ;
- virtual void
- fill_fe_values (const Mapping<dim,spacedim> &mapping,
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
- FEValuesData<dim,spacedim> &data,
- CellSimilarity::Similarity &cell_similarity) const;
+ virtual void
+ fill_fe_values (const Mapping<dim,spacedim> &mapping,
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
++ typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
++ typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
+ FEValuesData<dim,spacedim> &data,
+ CellSimilarity::Similarity &cell_similarity) const;
- virtual void
- fill_fe_face_values (const Mapping<dim,spacedim> &mapping,
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const Quadrature<dim-1> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
- FEValuesData<dim,spacedim>& data) const ;
+ virtual void
+ fill_fe_face_values (const Mapping<dim,spacedim> &mapping,
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const Quadrature<dim-1> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
+ typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
+ FEValuesData<dim,spacedim> &data) const ;
- virtual void
- fill_fe_subface_values (const Mapping<dim,spacedim> &mapping,
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int sub_no,
- const Quadrature<dim-1> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
- FEValuesData<dim,spacedim>& data) const ;
+ virtual void
+ fill_fe_subface_values (const Mapping<dim,spacedim> &mapping,
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int sub_no,
+ const Quadrature<dim-1> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
+ typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
+ FEValuesData<dim,spacedim> &data) const ;
- /**
- * Fields of cell-independent
- * data for FE_PolyTensor. Stores
- * the values of the shape
- * functions and their
- * derivatives on the reference
- * cell for later use.
- *
- * All tables are organized in a
- * way, that the value for shape
- * function <i>i</i> at
- * quadrature point <i>k</i> is
- * accessed by indices
- * <i>(i,k)</i>.
- */
- class InternalData : public FiniteElement<dim,spacedim>::InternalDataBase
- {
- public:
- /**
- * Array with shape function
- * values in quadrature
- * points. There is one
- * row for each shape
- * function, containing
- * values for each quadrature
- * point.
- */
- std::vector<std::vector<Tensor<1,dim> > > shape_values;
+ /**
+ * Fields of cell-independent
+ * data for FE_PolyTensor. Stores
+ * the values of the shape
+ * functions and their
+ * derivatives on the reference
+ * cell for later use.
+ *
+ * All tables are organized in
+ * such a way that the value for shape
+ * function <i>i</i> at
+ * quadrature point <i>k</i> is
+ * accessed by indices
+ * <i>(i,k)</i>.
+ */
+ class InternalData : public FiniteElement<dim,spacedim>::InternalDataBase
+ {
+ public:
+ /**
+ * Array with shape function
+ * values in quadrature
+ * points. There is one
+ * row for each shape
+ * function, containing
+ * values for each quadrature
+ * point.
+ */
+ std::vector<std::vector<Tensor<1,dim> > > shape_values;
- /**
- * Array with shape function
- * gradients in quadrature
- * points. There is one
- * row for each shape
- * function, containing
- * values for each quadrature
- * point.
- */
- std::vector< std::vector< DerivativeForm<1, dim, spacedim> > > shape_grads;
- };
+ /**
+ * Array with shape function
+ * gradients in quadrature
+ * points. There is one
+ * row for each shape
+ * function, containing
+ * values for each quadrature
+ * point.
+ */
+ std::vector< std::vector< DerivativeForm<1, dim, spacedim> > > shape_grads;
+ };
- /**
- * The polynomial space. Its type
- * is given by the template
- * parameter POLY.
- */
- POLY poly_space;
+ /**
+ * The polynomial space. Its type
+ * is given by the template
+ * parameter POLY.
+ */
+ POLY poly_space;
- /**
- * The inverse of the matrix
- * <i>a<sub>ij</sub></i> of node
- * values <i>N<sub>i</sub></i>
- * applied to polynomial
- * <i>p<sub>j</sub></i>. This
- * matrix is used to convert
- * polynomials in the "raw" basis
- * provided in #poly_space to the
- * basis dual to the node
- * functionals on the reference cell.
- *
- * This object is not filled by
- * FE_PolyTensor, but is a chance
- * for a derived class to allow
- * for reorganization of the
- * basis functions. If it is left
- * empty, the basis in
- * #poly_space is used.
- */
- FullMatrix<double> inverse_node_matrix;
+ /**
+ * The inverse of the matrix
+ * <i>a<sub>ij</sub></i> of node
+ * values <i>N<sub>i</sub></i>
+ * applied to polynomial
+ * <i>p<sub>j</sub></i>. This
+ * matrix is used to convert
+ * polynomials in the "raw" basis
+ * provided in #poly_space to the
+ * basis dual to the node
+ * functionals on the reference cell.
+ *
+ * This object is not filled by
+ * FE_PolyTensor, but is a chance
+ * for a derived class to allow
+ * for reorganization of the
+ * basis functions. If it is left
+ * empty, the basis in
+ * #poly_space is used.
+ */
+ FullMatrix<double> inverse_node_matrix;
- /**
- * If a shape function is
- * computed at a single point, we
- * must compute all of them to
- * apply #inverse_node_matrix. In
- * order to avoid too much
- * overhead, we cache the point
- * and the function values for
- * the next evaluation.
- */
- mutable Point<dim> cached_point;
+ /**
+ * If a shape function is
+ * computed at a single point, we
+ * must compute all of them to
+ * apply #inverse_node_matrix. In
+ * order to avoid too much
+ * overhead, we cache the point
+ * and the function values for
+ * the next evaluation.
+ */
+ mutable Point<dim> cached_point;
- /**
- * Cached shape function values after
- * call to
- * shape_value_component().
- */
- mutable std::vector<Tensor<1,dim> > cached_values;
+ /**
+ * Cached shape function values after
+ * call to
+ * shape_value_component().
+ */
+ mutable std::vector<Tensor<1,dim> > cached_values;
- /**
- * Cached shape function gradients after
- * call to
- * shape_grad_component().
- */
- mutable std::vector<Tensor<2,dim> > cached_grads;
+ /**
+ * Cached shape function gradients after
+ * call to
+ * shape_grad_component().
+ */
+ mutable std::vector<Tensor<2,dim> > cached_grads;
- /**
- * Cached second derivatives of
- * shape functions after call to
- * shape_grad_grad_component().
- */
- mutable std::vector<Tensor<3,dim> > cached_grad_grads;
+ /**
+ * Cached second derivatives of
+ * shape functions after call to
+ * shape_grad_grad_component().
+ */
+ mutable std::vector<Tensor<3,dim> > cached_grad_grads;
};
DEAL_II_NAMESPACE_CLOSE
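Since shape_value() and shape_grad() throw for the vector-valued elements built on FE_PolyTensor, individual vector components are queried instead. A small sketch with a concrete derived element (FE_RaviartThomas in 2d, chosen only for illustration):

   FE_RaviartThomas<2> fe (0);
   const Point<2> p (0.25, 0.25);
   for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
     for (unsigned int c = 0; c < 2; ++c)
       std::cout << "component " << c << " of shape function " << i
                 << " at p: " << fe.shape_value_component (i, p, c)
                 << std::endl;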
template <class FE>
class FEFactory : public FEFactoryBase<FE::dimension,FE::dimension>
{
- public:
- /**
- * Create a FiniteElement and
- * return a pointer to it.
- */
- virtual FiniteElement<FE::dimension,FE::dimension>*
- get (const unsigned int degree) const;
-
- /**
- * Create a FiniteElement from a
- * quadrature formula (currently only
- * implemented for FE_Q) and return a
- * pointer to it.
- */
- virtual FiniteElement<FE::dimension,FE::dimension>*
- get (const Quadrature<1> &quad) const;
+ public:
+ /**
+ * Create a FiniteElement and
+ * return a pointer to it.
+ */
+ virtual FiniteElement<FE::dimension,FE::dimension> *
+ get (const unsigned int degree) const;
+
+ /**
+ * Create a FiniteElement from a
+ * quadrature formula (currently only
+ * implemented for FE_Q) and return a
+ * pointer to it.
+ */
+ virtual FiniteElement<FE::dimension,FE::dimension> *
+ get (const Quadrature<1> &quad) const;
};
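A brief sketch of the factory interface declared above (the returned element is heap-allocated and owned by the caller):

   FEFactory<FE_Q<2> > factory;
   FiniteElement<2,2> *fe = factory.get (3);   // a cubic FE_Q<2>
   // ... use *fe ...
   delete fe;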
- /**
- * @warning In most cases, you
- * will probably want to use
- * compute_base_renumbering().
- *
- * Compute the vector required to
- * renumber the dofs of a cell by
- * component. Furthermore,
- * compute the vector storing the
- * start indices of each
- * component in the local block
- * vector.
- *
- * The second vector is organized
- * such that there is a vector
- * for each base element
- * containing the start index for
- * each component served by this
- * base element.
- *
- * While the first vector is
- * checked to have the correct
- * size, the second one is
- * reinitialized for convenience.
- */
+ /**
+ * @warning In most cases, you
+ * will probably want to use
+ * compute_base_renumbering().
+ *
+ * Compute the vector required to
+ * renumber the dofs of a cell by
+ * component. Furthermore,
+ * compute the vector storing the
+ * start indices of each
+ * component in the local block
+ * vector.
+ *
+ * The second vector is organized
+ * such that there is a vector
+ * for each base element
+ * containing the start index for
+ * each component served by this
+ * base element.
+ *
+ * While the first vector is
+ * checked to have the correct
+ * size, the second one is
+ * reinitialized for convenience.
+ */
template<int dim, int spacedim>
void compute_component_wise(
- const FiniteElement<dim,spacedim>& fe,
- std::vector<unsigned int>& renumbering,
- std::vector<std::vector<unsigned int> >& start_indices);
-
- /**
- * Compute the vector required to
- * renumber the dofs of a cell by
- * block. Furthermore, compute
- * the vector storing either the
- * start indices or the size of
- * each local block vector.
- *
- * If the @p bool parameter is
- * true, @p block_data is filled
- * with the start indices of each
- * local block. If it is false,
- * then the block sizes are
- * returned.
- *
- * @todo Which way does this
- * vector map the numbers?
- */
+ const FiniteElement<dim,spacedim> &fe,
+ std::vector<unsigned int> &renumbering,
+ std::vector<std::vector<unsigned int> > &start_indices);
+
+ /**
+ * Compute the vector required to
+ * renumber the dofs of a cell by
+ * block. Furthermore, compute
+ * the vector storing either the
+ * start indices or the size of
+ * each local block vector.
+ *
+ * If the @p bool parameter is
+ * true, @p block_data is filled
+ * with the start indices of each
+ * local block. If it is false,
+ * then the block sizes are
+ * returned.
+ *
+ * @todo Which way does this
+ * vector map the numbers?
+ */
template<int dim, int spacedim>
void compute_block_renumbering (
- const FiniteElement<dim,spacedim>& fe,
- std::vector<unsigned int>& renumbering,
- std::vector<unsigned int>& block_data,
- const FiniteElement<dim,spacedim> &fe,
++ const FiniteElement<dim,spacedim> &fe,
+ std::vector<unsigned int> &renumbering,
+ std::vector<unsigned int> &block_data,
bool return_start_indices = true);
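A hedged sketch of both renumbering helpers, assuming a vector-valued FiniteElement named fe (e.g. an FESystem) and that these functions live in namespace FETools:

   // cell-local renumbering sorted by vector component, plus the start
   // indices of each component within each base element
   std::vector<unsigned int> renumbering (fe.dofs_per_cell);
   std::vector<std::vector<unsigned int> > start_indices;
   FETools::compute_component_wise (fe, renumbering, start_indices);

   // the same by block, here requesting local block sizes instead of
   // start indices
   std::vector<unsigned int> block_renumbering (fe.dofs_per_cell);
   std::vector<unsigned int> block_sizes;
   FETools::compute_block_renumbering (fe, block_renumbering, block_sizes,
                                       /*return_start_indices=*/ false);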
- /**
- * @name Generation of local matrices
- * @{
- */
- /**
- * Gives the interpolation matrix
- * that interpolates a @p fe1-
- * function to a @p fe2-function on
- * each cell. The interpolation_matrix
- * needs to be of size
- * <tt>(fe2.dofs_per_cell, fe1.dofs_per_cell)</tt>.
- *
- * Note, that if the finite element
- * space @p fe1 is a subset of
- * the finite element space
- * @p fe2 then the @p interpolation_matrix
- * is an embedding matrix.
- */
+ /**
+ * @name Generation of local matrices
+ * @{
+ */
+ /**
+ * Gives the interpolation matrix
+ * that interpolates a @p fe1-
+ * function to a @p fe2-function on
+ * each cell. The interpolation_matrix
+ * needs to be of size
+ * <tt>(fe2.dofs_per_cell, fe1.dofs_per_cell)</tt>.
+ *
+ * Note that if the finite element
+ * space @p fe1 is a subset of
+ * the finite element space
+ * @p fe2 then the @p interpolation_matrix
+ * is an embedding matrix.
+ */
template <int dim, typename number, int spacedim>
void
get_interpolation_matrix(const FiniteElement<dim,spacedim> &fe1,
const DH2<dim,spacedim> &dof2,
OutVector &u2);
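A minimal sketch of the cell-local interpolation matrix described above (here interpolating from FE_Q(1) into FE_Q(2); the matrix is sized as stated in the documentation):

   FE_Q<dim> fe1 (1);
   FE_Q<dim> fe2 (2);
   FullMatrix<double> interpolation_matrix (fe2.dofs_per_cell,
                                            fe1.dofs_per_cell);
   FETools::get_interpolation_matrix (fe1, fe2, interpolation_matrix);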
- /**
- * Gives the interpolation of a
- * the @p dof1-function @p u1 to
- * a @p dof2-function @p u2. @p
- * dof1 and @p dof2 need to be
- * DoFHandlers (or
- * hp::DoFHandlers) based on the
- * same triangulation. @p
- * constraints is a hanging node
- * constraints object
- * corresponding to @p dof2. This
- * object is particular important
- * when interpolating onto
- * continuous elements on grids
- * with hanging nodes (locally
- * refined grids).
- *
- * If the elements @p fe1 and @p fe2
- * are either both continuous or
- * both discontinuous then this
- * interpolation is the usual point
- * interpolation. The same is true
- * if @p fe1 is a continuous and
- * @p fe2 is a discontinuous finite
- * element. For the case that @p fe1
- * is a discontinuous and @p fe2 is
- * a continuous finite element
- * there is no point interpolation
- * defined at the discontinuities.
- * Therefore the meanvalue is taken
- * at the DoF values on the
- * discontinuities.
- */
+ /**
+ * Gives the interpolation of
+ * the @p dof1-function @p u1 to
+ * a @p dof2-function @p u2. @p
+ * dof1 and @p dof2 need to be
+ * DoFHandlers (or
+ * hp::DoFHandlers) based on the
+ * same triangulation. @p
+ * constraints is a hanging node
+ * constraints object
+ * corresponding to @p dof2. This
+ * object is particularly important
+ * when interpolating onto
+ * continuous elements on grids
+ * with hanging nodes (locally
+ * refined grids).
+ *
+ * If the elements @p fe1 and @p fe2
+ * are either both continuous or
+ * both discontinuous then this
+ * interpolation is the usual point
+ * interpolation. The same is true
+ * if @p fe1 is a continuous and
+ * @p fe2 is a discontinuous finite
+ * element. For the case that @p fe1
+ * is a discontinuous and @p fe2 is
+ * a continuous finite element
+ * there is no point interpolation
+ * defined at the discontinuities.
+ * Therefore the mean value is taken
+ * at the DoF values on the
+ * discontinuities.
+ */
template <int dim, int spacedim,
- template <int, int> class DH1,
- template <int, int> class DH2,
- class InVector, class OutVector>
+ template <int, int> class DH1,
+ template <int, int> class DH2,
+ class InVector, class OutVector>
- void interpolate (const DH1<dim,spacedim> &dof1,
+ void interpolate (const DH1<dim,spacedim> &dof1,
const InVector &u1,
- const DH2<dim,spacedim> &dof2,
+ const DH2<dim,spacedim> &dof2,
const ConstraintMatrix &constraints,
- OutVector& u2);
-
- /**
- * Gives the interpolation of the
- * @p fe1-function @p u1 to a
- * @p fe2-function, and
- * interpolates this to a second
- * @p fe1-function named
- * @p u1_interpolated.
- *
- * Note, that this function does
- * not work on continuous
- * elements at hanging nodes. For
- * that case use the
- * @p back_interpolate function,
- * below, that takes an
- * additional
- * @p ConstraintMatrix object.
- *
- * Furthermore note, that for the
- * specific case when the finite
- * element space corresponding to
- * @p fe1 is a subset of the
- * finite element space
- * corresponding to @p fe2, this
- * function is simply an identity
- * mapping.
- */
+ OutVector &u2);
+
+ /**
+ * Gives the interpolation of the
+ * @p fe1-function @p u1 to a
+ * @p fe2-function, and
+ * interpolates this to a second
+ * @p fe1-function named
+ * @p u1_interpolated.
+ *
+ * Note that this function does
+ * not work on continuous
+ * elements at hanging nodes. For
+ * that case use the
+ * @p back_interpolate function,
+ * below, that takes an
+ * additional
+ * @p ConstraintMatrix object.
+ *
+ * Furthermore note that for the
+ * specific case when the finite
+ * element space corresponding to
+ * @p fe1 is a subset of the
+ * finite element space
+ * corresponding to @p fe2, this
+ * function is simply an identity
+ * mapping.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
const InVector &u1,
const FiniteElement<dim,spacedim> &fe2,
OutVector &u1_interpolated);
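A hedged usage sketch of the constrained interpolation between two DoFHandlers on the same triangulation (the names dof_handler_1, u1, dof_handler_2 and hanging_node_constraints_2 are assumptions for this illustration):

   Vector<double> u2 (dof_handler_2.n_dofs());
   FETools::interpolate (dof_handler_1, u1,
                         dof_handler_2, hanging_node_constraints_2,
                         u2);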
- /**
- * Gives the interpolation of the
- * @p dof1-function @p u1 to a
- * @p dof2-function, and
- * interpolates this to a second
- * @p dof1-function named
- * @p u1_interpolated.
- * @p constraints1 and
- * @p constraints2 are the
- * hanging node constraints
- * corresponding to @p dof1 and
- * @p dof2, respectively. These
- * objects are particular
- * important when continuous
- * elements on grids with hanging
- * nodes (locally refined grids)
- * are involved.
- *
- * Furthermore note, that for the
- * specific case when the finite
- * element space corresponding to
- * @p dof1 is a subset of the
- * finite element space
- * corresponding to @p dof2, this
- * function is simply an identity
- * mapping.
- */
+ /**
+ * Gives the interpolation of the
+ * @p dof1-function @p u1 to a
+ * @p dof2-function, and
+ * interpolates this to a second
+ * @p dof1-function named
+ * @p u1_interpolated.
+ * @p constraints1 and
+ * @p constraints2 are the
+ * hanging node constraints
+ * corresponding to @p dof1 and
+ * @p dof2, respectively. These
+ * objects are particularly
+ * important when continuous
+ * elements on grids with hanging
+ * nodes (locally refined grids)
+ * are involved.
+ *
+ * Furthermore note that for the
+ * specific case when the finite
+ * element space corresponding to
+ * @p dof1 is a subset of the
+ * finite element space
+ * corresponding to @p dof2, this
+ * function is simply an identity
+ * mapping.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void back_interpolate (const DoFHandler<dim,spacedim>& dof1,
- const ConstraintMatrix& constraints1,
- const InVector& u1,
- const DoFHandler<dim,spacedim>& dof2,
- const ConstraintMatrix& constraints2,
- OutVector& u1_interpolated);
-
- /**
- * Gives $(Id-I_h)z_1$ for a given
- * @p dof1-function $z_1$, where $I_h$
- * is the interpolation from @p fe1
- * to @p fe2. The result $(Id-I_h)z_1$ is
- * written into @p z1_difference.
- *
- * Note, that this function does
- * not work for continuous
- * elements at hanging nodes. For
- * that case use the
- * @p interpolation_difference
- * function, below, that takes an
- * additional
- * @p ConstraintMatrix object.
- */
- void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
++ void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
+ const ConstraintMatrix &constraints1,
+ const InVector &u1,
- const DoFHandler<dim,spacedim> &dof2,
++ const DoFHandler<dim,spacedim> &dof2,
+ const ConstraintMatrix &constraints2,
+ OutVector &u1_interpolated);
+
+ /**
+ * Gives $(Id-I_h)z_1$ for a given
+ * @p dof1-function $z_1$, where $I_h$
+ * is the interpolation from @p fe1
+ * to @p fe2. The result $(Id-I_h)z_1$ is
+ * written into @p z1_difference.
+ *
+ * Note that this function does
+ * not work for continuous
+ * elements at hanging nodes. For
+ * that case use the
+ * @p interpolation_difference
+ * function, below, that takes an
+ * additional
+ * @p ConstraintMatrix object.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
void interpolation_difference(const DoFHandler<dim,spacedim> &dof1,
const InVector &z1,
const FiniteElement<dim,spacedim> &fe2,
OutVector &z1_difference);
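A short sketch of computing $(Id-I_h)z$ as described above, e.g. for a dual solution z given on a higher-order space, with $I_h$ the interpolation into FE_Q(1) (the names dof_handler and z are assumptions):

   Vector<double> z_difference (dof_handler.n_dofs());
   FETools::interpolation_difference (dof_handler, z,
                                      FE_Q<dim>(1),
                                      z_difference);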
- /**
- * Gives $(Id-I_h)z_1$ for a given
- * @p dof1-function $z_1$, where $I_h$
- * is the interpolation from @p fe1
- * to @p fe2. The result $(Id-I_h)z_1$ is
- * written into @p z1_difference.
- * @p constraints1 and
- * @p constraints2 are the
- * hanging node constraints
- * corresponding to @p dof1 and
- * @p dof2, respectively. These
- * objects are particular
- * important when continuous
- * elements on grids with hanging
- * nodes (locally refined grids)
- * are involved.
- */
+ /**
+ * Gives $(Id-I_h)z_1$ for a given
+ * @p dof1-function $z_1$, where $I_h$
+ * is the interpolation from @p fe1
+ * to @p fe2. The result $(Id-I_h)z_1$ is
+ * written into @p z1_difference.
+ * @p constraints1 and
+ * @p constraints2 are the
+ * hanging node constraints
+ * corresponding to @p dof1 and
+ * @p dof2, respectively. These
+ * objects are particularly
+ * important when continuous
+ * elements on grids with hanging
+ * nodes (locally refined grids)
+ * are involved.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void interpolation_difference(const DoFHandler<dim,spacedim>& dof1,
- const ConstraintMatrix& constraints1,
- const InVector& z1,
- const DoFHandler<dim,spacedim>& dof2,
- const ConstraintMatrix& constraints2,
- OutVector& z1_difference);
-
-
-
- /**
- * $L^2$ projection for
- * discontinuous
- * elements. Operates the same
- * direction as interpolate.
- *
- * The global projection can be
- * computed by local matrices if
- * the finite element spaces are
- * discontinuous. With continuous
- * elements, this is impossible,
- * since a global mass matrix
- * must be inverted.
- */
- void interpolation_difference(const DoFHandler<dim,spacedim> &dof1,
++ void interpolation_difference(const DoFHandler<dim,spacedim> &dof1,
+ const ConstraintMatrix &constraints1,
+ const InVector &z1,
- const DoFHandler<dim,spacedim> &dof2,
++ const DoFHandler<dim,spacedim> &dof2,
+ const ConstraintMatrix &constraints2,
+ OutVector &z1_difference);
+
+
+
+ /**
+ * $L^2$ projection for
+ * discontinuous
+ * elements. Operates in the same
+ * direction as interpolate.
+ *
+ * The global projection can be
+ * computed by local matrices if
+ * the finite element spaces are
+ * discontinuous. With continuous
+ * elements, this is impossible,
+ * since a global mass matrix
+ * must be inverted.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void project_dg (const DoFHandler<dim,spacedim>& dof1,
- const InVector& u1,
- const DoFHandler<dim,spacedim>& dof2,
- OutVector& u2);
-
- /**
- * Gives the patchwise
- * extrapolation of a @p dof1
- * function @p z1 to a @p dof2
- * function @p z2. @p dof1 and
- * @p dof2 need to be DoFHandler
- * based on the same triangulation.
- *
- * This function is interesting
- * for e.g. extrapolating
- * patchwise a piecewise linear
- * solution to a piecewise
- * quadratic solution.
- *
- * Note that the resulting field
- * does not satisfy continuity
- * requirements of the given
- * finite elements.
- *
- * When you use continuous
- * elements on grids with hanging
- * nodes, please use the
- * @p extrapolate function with
- * an additional
- * ConstraintMatrix argument,
- * see below.
- *
- * Since this function operates
- * on patches of cells, it is
- * required that the underlying
- * grid is refined at least once
- * for every coarse grid cell. If
- * this is not the case, an
- * exception will be raised.
- */
+ void project_dg (const DoFHandler<dim,spacedim> &dof1,
+ const InVector &u1,
+ const DoFHandler<dim,spacedim> &dof2,
+ OutVector &u2);
+
+ /**
+ * Gives the patchwise
+ * extrapolation of a @p dof1
+ * function @p z1 to a @p dof2
+ * function @p z2. @p dof1 and
+ * @p dof2 need to be DoFHandler objects
+ * based on the same triangulation.
+ *
+ * This function is interesting
+ * for e.g. extrapolating
+ * patchwise a piecewise linear
+ * solution to a piecewise
+ * quadratic solution.
+ *
+ * Note that the resulting field
+ * does not satisfy continuity
+ * requirements of the given
+ * finite elements.
+ *
+ * When you use continuous
+ * elements on grids with hanging
+ * nodes, please use the
+ * @p extrapolate function with
+ * an additional
+ * ConstraintMatrix argument,
+ * see below.
+ *
+ * Since this function operates
+ * on patches of cells, it is
+ * required that the underlying
+ * grid is refined at least once
+ * for every coarse grid cell. If
+ * this is not the case, an
+ * exception will be raised.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void extrapolate (const DoFHandler<dim,spacedim>& dof1,
- const InVector& z1,
- const DoFHandler<dim,spacedim>& dof2,
- OutVector& z2);
-
- /**
- * Gives the patchwise
- * extrapolation of a @p dof1
- * function @p z1 to a @p dof2
- * function @p z2. @p dof1 and
- * @p dof2 need to be DoFHandler
- * based on the same triangulation.
- * @p constraints is a hanging
- * node constraints object
- * corresponding to
- * @p dof2. This object is
- * particular important when
- * interpolating onto continuous
- * elements on grids with hanging
- * nodes (locally refined grids).
- *
- * Otherwise, the same holds as
- * for the other @p extrapolate
- * function.
- */
+ void extrapolate (const DoFHandler<dim,spacedim> &dof1,
+ const InVector &z1,
+ const DoFHandler<dim,spacedim> &dof2,
+ OutVector &z2);
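A minimal sketch of the unconstrained variant, assuming two DoFHandlers on the same (at least once refined) triangulation and a piecewise linear solution z1 to be extrapolated patchwise to a quadratic representation z2:

   Vector<double> z2 (dof_handler_2.n_dofs());
   FETools::extrapolate (dof_handler_1, z1, dof_handler_2, z2);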
+
+ /**
+ * Gives the patchwise
+ * extrapolation of a @p dof1
+ * function @p z1 to a @p dof2
+ * function @p z2. @p dof1 and
+ * @p dof2 need to be DoFHandler objects
+ * based on the same triangulation.
+ * @p constraints is a hanging
+ * node constraints object
+ * corresponding to
+ * @p dof2. This object is
+ * particularly important when
+ * interpolating onto continuous
+ * elements on grids with hanging
+ * nodes (locally refined grids).
+ *
+ * Otherwise, the same holds as
+ * for the other @p extrapolate
+ * function.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void extrapolate (const DoFHandler<dim,spacedim>& dof1,
- const InVector& z1,
- const DoFHandler<dim,spacedim>& dof2,
- const ConstraintMatrix& constraints,
- OutVector& z2);
- //@}
- /**
- * The numbering of the degrees
- * of freedom in continuous finite
- * elements is hierarchic,
- * i.e. in such a way that we
- * first number the vertex dofs,
- * in the order of the vertices
- * as defined by the
- * triangulation, then the line
- * dofs in the order and
- * respecting the direction of
- * the lines, then the dofs on
- * quads, etc. However, we could
- * have, as well, numbered them
- * in a lexicographic way,
- * i.e. with indices first
- * running in x-direction, then
- * in y-direction and finally in
- * z-direction. Discontinuous
- * elements of class FE_DGQ()
- * are numbered in this way, for
- * example.
- *
- * This function constructs a
- * table which lexicographic
- * index each degree of freedom
- * in the hierarchic numbering
- * would have. It operates on the
- * continuous finite element
- * given as first argument, and
- * outputs the lexicographic
- * indices in the second.
- *
- * Note that since this function
- * uses specifics of the
- * continuous finite elements, it
- * can only operate on
- * FiniteElementData<dim> objects
- * inherent in FE_Q(). However,
- * this function does not take a
- * FE_Q object as it is also
- * invoked by the FE_Q()
- * constructor.
- *
- * It is assumed that the size of
- * the output argument already
- * matches the correct size,
- * which is equal to the number
- * of degrees of freedom in the
- * finite element.
- */
- void extrapolate (const DoFHandler<dim,spacedim> &dof1,
++ void extrapolate (const DoFHandler<dim,spacedim> &dof1,
+ const InVector &z1,
- const DoFHandler<dim,spacedim> &dof2,
++ const DoFHandler<dim,spacedim> &dof2,
+ const ConstraintMatrix &constraints,
+ OutVector &z2);
+ //@}
+ /**
+ * The numbering of the degrees
+ * of freedom in continuous finite
+ * elements is hierarchic,
+ * i.e. in such a way that we
+ * first number the vertex dofs,
+ * in the order of the vertices
+ * as defined by the
+ * triangulation, then the line
+ * dofs in the order and
+ * respecting the direction of
+ * the lines, then the dofs on
+ * quads, etc. However, we could
+ * have, as well, numbered them
+ * in a lexicographic way,
+ * i.e. with indices first
+ * running in x-direction, then
+ * in y-direction and finally in
+ * z-direction. Discontinuous
+ * elements of class FE_DGQ()
+ * are numbered in this way, for
+ * example.
+ *
+ * This function constructs a
+ * table showing which lexicographic
+ * index each degree of freedom
+ * in the hierarchic numbering
+ * would have. It operates on the
+ * continuous finite element
+ * given as first argument, and
+ * outputs the lexicographic
+ * indices in the second.
+ *
+ * Note that since this function
+ * uses specifics of the
+ * continuous finite elements, it
+ * can only operate on
+ * FiniteElementData<dim> objects
+ * inherent in FE_Q(). However,
+ * this function does not take a
+ * FE_Q object as it is also
+ * invoked by the FE_Q()
+ * constructor.
+ *
+ * It is assumed that the size of
+ * the output argument already
+ * matches the correct size,
+ * which is equal to the number
+ * of degrees of freedom in the
+ * finite element.
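+ *
+ * A usage sketch (assuming the FETools namespace and that the
+ * output argument is a std::vector<unsigned int> pre-sized to
+ * the number of degrees of freedom per cell):
+ * @code
+ *   const FE_Q<2> fe (2);
+ *   std::vector<unsigned int> h2l (fe.dofs_per_cell);
+ *   FETools::hierarchic_to_lexicographic_numbering (fe, h2l);
+ * @endcode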
+ */
template <int dim>
void
hierarchic_to_lexicographic_numbering (const FiniteElementData<dim> &fe_data,
template <int dim, class VECTOR = Vector<double>, int spacedim=dim >
class MappingQ1Eulerian : public MappingQ1<dim,spacedim>
{
- public:
-
- /**
- * Constructor. It takes a
- * <tt>Vector<double> &</tt> as its
- * first argument to specify the
- * transformation of the whole
- * problem from the reference to
- * the current configuration.
- * The organization of the
- * elements in the @p Vector
- * must follow the concept how
- * deal.II stores solutions that
- * are associated to a
- * triangulation. This is
- * automatically the case if the
- * @p Vector represents the
- * solution of the previous step
- * of a nonlinear problem.
- * Alternatively, the @p Vector
- * can be initialized by
- * <tt>DoFAccessor::set_dof_values()</tt>.
- */
- MappingQ1Eulerian (const VECTOR &euler_transform_vectors,
- const DoFHandler<dim,spacedim> &shiftmap_dof_handler);
-
- /**
- * Return a pointer to a copy of the
- * present object. The caller of this
- * copy then assumes ownership of it.
- */
- virtual
- Mapping<dim,spacedim> * clone () const;
-
- /**
- * Always returns @p false because
- * MappingQ1Eulerian does not in general
- * preserve vertex locations (unless the
- * translation vector happens to provide
- * for zero displacements at vertex
- * locations).
- */
- bool preserves_vertex_locations () const;
-
- /**
- * Exception.
- */
- DeclException0 (ExcInactiveCell);
-
-
-
- protected:
- /**
- * Implementation of the interface in
- * MappingQ1. Overrides the function in
- * the base class, since we cannot use
- * any cell similarity for this class.
- */
- virtual void
- fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
- typename std::vector<Point<spacedim> > &quadrature_points,
- std::vector<double> &JxW_values,
- std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
- std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
- std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
- std::vector<Point<spacedim> > &cell_normal_vectors,
- CellSimilarity::Similarity &cell_similarity) const;
-
- /**
- * Reference to the vector of
- * shifts.
- */
- SmartPointer<const VECTOR, MappingQ1Eulerian<dim,VECTOR,spacedim> > euler_transform_vectors;
-
- /**
- * Pointer to the DoFHandler to
- * which the mapping vector is
- * associated.
- */
- SmartPointer<const DoFHandler<dim,spacedim>,MappingQ1Eulerian<dim,VECTOR,spacedim> > shiftmap_dof_handler;
-
-
- private:
- /**
- * Computes the support points of
- * the mapping. For
- * @p MappingQ1Eulerian these
- * are the vertices.
- */
- virtual void compute_mapping_support_points(
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- std::vector<Point<spacedim> > &a) const;
+ public:
+
+ /**
+ * Constructor. It takes a
+ * <tt>Vector<double> &</tt> as its
+ * first argument to specify the
+ * transformation of the whole
+ * problem from the reference to
+ * the current configuration.
+ * The organization of the
+ * elements in the @p Vector
+ * must follow the way
+ * deal.II stores solutions that
+ * are associated with a
+ * triangulation. This is
+ * automatically the case if the
+ * @p Vector represents the
+ * solution of the previous step
+ * of a nonlinear problem.
+ * Alternatively, the @p Vector
+ * can be initialized by
+ * <tt>DoFAccessor::set_dof_values()</tt>.
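+ *
+ * A minimal sketch (the names below are placeholders; the
+ * vector is assumed to hold a dim-component displacement
+ * field ordered like a solution vector on @p dof_handler):
+ * @code
+ *   Vector<double> displacements (dof_handler.n_dofs());
+ *   // ... fill the displacement field ...
+ *   MappingQ1Eulerian<2> eulerian_mapping (displacements,
+ *                                          dof_handler);
+ * @endcode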
+ */
- MappingQ1Eulerian (const VECTOR &euler_transform_vectors,
++ MappingQ1Eulerian (const VECTOR &euler_transform_vectors,
+ const DoFHandler<dim,spacedim> &shiftmap_dof_handler);
+
+ /**
+ * Return a pointer to a copy of the
+ * present object. The caller of this
+ * copy then assumes ownership of it.
+ */
+ virtual
+ Mapping<dim,spacedim> *clone () const;
+
+ /**
+ * Always returns @p false because
+ * MappingQ1Eulerian does not in general
+ * preserve vertex locations (unless the
+ * translation vector happens to provide
+ * for zero displacements at vertex
+ * locations).
+ */
+ bool preserves_vertex_locations () const;
+
+ /**
+ * Exception.
+ */
+ DeclException0 (ExcInactiveCell);
+
+
+
+ protected:
+ /**
+ * Implementation of the interface in
+ * MappingQ1. Overrides the function in
+ * the base class, since we cannot use
+ * any cell similarity for this class.
+ */
+ virtual void
+ fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
+ typename std::vector<Point<spacedim> > &quadrature_points,
+ std::vector<double> &JxW_values,
+ std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
+ std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
+ std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
+ std::vector<Point<spacedim> > &cell_normal_vectors,
+ CellSimilarity::Similarity &cell_similarity) const;
+
+ /**
+ * Reference to the vector of
+ * shifts.
+ */
+ SmartPointer<const VECTOR, MappingQ1Eulerian<dim,VECTOR,spacedim> > euler_transform_vectors;
+
+ /**
+ * Pointer to the DoFHandler to
+ * which the mapping vector is
+ * associated.
+ */
+ SmartPointer<const DoFHandler<dim,spacedim>,MappingQ1Eulerian<dim,VECTOR,spacedim> > shiftmap_dof_handler;
+
+
+ private:
+ /**
+ * Computes the support points of
+ * the mapping. For
+ * @p MappingQ1Eulerian these
+ * are the vertices.
+ */
+ virtual void compute_mapping_support_points(
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ std::vector<Point<spacedim> > &a) const;
};
template <int dim, class VECTOR = Vector<double>, int spacedim=dim >
class MappingQEulerian : public MappingQ<dim, spacedim>
{
- const VECTOR &euler_vector,
- const DoFHandler<dim,spacedim> &euler_dof_handler);
+ public:
+ /**
+ * Constructor. The first argument is
+ * the polynomial degree of the desired
+ * Qp mapping. It then takes a
+ * <tt>Vector<double> &</tt> to specify the
+ * transformation of the domain
+ * from the reference to
+ * the current configuration.
+ * The organization of the
+ * elements in the @p Vector
+ * must follow the way
+ * deal.II stores solutions that
+ * are associated with a
+ * triangulation. This is
+ * automatically the case if the
+ * @p Vector represents the
+ * solution of the previous step
+ * of a nonlinear problem.
+ * Alternatively, the @p Vector
+ * can be initialized by
+ * <tt>DoFAccessor::set_dof_values()</tt>.
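+ *
+ * For example (a sketch with placeholder names), a second
+ * order Eulerian mapping following a displacement field:
+ * @code
+ *   MappingQEulerian<2> q2_mapping (2, displacements, dof_handler);
+ * @endcode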
+ */
+
+ MappingQEulerian (const unsigned int degree,
++ const VECTOR &euler_vector,
++ const DoFHandler<dim,spacedim> &euler_dof_handler);
+
+ /**
+ * Return a pointer to a copy of the
+ * present object. The caller of this
+ * copy then assumes ownership of it.
+ */
+ virtual
+ Mapping<dim,spacedim> *clone () const;
+
+ /**
+ * Always returns @p false because
+ * MappingQEulerian does not in general
+ * preserve vertex locations (unless the
+ * translation vector happens to provide
+ * for zero displacements at vertex
+ * locations).
+ */
+ bool preserves_vertex_locations () const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInactiveCell);
+
+ protected:
+ /**
+ * Implementation of the interface in
+ * MappingQ. Overrides the function in
+ * the base class, since we cannot use
+ * any cell similarity for this class.
+ */
+ virtual void
+ fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
+ typename std::vector<Point<spacedim> > &quadrature_points,
+ std::vector<double> &JxW_values,
+ std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
+ std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
+ std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
+ std::vector<Point<spacedim> > &cell_normal_vectors,
+ CellSimilarity::Similarity &cell_similarity) const;
+
+ /**
+ * Reference to the vector of
+ * shifts.
+ */
+
+ SmartPointer<const VECTOR, MappingQEulerian<dim,VECTOR,spacedim> > euler_vector;
+
+ /**
+ * Pointer to the DoFHandler to
+ * which the mapping vector is
+ * associated.
+ */
+
+ SmartPointer<const DoFHandler<dim,spacedim>,MappingQEulerian<dim,VECTOR,spacedim> > euler_dof_handler;
+
+
+ private:
+
+ /**
+ * Special quadrature rule used
+ * to define the support points
+ * in the reference configuration.
+ */
+
+ class SupportQuadrature : public Quadrature<dim>
+ {
public:
- /**
- * Constructor. The first argument is
- * the polynomical degree of the desired
- * Qp mapping. It then takes a
- * <tt>Vector<double> &</tt> to specify the
- * transformation of the domain
- * from the reference to
- * the current configuration.
- * The organization of the
- * elements in the @p Vector
- * must follow the concept how
- * deal.II stores solutions that
- * are associated to a
- * triangulation. This is
- * automatically the case if the
- * @p Vector represents the
- * solution of the previous step
- * of a nonlinear problem.
- * Alternatively, the @p Vector
- * can be initialized by
- * <tt>DoFAccessor::set_dof_values()</tt>.
- */
-
- MappingQEulerian (const unsigned int degree,
- const VECTOR &euler_vector,
- const DoFHandler<dim,spacedim> &euler_dof_handler);
-
- /**
- * Return a pointer to a copy of the
- * present object. The caller of this
- * copy then assumes ownership of it.
- */
- virtual
- Mapping<dim,spacedim> * clone () const;
-
- /**
- * Always returns @p false because
- * MappingQ1Eulerian does not in general
- * preserve vertex locations (unless the
- * translation vector happens to provide
- * for zero displacements at vertex
- * locations).
- */
- bool preserves_vertex_locations () const;
-
- /**
- * Exception
- */
- DeclException0 (ExcInactiveCell);
-
- protected:
- /**
- * Implementation of the interface in
- * MappingQ. Overrides the function in
- * the base class, since we cannot use
- * any cell similarity for this class.
- */
- virtual void
- fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
- typename std::vector<Point<spacedim> > &quadrature_points,
- std::vector<double> &JxW_values,
- std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
- std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
- std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
- std::vector<Point<spacedim> > &cell_normal_vectors,
- CellSimilarity::Similarity &cell_similarity) const;
-
- /**
- * Reference to the vector of
- * shifts.
- */
-
- SmartPointer<const VECTOR, MappingQEulerian<dim,VECTOR,spacedim> > euler_vector;
-
- /**
- * Pointer to the DoFHandler to
- * which the mapping vector is
- * associated.
- */
-
- SmartPointer<const DoFHandler<dim,spacedim>,MappingQEulerian<dim,VECTOR,spacedim> > euler_dof_handler;
-
-
- private:
-
- /**
- * Special quadrature rule used
- * to define the support points
- * in the reference configuration.
- */
-
- class SupportQuadrature : public Quadrature<dim>
- {
- public:
- /**
- * Constructor, with an argument
- * defining the desired polynomial
- * degree.
- */
-
- SupportQuadrature (const unsigned int map_degree);
-
- };
-
- /**
- * A member variable holding the
- * quadrature points in the right
- * order.
- */
- const SupportQuadrature support_quadrature;
-
- /**
- * FEValues object used to query the
- * the given finite element field
- * at the support points in the
- * reference configuration.
- *
- * The variable is marked as
- * mutable since we have to call
- * FEValues::reinit from
- * compute_mapping_support_points,
- * a function that is 'const'.
- */
- mutable FEValues<dim,spacedim> fe_values;
-
- /**
- * A variable to guard access to
- * the fe_values variable.
- */
- mutable Threads::ThreadMutex fe_values_mutex;
-
- /**
- * Compute the positions of the
- * support points in the current
- * configuration
- */
- virtual void compute_mapping_support_points(
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- std::vector<Point<spacedim> > &a) const;
+ /**
+ * Constructor, with an argument
+ * defining the desired polynomial
+ * degree.
+ */
+
+ SupportQuadrature (const unsigned int map_degree);
+
+ };
+
+ /**
+ * A member variable holding the
+ * quadrature points in the right
+ * order.
+ */
+ const SupportQuadrature support_quadrature;
+
+ /**
+ * FEValues object used to query the
+ * given finite element field
+ * at the support points in the
+ * reference configuration.
+ *
+ * The variable is marked as
+ * mutable since we have to call
+ * FEValues::reinit from
+ * compute_mapping_support_points,
+ * a function that is 'const'.
+ */
+ mutable FEValues<dim,spacedim> fe_values;
+
+ /**
+ * A variable to guard access to
+ * the fe_values variable.
+ */
+ mutable Threads::ThreadMutex fe_values_mutex;
+
+ /**
+ * Compute the positions of the
+ * support points in the current
+ * configuration
+ */
+ virtual void compute_mapping_support_points(
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ std::vector<Point<spacedim> > &a) const;
};
*/
class GridGenerator
{
- public:
- /**
- * Initialize the given triangulation
- * with a hypercube (line in 1D, square
- * in 2D, etc) consisting of exactly one
- * cell. The hypercube volume is the
- * tensor product interval
- * <i>[left,right]<sup>dim</sup></i> in
- * the present number of dimensions,
- * where the limits are given as
- * arguments. They default to zero and
- * unity, then producing the unit
- * hypercube. All boundary indicators are
- * set to zero ("not colorized") for 2d
- * and 3d. In 1d the indicators are
- * colorized, see hyper_rectangle().
- *
- * @image html hyper_cubes.png
- *
- * See also
- * subdivided_hyper_cube() for a
- * coarse mesh consisting of
- * several cells. See
- * hyper_rectangle(), if
- * different lengths in different
- * ordinate directions are
- * required.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim, int spacedim>
- static void hyper_cube (Triangulation<dim,spacedim> &tria,
- const double left = 0.,
- const double right= 1.);
+ public:
+ /**
+ * Initialize the given triangulation
+ * with a hypercube (line in 1D, square
+ * in 2D, etc) consisting of exactly one
+ * cell. The hypercube volume is the
+ * tensor product interval
+ * <i>[left,right]<sup>dim</sup></i> in
+ * the present number of dimensions,
+ * where the limits are given as
+ * arguments. They default to zero and
+ * unity, then producing the unit
+ * hypercube. All boundary indicators are
+ * set to zero ("not colorized") for 2d
+ * and 3d. In 1d the indicators are
+ * colorized, see hyper_rectangle().
+ *
+ * @image html hyper_cubes.png
+ *
+ * See also
+ * subdivided_hyper_cube() for a
+ * coarse mesh consisting of
+ * several cells. See
+ * hyper_rectangle(), if
+ * different lengths in different
+ * ordinate directions are
+ * required.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
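+ *
+ * For example, a one-cell coarse mesh of the square
+ * <i>[-1,1]<sup>2</sup></i>:
+ * @code
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::hyper_cube (triangulation, -1., 1.);
+ * @endcode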
+ */
+ template <int dim, int spacedim>
- static void hyper_cube (Triangulation<dim,spacedim> &tria,
++ static void hyper_cube (Triangulation<dim,spacedim> &tria,
+ const double left = 0.,
+ const double right= 1.);
- /**
- * Same as hyper_cube(), but
- * with the difference that not
- * only one cell is created but
- * each coordinate direction is
- * subdivided into
- * @p repetitions cells. Thus,
- * the number of cells filling
- * the given volume is
- * <tt>repetitions<sup>dim</sup></tt>.
- *
- * If spacedim=dim+1 the same
- * mesh as in the case
- * spacedim=dim is created, but
- * the vertices have an
- * additional coordinate =0. So,
- * if dim=1 one obtains line
- * along the x axis in the xy
- * plane, and if dim=3 one
- * obtains a square in lying in
- * the xy plane in 3d space.
- *
- * @note The triangulation needs
- * to be void upon calling this
- * function.
- */
- template <int dim>
- static void subdivided_hyper_cube (Triangulation<dim> &tria,
- const unsigned int repetitions,
- const double left = 0.,
- const double right= 1.);
+ /**
+ * Same as hyper_cube(), but
+ * with the difference that not
+ * only one cell is created but
+ * each coordinate direction is
+ * subdivided into
+ * @p repetitions cells. Thus,
+ * the number of cells filling
+ * the given volume is
+ * <tt>repetitions<sup>dim</sup></tt>.
+ *
+ * If spacedim=dim+1 the same
+ * mesh as in the case
+ * spacedim=dim is created, but
+ * the vertices have an
+ * additional coordinate =0. So,
+ * if dim=1 one obtains a line
+ * along the x axis in the xy
+ * plane, and if dim=2 one
+ * obtains a square lying in
+ * the xy plane in 3d space.
+ *
+ * @note The triangulation needs
+ * to be void upon calling this
+ * function.
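+ *
+ * For example, a 4x4 cell mesh of the unit square:
+ * @code
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::subdivided_hyper_cube (triangulation, 4);
+ * @endcode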
+ */
+ template <int dim>
- static void subdivided_hyper_cube (Triangulation<dim> &tria,
++ static void subdivided_hyper_cube (Triangulation<dim> &tria,
+ const unsigned int repetitions,
+ const double left = 0.,
+ const double right= 1.);
- /**
- * Create a coordinate-parallel
- * brick from the two
- * diagonally opposite corner
- * points @p p1 and @p p2.
- *
- * If the @p colorize flag is
- * set, the
- * @p boundary_indicators of the
- * surfaces are assigned, such
- * that the lower one in
- * @p x-direction is 0, the
- * upper one is 1. The indicators
- * for the surfaces in
- * @p y-direction are 2 and 3,
- * the ones for @p z are 4 and
- * 5. Additionally, material ids
- * are assigned to the cells
- * according to the octant their
- * center is in: being in the right half
- * plane for any coordinate
- * direction <i>x<sub>i</sub></i>
- * adds 2<sup>i</sup>. For
- * instance, the center point
- * (1,-1,1) yields a material id 5.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim, int spacedim>
- static void hyper_rectangle (Triangulation<dim,spacedim> &tria,
- const Point<spacedim> &p1,
- const Point<spacedim> &p2,
- const bool colorize = false);
+ /**
+ * Create a coordinate-parallel
+ * brick from the two
+ * diagonally opposite corner
+ * points @p p1 and @p p2.
+ *
+ * If the @p colorize flag is
+ * set, the
+ * @p boundary_indicators of the
+ * surfaces are assigned, such
+ * that the lower one in
+ * @p x-direction is 0, the
+ * upper one is 1. The indicators
+ * for the surfaces in
+ * @p y-direction are 2 and 3,
+ * the ones for @p z are 4 and
+ * 5. Additionally, material ids
+ * are assigned to the cells
+ * according to the octant their
+ * center is in: being in the right half
+ * plane for any coordinate
+ * direction <i>x<sub>i</sub></i>
+ * adds 2<sup>i</sup>. For
+ * instance, the center point
+ * (1,-1,1) yields a material id 5.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
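+ *
+ * For example, a colorized one-cell mesh of the rectangle
+ * <i>[0,2]x[0,1]</i>:
+ * @code
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::hyper_rectangle (triangulation,
+ *                                   Point<2>(0.,0.),
+ *                                   Point<2>(2.,1.),
+ *                                   true);
+ * @endcode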
+ */
+ template <int dim, int spacedim>
+ static void hyper_rectangle (Triangulation<dim,spacedim> &tria,
+ const Point<spacedim> &p1,
+ const Point<spacedim> &p2,
+ const bool colorize = false);
- /**
- * Create a coordinate-parallel
- * parallelepiped from the two
- * diagonally opposite corner
- * points @p p1 and @p p2. In
- * dimension @p i,
- * <tt>repetitions[i]</tt> cells are
- * generated.
- *
- * To get cells with an aspect
- * ratio different from that of
- * the domain, use different
- * numbers of subdivisions in
- * different coordinate
- * directions. The minimum number
- * of subdivisions in each
- * direction is
- * 1. @p repetitions is a list
- * of integers denoting the
- * number of subdivisions in each
- * coordinate direction.
- *
- * If the @p colorize flag is
- * set, the
- * @p boundary_indicators of the
- * surfaces are assigned, such
- * that the lower one in
- * @p x-direction is 0, the
- * upper one is 1. The indicators
- * for the surfaces in
- * @p y-direction are 2 and 3,
- * the ones for @p z are 4 and
- * 5. Additionally, material ids
- * are assigned to the cells
- * according to the octant their
- * center is in: being in the right half
- * plane for any coordinate
- * direction <i>x<sub>i</sub></i>
- * adds 2<sup>i</sup>. For
- * instance, the center point
- * (1,-1,1) yields a material id 5.
- *
- * Note that the @p colorize flag is
- * ignored in 1d and is assumed to always
- * be true. That means the boundary
- * indicator is 0 on the left and 1 on
- * the right. See step-15 for details.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- *
- * @note For an example of the
- * use of this function see the
- * step-28
- * tutorial program.
- */
- template <int dim>
- static
- void
- subdivided_hyper_rectangle (Triangulation<dim> &tria,
- const std::vector<unsigned int> &repetitions,
- const Point<dim> &p1,
- const Point<dim> &p2,
- const bool colorize=false);
+ /**
+ * Create a coordinate-parallel
+ * parallelepiped from the two
+ * diagonally opposite corner
+ * points @p p1 and @p p2. In
+ * dimension @p i,
+ * <tt>repetitions[i]</tt> cells are
+ * generated.
+ *
+ * To get cells with an aspect
+ * ratio different from that of
+ * the domain, use different
+ * numbers of subdivisions in
+ * different coordinate
+ * directions. The minimum number
+ * of subdivisions in each
+ * direction is
+ * 1. @p repetitions is a list
+ * of integers denoting the
+ * number of subdivisions in each
+ * coordinate direction.
+ *
+ * If the @p colorize flag is
+ * set, the
+ * @p boundary_indicators of the
+ * surfaces are assigned, such
+ * that the lower one in
+ * @p x-direction is 0, the
+ * upper one is 1. The indicators
+ * for the surfaces in
+ * @p y-direction are 2 and 3,
+ * the ones for @p z are 4 and
+ * 5. Additionally, material ids
+ * are assigned to the cells
+ * according to the octant their
+ * center is in: being in the right half
+ * plane for any coordinate
+ * direction <i>x<sub>i</sub></i>
+ * adds 2<sup>i</sup>. For
+ * instance, the center point
+ * (1,-1,1) yields a material id 5.
+ *
+ * Note that the @p colorize flag is
+ * ignored in 1d and is assumed to always
+ * be true. That means the boundary
+ * indicator is 0 on the left and 1 on
+ * the right. See step-15 for details.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ *
+ * @note For an example of the
+ * use of this function see the
+ * step-28
+ * tutorial program.
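+ *
+ * For example, meshing the rectangle <i>[0,2]x[0,1]</i> with
+ * square cells of edge length 0.5:
+ * @code
+ *   std::vector<unsigned int> repetitions (2);
+ *   repetitions[0] = 4;
+ *   repetitions[1] = 2;
+ *
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::subdivided_hyper_rectangle (triangulation,
+ *                                              repetitions,
+ *                                              Point<2>(0.,0.),
+ *                                              Point<2>(2.,1.));
+ * @endcode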
+ */
+ template <int dim>
+ static
+ void
+ subdivided_hyper_rectangle (Triangulation<dim> &tria,
+ const std::vector<unsigned int> &repetitions,
+ const Point<dim> &p1,
+ const Point<dim> &p2,
+ const bool colorize=false);
- /**
- * Like the previous
- * function. However, here the
- * second argument does not
- * denote the number of
- * subdivisions in each
- * coordinate direction, but a
- * sequence of step sizes for
- * each coordinate direction. The
- * domain will therefore be
- * subdivided into
- * <code>step_sizes[i].size()</code>
- * cells in coordinate direction
- * <code>i</code>, with widths
- * <code>step_sizes[i][j]</code>
- * for the <code>j</code>th cell.
- *
- * This function is therefore the
- * right one to generate graded
- * meshes where cells are
- * concentrated in certain areas,
- * rather than a uniformly
- * subdivided mesh as the
- * previous function generates.
- *
- * The step sizes have to add up
- * to the dimensions of the hyper
- * rectangle specified by the
- * points @p p1 and @p p2.
- */
- template <int dim>
- static
- void
- subdivided_hyper_rectangle(Triangulation<dim> &tria,
- const std::vector<std::vector<double> > &step_sizes,
- const Point<dim> &p_1,
- const Point<dim> &p_2,
- const bool colorize);
+ /**
+ * Like the previous
+ * function. However, here the
+ * second argument does not
+ * denote the number of
+ * subdivisions in each
+ * coordinate direction, but a
+ * sequence of step sizes for
+ * each coordinate direction. The
+ * domain will therefore be
+ * subdivided into
+ * <code>step_sizes[i].size()</code>
+ * cells in coordinate direction
+ * <code>i</code>, with widths
+ * <code>step_sizes[i][j]</code>
+ * for the <code>j</code>th cell.
+ *
+ * This function is therefore the
+ * right one to generate graded
+ * meshes where cells are
+ * concentrated in certain areas,
+ * rather than a uniformly
+ * subdivided mesh as the
+ * previous function generates.
+ *
+ * The step sizes have to add up
+ * to the dimensions of the hyper
+ * rectangle specified by the
+ * points @p p1 and @p p2.
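+ *
+ * For example, a graded mesh of the unit square whose cells
+ * are concentrated near <i>x=0</i> (a sketch; the step sizes
+ * in each direction add up to the edge length 1):
+ * @code
+ *   std::vector<std::vector<double> > step_sizes (2);
+ *   step_sizes[0].push_back (0.1);
+ *   step_sizes[0].push_back (0.3);
+ *   step_sizes[0].push_back (0.6);
+ *   step_sizes[1].push_back (0.5);
+ *   step_sizes[1].push_back (0.5);
+ *
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::subdivided_hyper_rectangle (triangulation,
+ *                                              step_sizes,
+ *                                              Point<2>(0.,0.),
+ *                                              Point<2>(1.,1.),
+ *                                              false);
+ * @endcode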
+ */
+ template <int dim>
+ static
+ void
+ subdivided_hyper_rectangle(Triangulation<dim> &tria,
+ const std::vector<std::vector<double> > &step_sizes,
+ const Point<dim> &p_1,
+ const Point<dim> &p_2,
+ const bool colorize);
- /**
- * Like the previous function, but with
- * the following twist: the @p
- * material_id argument is a
- * dim-dimensional array that, for each
- * cell, indicates which material_id
- * should be set. In addition, and this
- * is the major new functionality, if the
- * material_id of a cell is <tt>(unsigned
- * char)(-1)</tt>, then that cell is
- * deleted from the triangulation,
- * i.e. the domain will have a void
- * there.
- */
- template <int dim>
- static
- void
- subdivided_hyper_rectangle (Triangulation<dim> &tria,
- const std::vector< std::vector<double> > &spacing,
- const Point<dim> &p,
- const Table<dim,types::material_id> &material_id,
- const bool colorize=false);
+ /**
+ * Like the previous function, but with
+ * the following twist: the @p
+ * material_id argument is a
+ * dim-dimensional array that, for each
+ * cell, indicates which material_id
+ * should be set. In addition, and this
+ * is the major new functionality, if the
+ * material_id of a cell is <tt>(unsigned
+ * char)(-1)</tt>, then that cell is
+ * deleted from the triangulation,
+ * i.e. the domain will have a void
+ * there.
+ */
+ template <int dim>
+ static
+ void
+ subdivided_hyper_rectangle (Triangulation<dim> &tria,
+ const std::vector< std::vector<double> > &spacing,
+ const Point<dim> &p,
+ const Table<dim,types::material_id> &material_id,
+ const bool colorize=false);
- /**
- * A parallelogram. The first
- * corner point is the
- * origin. The <tt>dim</tt>
- * adjacent points are the
- * one-dimensional subtensors of
- * the tensor provided and
- * additional points will be sums
- * of these two vectors.
- * Colorizing is done according
- * to hyper_rectangle().
- *
- * @note This function is
- * implemented in 2d only.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void
- parallelogram(Triangulation<dim>& tria,
- const Tensor<2,dim>& corners,
- const bool colorize=false);
+ /**
+ * A parallelogram. The first
+ * corner point is the
+ * origin. The <tt>dim</tt>
+ * adjacent points are the
+ * one-dimensional subtensors of
+ * the tensor provided and
+ * additional points will be sums
+ * of these two vectors.
+ * Colorizing is done according
+ * to hyper_rectangle().
+ *
+ * @note This function is
+ * implemented in 2d only.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void
- parallelogram(Triangulation<dim> &tria,
++ parallelogram(Triangulation<dim> &tria,
+ const Tensor<2,dim> &corners,
+ const bool colorize=false);
- /**
- * Hypercube with a layer of
- * hypercubes around it. The
- * first two parameters give the
- * lower and upper bound of the
- * inner hypercube in all
- * coordinate directions.
- * @p thickness marks the size of
- * the layer cells.
- *
- * If the flag colorize is set,
- * the outer cells get material
- * id's according to the
- * following scheme: extending
- * over the inner cube in
- * (+/-) x-direction: 1/2. In y-direction
- * 4/8, in z-direction 16/32. The cells
- * at corners and edges (3d) get
- * these values bitwise or'd.
- *
- * Presently only available in 2d
- * and 3d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void enclosed_hyper_cube (Triangulation<dim> &tria,
- const double left = 0.,
- const double right= 1.,
- const double thickness = 1.,
- const bool colorize = false);
+ /**
+ * Hypercube with a layer of
+ * hypercubes around it. The
+ * first two parameters give the
+ * lower and upper bound of the
+ * inner hypercube in all
+ * coordinate directions.
+ * @p thickness marks the size of
+ * the layer cells.
+ *
+ * If the flag colorize is set,
+ * the outer cells get material
+ * id's according to the
+ * following scheme: extending
+ * over the inner cube in
+ * (+/-) x-direction: 1/2. In y-direction
+ * 4/8, in z-direction 16/32. The cells
+ * at corners and edges (3d) get
+ * these values bitwise or'd.
+ *
+ * Presently only available in 2d
+ * and 3d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void enclosed_hyper_cube (Triangulation<dim> &tria,
+ const double left = 0.,
+ const double right= 1.,
+ const double thickness = 1.,
+ const bool colorize = false);
- /**
- * Initialize the given
- * triangulation with a
- * hyperball, i.e. a circle or a
- * ball around <tt>center</tt>
- * with given <tt>radius</tt>.
- *
- * In order to avoid degenerate
- * cells at the boundaries, the
- * circle is triangulated by five
- * cells, the ball by seven
- * cells. The diameter of the
- * center cell is chosen so that
- * the aspect ratio of the
- * boundary cells after one
- * refinement is optimized.
- *
- * This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_ball (Triangulation<dim> &tria,
- const Point<dim> ¢er = Point<dim>(),
- const double radius = 1.);
+ /**
+ * Initialize the given
+ * triangulation with a
+ * hyperball, i.e. a circle or a
+ * ball around <tt>center</tt>
+ * with given <tt>radius</tt>.
+ *
+ * In order to avoid degenerate
+ * cells at the boundaries, the
+ * circle is triangulated by five
+ * cells, the ball by seven
+ * cells. The diameter of the
+ * center cell is chosen so that
+ * the aspect ratio of the
+ * boundary cells after one
+ * refinement is optimized.
+ *
+ * This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
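+ *
+ * A typical use, attaching a matching boundary description so
+ * that the curved boundary is respected upon refinement (a
+ * sketch):
+ * @code
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::hyper_ball (triangulation, Point<2>(), 1.);
+ *
+ *   static const HyperBallBoundary<2> boundary (Point<2>(), 1.);
+ *   triangulation.set_boundary (0, boundary);
+ * @endcode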
+ */
+ template <int dim>
+ static void hyper_ball (Triangulation<dim> &tria,
+ const Point<dim> ¢er = Point<dim>(),
+ const double radius = 1.);
- /**
- * This class produces a half
- * hyper-ball around
- * <tt>center</tt>, which
- * contains four elements in 2d
- * and 6 in 3d. The cut plane is
- * perpendicular to the
- * <i>x</i>-axis.
- *
- * The boundary indicators for the final
- * triangulation are 0 for the curved boundary and
- * 1 for the cut plane.
- *
- * The appropriate
- * boundary class is
- * HalfHyperBallBoundary, or HyperBallBoundary.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void half_hyper_ball (Triangulation<dim> &tria,
- const Point<dim> ¢er = Point<dim>(),
- const double radius = 1.);
+ /**
+ * This function produces a half
+ * hyper-ball around
+ * <tt>center</tt>, which
+ * contains four elements in 2d
+ * and 6 in 3d. The cut plane is
+ * perpendicular to the
+ * <i>x</i>-axis.
+ *
+ * The boundary indicators for the final
+ * triangulation are 0 for the curved boundary and
+ * 1 for the cut plane.
+ *
+ * The appropriate
+ * boundary class is
+ * HalfHyperBallBoundary, or HyperBallBoundary.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void half_hyper_ball (Triangulation<dim> &tria,
+ const Point<dim> ¢er = Point<dim>(),
+ const double radius = 1.);
- /**
- * Create a cylinder around the
- * x-axis. The cylinder extends
- * from <tt>x=-half_length</tt> to
- * <tt>x=+half_length</tt> and its
- * projection into the
- * @p yz-plane is a circle of
- * radius @p radius.
- *
- * In two dimensions, the
- * cylinder is a rectangle from
- * <tt>x=-half_length</tt> to
- * <tt>x=+half_length</tt> and
- * from <tt>y=-radius</tt> to
- * <tt>y=radius</tt>.
- *
- * The boundaries are colored
- * according to the following
- * scheme: 0 for the hull of the
- * cylinder, 1 for the left hand
- * face and 2 for the right hand
- * face.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void cylinder (Triangulation<dim> &tria,
- const double radius = 1.,
- const double half_length = 1.);
+ /**
+ * Create a cylinder around the
+ * x-axis. The cylinder extends
+ * from <tt>x=-half_length</tt> to
+ * <tt>x=+half_length</tt> and its
+ * projection into the
+ * @p yz-plane is a circle of
+ * radius @p radius.
+ *
+ * In two dimensions, the
+ * cylinder is a rectangle from
+ * <tt>x=-half_length</tt> to
+ * <tt>x=+half_length</tt> and
+ * from <tt>y=-radius</tt> to
+ * <tt>y=radius</tt>.
+ *
+ * The boundaries are colored
+ * according to the following
+ * scheme: 0 for the hull of the
+ * cylinder, 1 for the left hand
+ * face and 2 for the right hand
+ * face.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void cylinder (Triangulation<dim> &tria,
+ const double radius = 1.,
+ const double half_length = 1.);
- /**
- * Create a cutted cone around
- * the x-axis. The cone extends
- * from <tt>x=-half_length</tt>
- * to <tt>x=half_length</tt> and
- * its projection into the @p
- * yz-plane is a circle of radius
- * @p radius_0 at
- * <tt>x=-half_length</tt> and a
- * circle of radius @p radius_1
- * at <tt>x=+half_length</tt>.
- * In between the radius is
- * linearly decreasing.
- *
- * In two dimensions, the cone is
- * a trapezoid from
- * <tt>x=-half_length</tt> to
- * <tt>x=+half_length</tt> and
- * from <tt>y=-radius_0</tt> to
- * <tt>y=radius_0</tt> at
- * <tt>x=-half_length</tt> and
- * from <tt>y=-radius_1</tt> to
- * <tt>y=radius_1</tt> at
- * <tt>x=+half_length</tt>. In
- * between the range of
- * <tt>y</tt> is linearly
- * decreasing.
- *
- * The boundaries are colored
- * according to the following
- * scheme: 0 for the hull of the
- * cone, 1 for the left hand
- * face and 2 for the right hand
- * face.
- *
- * An example of use can be found in the
- * documentation of the ConeBoundary
- * class, with which you probably want to
- * associate boundary indicator 0 (the
- * hull of the cone).
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- *
- * @author Markus Bürg, 2009
- */
- template <int dim>
- static void
- truncated_cone (Triangulation<dim> &tria,
- const double radius_0 = 1.0,
- const double radius_1 = 0.5,
- const double half_length = 1.0);
+ /**
+ * Create a truncated cone around
+ * the x-axis. The cone extends
+ * from <tt>x=-half_length</tt>
+ * to <tt>x=half_length</tt> and
+ * its projection into the @p
+ * yz-plane is a circle of radius
+ * @p radius_0 at
+ * <tt>x=-half_length</tt> and a
+ * circle of radius @p radius_1
+ * at <tt>x=+half_length</tt>.
+ * In between the radius is
+ * linearly decreasing.
+ *
+ * In two dimensions, the cone is
+ * a trapezoid from
+ * <tt>x=-half_length</tt> to
+ * <tt>x=+half_length</tt> and
+ * from <tt>y=-radius_0</tt> to
+ * <tt>y=radius_0</tt> at
+ * <tt>x=-half_length</tt> and
+ * from <tt>y=-radius_1</tt> to
+ * <tt>y=radius_1</tt> at
+ * <tt>x=+half_length</tt>. In
+ * between the range of
+ * <tt>y</tt> is linearly
+ * decreasing.
+ *
+ * The boundaries are colored
+ * according to the following
+ * scheme: 0 for the hull of the
+ * cone, 1 for the left hand
+ * face and 2 for the right hand
+ * face.
+ *
+ * An example of use can be found in the
+ * documentation of the ConeBoundary
+ * class, with which you probably want to
+ * associate boundary indicator 0 (the
+ * hull of the cone).
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ *
+ * @author Markus Bürg, 2009
+ */
+ template <int dim>
+ static void
+ truncated_cone (Triangulation<dim> &tria,
+ const double radius_0 = 1.0,
+ const double radius_1 = 0.5,
+ const double half_length = 1.0);
- /**
- * Initialize the given
- * triangulation with a hyper-L
- * consisting of exactly
- * <tt>2^dim-1</tt> cells. It
- * produces the hypercube with
- * the interval [<i>left,right</i>] without
- * the hypercube made out of the
- * interval [<i>(a+b)/2,b</i>].
- *
- * @image html hyper_l.png
- *
- * The triangulation needs to be
- * void upon calling this
- * function.
- *
- * This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_L (Triangulation<dim> &tria,
- const double left = -1.,
- const double right= 1.);
+ /**
+ * Initialize the given
+ * triangulation with a hyper-L
+ * consisting of exactly
+ * <tt>2^dim-1</tt> cells. It
+ * produces the hypercube with
+ * the interval [<i>left,right</i>] without
+ * the hypercube made out of the
+ * interval [<i>(left+right)/2,right</i>].
+ *
+ * @image html hyper_l.png
+ *
+ * The triangulation needs to be
+ * void upon calling this
+ * function.
+ *
+ * This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void hyper_L (Triangulation<dim> &tria,
+ const double left = -1.,
+ const double right= 1.);
- /**
- * Initialize the given
- * Triangulation with a hypercube
- * with a slit. In each
- * coordinate direction, the
- * hypercube extends from @p left
- * to @p right.
- *
- * In 2d, the split goes in
- * vertical direction from
- * <tt>x=(left+right)/2,
- * y=left</tt> to the center of
- * the square at
- * <tt>x=y=(left+right)/2</tt>.
- *
- * In 3d, the 2d domain is just
- * extended in the
- * <i>z</i>-direction, such that
- * a plane cuts the lower half of
- * a rectangle in two.
+ /**
+ * Initialize the given
+ * Triangulation with a hypercube
+ * with a slit. In each
+ * coordinate direction, the
+ * hypercube extends from @p left
+ * to @p right.
+ *
+ * In 2d, the split goes in
+ * vertical direction from
+ * <tt>x=(left+right)/2,
+ * y=left</tt> to the center of
+ * the square at
+ * <tt>x=y=(left+right)/2</tt>.
+ *
+ * In 3d, the 2d domain is just
+ * extended in the
+ * <i>z</i>-direction, such that
+ * a plane cuts the lower half of
+ * a rectangle in two.
- * This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_cube_slit (Triangulation<dim> &tria,
- const double left = 0.,
- const double right= 1.,
- const bool colorize = false);
+ * This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void hyper_cube_slit (Triangulation<dim> &tria,
+ const double left = 0.,
+ const double right= 1.,
+ const bool colorize = false);
- /**
- * Produce a hyper-shell,
- * the region between two
- * spheres around <tt>center</tt>,
- * with given
- * <tt>inner_radius</tt> and
- * <tt>outer_radius</tt>. The number
- * <tt>n_cells</tt> indicates the
- * number of cells of the resulting
- * triangulation, i.e., how many cells
- * form the ring (in 2d) or the shell
- * (in 3d).
- *
- * If the flag @p colorize is @p true,
- * then the outer boundary will have the
- * indicator 1, while the inner boundary
- * has id zero. If the flag is @p false,
- * both have indicator zero.
- *
- * In 2D, the number
- * <tt>n_cells</tt> of elements
- * for this initial triangulation
- * can be chosen arbitrarily. If
- * the number of initial cells is
- * zero (as is the default), then
- * it is computed adaptively such
- * that the resulting elements
- * have the least aspect ratio.
- *
- * In 3D, only two different numbers are
- * meaningful, 6 for a surface based on a
- * hexahedron (i.e. 6 panels on the inner
- * sphere extruded in radial direction to
- * form 6 cells) and 12 for the rhombic
- * dodecahedron. These give rise to the
- * following meshes upon one refinement:
- *
- * @image html hypershell3d-6.png
- * @image html hypershell3d-12.png
- *
- * Neither of these meshes is
- * particularly good since one ends up
- * with poorly shaped cells at the inner
- * edge upon refinement. For example,
- * this is the middle plane of the mesh
- * for the <code>n_cells=6</code>:
- *
- * @image html hyper_shell_6_cross_plane.png
- *
- * The mesh generated with
- * <code>n_cells=6</code> is better but
- * still not good. As a consequence, you
- * may also specify
- * <code>n_cells=96</code> as a third
- * option. The mesh generated in this way
- * is based on a once refined version of
- * the one with <code>n_cells=12</code>,
- * where all internal nodes are re-placed
- * along a shell somewhere between the
- * inner and outer boundary of the
- * domain. The following two images
- * compare half of the hyper shell for
- * <code>n_cells=12</code> and
- * <code>n_cells=96</code> (note that the
- * doubled radial lines on the cross
- * section are artifacts of the
- * visualization):
- *
- * @image html hyper_shell_12_cut.png
- * @image html hyper_shell_96_cut.png
- *
- * @note This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_shell (Triangulation<dim> &tria,
- const Point<dim> ¢er,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_cells = 0,
- bool colorize = false);
+ /**
+ * Produce a hyper-shell,
+ * the region between two
+ * spheres around <tt>center</tt>,
+ * with given
+ * <tt>inner_radius</tt> and
+ * <tt>outer_radius</tt>. The number
+ * <tt>n_cells</tt> indicates the
+ * number of cells of the resulting
+ * triangulation, i.e., how many cells
+ * form the ring (in 2d) or the shell
+ * (in 3d).
+ *
+ * If the flag @p colorize is @p true,
+ * then the outer boundary will have the
+ * indicator 1, while the inner boundary
+ * has id zero. If the flag is @p false,
+ * both have indicator zero.
+ *
+ * In 2D, the number
+ * <tt>n_cells</tt> of elements
+ * for this initial triangulation
+ * can be chosen arbitrarily. If
+ * the number of initial cells is
+ * zero (as is the default), then
+ * it is computed adaptively such
+ * that the resulting elements
+ * have the least aspect ratio.
+ *
+ * In 3D, only two different numbers are
+ * meaningful, 6 for a surface based on a
+ * hexahedron (i.e. 6 panels on the inner
+ * sphere extruded in radial direction to
+ * form 6 cells) and 12 for the rhombic
+ * dodecahedron. These give rise to the
+ * following meshes upon one refinement:
+ *
+ * @image html hypershell3d-6.png
+ * @image html hypershell3d-12.png
+ *
+ * Neither of these meshes is
+ * particularly good since one ends up
+ * with poorly shaped cells at the inner
+ * edge upon refinement. For example,
+ * this is the middle plane of the mesh
+ * for the <code>n_cells=6</code>:
+ *
+ * @image html hyper_shell_6_cross_plane.png
+ *
+ * The mesh generated with
+ * <code>n_cells=12</code> is better but
+ * still not good. As a consequence, you
+ * may also specify
+ * <code>n_cells=96</code> as a third
+ * option. The mesh generated in this way
+ * is based on a once refined version of
+ * the one with <code>n_cells=12</code>,
+ * where all internal nodes are re-placed
+ * along a shell somewhere between the
+ * inner and outer boundary of the
+ * domain. The following two images
+ * compare half of the hyper shell for
+ * <code>n_cells=12</code> and
+ * <code>n_cells=96</code> (note that the
+ * doubled radial lines on the cross
+ * section are artifacts of the
+ * visualization):
+ *
+ * @image html hyper_shell_12_cut.png
+ * @image html hyper_shell_96_cut.png
+ *
+ * @note This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
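+ *
+ * For example, a colorized ring around the origin with inner
+ * radius 0.5 and outer radius 1, letting the number of cells
+ * be chosen automatically (a sketch):
+ * @code
+ *   Triangulation<2> triangulation;
+ *   GridGenerator::hyper_shell (triangulation, Point<2>(),
+ *                               0.5, 1., 0, true);
+ * @endcode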
+ */
+ template <int dim>
+ static void hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_cells = 0,
+ bool colorize = false);
- /**
- * Produce a half hyper-shell,
- * i.e. the space between two
- * circles in two space
- * dimensions and the region
- * between two spheres in 3d,
- * with given inner and outer
- * radius and a given number of
- * elements for this initial
- * triangulation. However,
- * opposed to the previous
- * function, it does not produce
- * a whole shell, but only one
- * half of it, namely that part
- * for which the first component
- * is restricted to non-negative
- * values. The purpose of this
- * class is to enable
- * computations for solutions
- * which have rotational
- * symmetry, in which case the
- * half shell in 2d represents a
- * shell in 3d.
- *
- * If the number of
- * initial cells is zero (as is
- * the default), then it is
- * computed adaptively such that
- * the resulting elements have
- * the least aspect ratio.
- *
- * If colorize is set to true, the
- * inner, outer, left, and right
- * boundary get indicator 0, 1, 2,
- * and 3, respectively. Otherwise
- * all indicators are set to 0.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void half_hyper_shell (Triangulation<dim> &tria,
- const Point<dim> ¢er,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_cells = 0,
- const bool colorize = false);
+ /**
+ * Produce a half hyper-shell,
+ * i.e. the space between two
+ * circles in two space
+ * dimensions and the region
+ * between two spheres in 3d,
+ * with given inner and outer
+ * radius and a given number of
+ * elements for this initial
+ * triangulation. However,
+ * opposed to the previous
+ * function, it does not produce
+ * a whole shell, but only one
+ * half of it, namely that part
+ * for which the first component
+ * is restricted to non-negative
+ * values. The purpose of this
+ * class is to enable
+ * computations for solutions
+ * which have rotational
+ * symmetry, in which case the
+ * half shell in 2d represents a
+ * shell in 3d.
+ *
+ * If the number of
+ * initial cells is zero (as is
+ * the default), then it is
+ * computed adaptively such that
+ * the resulting elements have
+ * the least aspect ratio.
+ *
+ * If colorize is set to true, the
+ * inner, outer, left, and right
+ * boundary get indicator 0, 1, 2,
+ * and 3, respectively. Otherwise
+ * all indicators are set to 0.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void half_hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_cells = 0,
+ const bool colorize = false);
- /**
- * Produce a domain that is the
- * intersection between a
- * hyper-shell with given inner
- * and outer radius, i.e. the
- * space between two circles in
- * two space dimensions and the
- * region between two spheres in
- * 3d, and the positive quadrant
- * (in 2d) or octant (in 3d). In
- * 2d, this is indeed a quarter
- * of the full annulus, while the
- * function is a misnomer in 3d
- * because there the domain is
- * not a quarter but one eighth
- * of the full shell.
- *
- * If the number of initial cells is zero
- * (as is the default), then it is
- * computed adaptively such that the
- * resulting elements have the least
- * aspect ratio in 2d.
- *
- * If colorize is set to true, the inner,
- * outer, left, and right boundary get
- * indicator 0, 1, 2, and 3 in 2d,
- * respectively. Otherwise all indicators
- * are set to 0. In 3d indicator 2 is at
- * the face x=0, 3 at y=0, 4 at z=0.
- *
- * @note The triangulation needs to be
- * void upon calling this function.
- */
- template <int dim>
- static void quarter_hyper_shell (Triangulation<dim> &tria,
- const Point<dim> ¢er,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_cells = 0,
- const bool colorize = false);
+ /**
+ * Produce a domain that is the
+ * intersection between a
+ * hyper-shell with given inner
+ * and outer radius, i.e. the
+ * space between two circles in
+ * two space dimensions and the
+ * region between two spheres in
+ * 3d, and the positive quadrant
+ * (in 2d) or octant (in 3d). In
+ * 2d, this is indeed a quarter
+ * of the full annulus, while the
+ * function is a misnomer in 3d
+ * because there the domain is
+ * not a quarter but one eighth
+ * of the full shell.
+ *
+ * If the number of initial cells is zero
+ * (as is the default), then it is
+ * computed adaptively such that the
+ * resulting elements have the least
+ * aspect ratio in 2d.
+ *
+ * If colorize is set to true, the inner,
+ * outer, left, and right boundary get
+ * indicator 0, 1, 2, and 3 in 2d,
+ * respectively. Otherwise all indicators
+ * are set to 0. In 3d indicator 2 is at
+ * the face x=0, 3 at y=0, 4 at z=0.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this function.
+ */
+ template <int dim>
+ static void quarter_hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_cells = 0,
+ const bool colorize = false);
- /**
- * Produce a domain that is the space
- * between two cylinders in 3d, with
- * given length, inner and outer radius
- * and a given number of elements for
- * this initial triangulation. If @p
- * n_radial_cells is zero (as is the
- * default), then it is computed
- * adaptively such that the resulting
- * elements have the least aspect
- * ratio. The same holds for @p
- * n_axial_cells.
- *
- * @note Although this function
- * is declared as a template, it
- * does not make sense in 1D and
- * 2D.
- *
- * @note The triangulation needs
- * to be void upon calling this
- * function.
- */
- template <int dim>
- static void cylinder_shell (Triangulation<dim> &tria,
- const double length,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_radial_cells = 0,
- const unsigned int n_axial_cells = 0);
+ /**
+ * Produce a domain that is the space
+ * between two cylinders in 3d, with
+ * given length, inner and outer radius
+ * and a given number of elements for
+ * this initial triangulation. If @p
+ * n_radial_cells is zero (as is the
+ * default), then it is computed
+ * adaptively such that the resulting
+ * elements have the least aspect
+ * ratio. The same holds for @p
+ * n_axial_cells.
+ *
+ * @note Although this function
+ * is declared as a template, it
+ * does not make sense in 1D and
+ * 2D.
+ *
+ * @note The triangulation needs
+ * to be void upon calling this
+ * function.
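+ *
+ * For example, the space between two coaxial cylinders of
+ * length 2 with radii 0.5 and 1, with cell counts chosen
+ * automatically (a sketch):
+ * @code
+ *   Triangulation<3> triangulation;
+ *   GridGenerator::cylinder_shell (triangulation, 2., 0.5, 1.);
+ * @endcode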
+ */
+ template <int dim>
+ static void cylinder_shell (Triangulation<dim> &tria,
+ const double length,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_radial_cells = 0,
+ const unsigned int n_axial_cells = 0);
- /**
- * Produce the surface meshing of the
- * torus. The axis of the torus is the
- * $y$-axis while the plane of the torus
- * is the $x$-$z$ plane. The boundary of
- * this object can be described by the
- * TorusBoundary class.
- *
- * @param tria The triangulation to be
- * filled.
- *
- * @param R The radius of the circle,
- * which forms the middle line of the
- * torus containing the loop of
- * cells. Must be greater than @p r.
- *
- * @param r The inner radius of the
- * torus.
- */
+ /**
+ * Produce the surface meshing of the
+ * torus. The axis of the torus is the
+ * $y$-axis while the plane of the torus
+ * is the $x$-$z$ plane. The boundary of
+ * this object can be described by the
+ * TorusBoundary class.
+ *
+ * @param tria The triangulation to be
+ * filled.
+ *
+ * @param R The radius of the circle,
+ * which forms the middle line of the
+ * torus containing the loop of
+ * cells. Must be greater than @p r.
+ *
+ * @param r The inner radius of the
+ * torus.
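+ *
+ * For example, the surface mesh of a torus with ring radius
+ * <i>R=2</i> and tube radius <i>r=0.5</i> (a sketch):
+ * @code
+ *   Triangulation<2,3> surface_mesh;
+ *   GridGenerator::torus (surface_mesh, 2., 0.5);
+ * @endcode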
+ */
- static void torus (Triangulation<2,3>& tria,
- const double R,
- const double r);
- static void torus (Triangulation<2,3> &tria,
++ static void torus (Triangulation<2,3> &tria,
+ const double R,
+ const double r);
- /**
- * This class produces a square
- * on the <i>xy</i>-plane with a
- * circular hole in the middle,
- * times the interval [0.L]
- * (only in 3d).
- *
- * @image html cubes_hole.png
- *
- * It is implemented in 2d and
- * 3d, and takes the following
- * arguments:
- *
- * @arg @p inner_radius: size of the
- * internal hole
- * @arg @p outer_radius: size of the
- * biggest enclosed cylinder
- * @arg @p L: extension on the @p z-direction
- * @arg @p repetitions: number of subdivisions
- * along the @p z-direction
- * @arg @p colorize: wether to assign different
- * boundary indicators to different faces.
- * The colors are given in lexicographic
- * ordering for the flat faces (0 to 3 in 2d,
- * 0 to 5 in 3d) plus the curved hole
- * (4 in 2d, and 6 in 3d).
- * If @p colorize is set to false, then flat faces
- * get the number 0 and the hole gets number 1.
- */
- template<int dim>
- static void hyper_cube_with_cylindrical_hole (Triangulation<dim> &triangulation,
+ /**
+ * This function produces a square
+ * on the <i>xy</i>-plane with a
+ * circular hole in the middle,
+ * times the interval [0,L]
+ * (only in 3d).
+ *
+ * @image html cubes_hole.png
+ *
+ * It is implemented in 2d and
+ * 3d, and takes the following
+ * arguments:
+ *
+ * @arg @p inner_radius: size of the
+ * internal hole
+ * @arg @p outer_radius: size of the
+ * biggest enclosed cylinder
+ * @arg @p L: extension on the @p z-direction
+ * @arg @p repetitions: number of subdivisions
+ * along the @p z-direction
+ * @arg @p colorize: whether to assign different
+ * boundary indicators to different faces.
+ * The colors are given in lexicographic
+ * ordering for the flat faces (0 to 3 in 2d,
+ * 0 to 5 in 3d) plus the curved hole
+ * (4 in 2d, and 6 in 3d).
+ * If @p colorize is set to false, then flat faces
+ * get the number 0 and the hole gets number 1.
+ */
+ template<int dim>
+ static void hyper_cube_with_cylindrical_hole (Triangulation<dim> &triangulation,
const double inner_radius = .25,
const double outer_radius = .5,
const double L = .5,
const unsigned int repetition = 1,
const bool colorize = false);
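A minimal usage sketch (the concrete radii are chosen arbitrarily; the remaining arguments keep their defaults):

// Assumes the usual deal.II grid headers and namespace.
// A 2d square of half-width 0.5 with a circular hole of radius
// 0.25 in its middle.
Triangulation<2> plate;
GridGenerator::hyper_cube_with_cylindrical_hole (plate, 0.25, 0.5);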
- /**
- * Produce a ring of cells in 3D that is
- * cut open, twisted and glued together
- * again. This results in a kind of
- * moebius-loop.
- *
- * @param tria The triangulation to be worked on.
- * @param n_cells The number of cells in the loop. Must be greater than 4.
- * @param n_rotations The number of rotations (Pi/2 each) to be performed before glueing the loop together.
- * @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r.
- * @param r The radius of the cylinder bend together as loop.
- */
- static void moebius (Triangulation<3,3>& tria,
- const unsigned int n_cells,
- const unsigned int n_rotations,
- const double R,
- const double r);
+ /**
+ * Produce a ring of cells in 3D that is
+ * cut open, twisted and glued together
+ * again. This results in a kind of
+ * moebius-loop.
+ *
+ * @param tria The triangulation to be worked on.
+ * @param n_cells The number of cells in the loop. Must be greater than 4.
+ * @param n_rotations The number of rotations (Pi/2 each) to be performed before gluing the loop together.
+ * @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r.
+ * @param r The radius of the cylinder bent together as a loop.
+ */
+ static void moebius (Triangulation<3,3> &tria,
+ const unsigned int n_cells,
+ const unsigned int n_rotations,
+ const double R,
+ const double r);
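A hedged usage sketch (all numbers are example values only):

// Assumes the usual deal.II grid headers and namespace.
// A loop of 20 cells, twisted by one quarter rotation before the
// ends are glued, bent around a circle of radius 5 with tube radius 1.
Triangulation<3> loop;
GridGenerator::moebius (loop, 20, 1, 5.0, 1.0);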
- /**
- * Given the two triangulations
- * specified as the first two
- * arguments, create the
- * triangulation that contains
- * the cells of both
- * triangulation and store it in
- * the third parameter. Previous
- * content of @p result will be
- * deleted.
- *
- * This function is most often used
- * to compose meshes for more
- * complicated geometries if the
- * geometry can be composed of
- * simpler parts for which functions
- * exist to generate coarse meshes.
- * For example, the channel mesh used
- * in step-35 could in principle be
- * created using a mesh created by the
- * GridGenerator::hyper_cube_with_cylindrical_hole
- * function and several rectangles,
- * and merging them using the current
- * function. The rectangles will
- * have to be translated to the
- * right for this, a task that can
- * be done using the GridTools::shift
- * function (other tools to transform
- * individual mesh building blocks are
- * GridTools::transform, GridTools::rotate,
- * and GridTools::scale).
- *
- * @note The two input triangulations
- * must be coarse meshes that have
- * no refined cells.
- *
- * @note The function copies the material ids
- * of the cells of the two input
- * triangulations into the output
- * triangulation but it currently makes
- * no attempt to do the same for boundary
- * ids. In other words, if the two
- * coarse meshes have anything but
- * the default boundary indicators,
- * then you will currently have to set
- * boundary indicators again by hand
- * in the output triangulation.
- *
- * @note For a related operation
- * on refined meshes when both
- * meshes are derived from the
- * same coarse mesh, see
- * GridTools::create_union_triangulation .
- */
- template <int dim, int spacedim>
- static
- void
- merge_triangulations (const Triangulation<dim, spacedim> &triangulation_1,
- const Triangulation<dim, spacedim> &triangulation_2,
- Triangulation<dim, spacedim> &result);
+ /**
+ * Given the two triangulations
+ * specified as the first two
+ * arguments, create the
+ * triangulation that contains
+ * the cells of both
+ * triangulation and store it in
+ * the third parameter. Previous
+ * content of @p result will be
+ * deleted.
+ *
+ * This function is most often used
+ * to compose meshes for more
+ * complicated geometries if the
+ * geometry can be composed of
+ * simpler parts for which functions
+ * exist to generate coarse meshes.
+ * For example, the channel mesh used
+ * in step-35 could in principle be
+ * created by taking a mesh generated by the
+ * GridGenerator::hyper_cube_with_cylindrical_hole
+ * function and several rectangles,
+ * and merging them using the current
+ * function. The rectangles will
+ * have to be translated to the
+ * right for this, a task that can
+ * be done using the GridTools::shift
+ * function (other tools to transform
+ * individual mesh building blocks are
+ * GridTools::transform, GridTools::rotate,
+ * and GridTools::scale).
+ *
+ * @note The two input triangulations
+ * must be coarse meshes that have
+ * no refined cells.
+ *
+ * @note The function copies the material ids
+ * of the cells of the two input
+ * triangulations into the output
+ * triangulation but it currently makes
+ * no attempt to do the same for boundary
+ * ids. In other words, if the two
+ * coarse meshes have anything but
+ * the default boundary indicators,
+ * then you will currently have to set
+ * boundary indicators again by hand
+ * in the output triangulation.
+ *
+ * @note For a related operation
+ * on refined meshes when both
+ * meshes are derived from the
+ * same coarse mesh, see
+ * GridTools::create_union_triangulation .
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ merge_triangulations (const Triangulation<dim, spacedim> &triangulation_1,
+ const Triangulation<dim, spacedim> &triangulation_2,
+ Triangulation<dim, spacedim> &result);
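A minimal sketch of the composition pattern described above (two unit squares placed side by side; names and sizes are assumptions):

// Assumes <deal.II/grid/grid_generator.h> and
// <deal.II/grid/grid_tools.h> are included.
Triangulation<2> box_1, box_2, merged;
GridGenerator::hyper_cube (box_1, 0, 1);
GridGenerator::hyper_cube (box_2, 0, 1);
GridTools::shift (Point<2>(1, 0), box_2);   // move the second square
GridGenerator::merge_triangulations (box_1, box_2, merged);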
- /**
- * This function transformes the
- * @p Triangulation @p tria
- * smoothly to a domain that is
- * described by the boundary
- * points in the map
- * @p new_points. This map maps
- * the point indices to the
- * boundary points in the
- * transformed domain.
- *
- * Note, that the
- * @p Triangulation is changed
- * in-place, therefore you don't
- * need to keep two
- * triangulations, but the given
- * triangulation is changed
- * (overwritten).
- *
- * In 1d, this function is not
- * currently implemented.
- */
- template <int dim>
- static void laplace_transformation (Triangulation<dim> &tria,
- const std::map<unsigned int,Point<dim> > &new_points);
+ /**
+ * This function transforms the
+ * @p Triangulation @p tria
+ * smoothly to a domain that is
+ * described by the boundary
+ * points in the map
+ * @p new_points. This map maps
+ * the point indices to the
+ * boundary points in the
+ * transformed domain.
+ *
+ * Note that the
+ * @p Triangulation is changed
+ * in place: you do not need to
+ * keep a second triangulation,
+ * since the given one is
+ * overwritten.
+ *
+ * In 1d, this function is not
+ * currently implemented.
+ */
+ template <int dim>
+ static void laplace_transformation (Triangulation<dim> &tria,
+ const std::map<unsigned int,Point<dim> > &new_points);
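A hedged sketch (the vertex indices and target coordinates are made up for illustration; in practice one would list all boundary vertices whose positions are to be prescribed):

// Assumes an existing Triangulation<2> "tria" and the usual headers.
std::map<unsigned int, Point<2> > new_points;
new_points[0] = Point<2>(0.0, 0.0);   // keep this boundary vertex fixed
new_points[1] = Point<2>(1.2, 0.1);   // move this boundary vertex
GridGenerator::laplace_transformation (tria, new_points);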
- /**
- * Exception
- */
- DeclException0 (ExcInvalidRadii);
- /**
- * Exception
- */
- DeclException1 (ExcInvalidRepetitions,
- int,
- << "The number of repetitions " << arg1
- << " must be >=1.");
- /**
- * Exception
- */
- DeclException1 (ExcInvalidRepetitionsDimension,
- int,
- << "The vector of repetitions must have "
- << arg1 <<" elements.");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidRadii);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidRepetitions,
+ int,
+ << "The number of repetitions " << arg1
+ << " must be >=1.");
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidRepetitionsDimension,
+ int,
+ << "The vector of repetitions must have "
+ << arg1 <<" elements.");
- private:
- /**
- * Perform the action specified
- * by the @p colorize flag of
- * the hyper_rectangle()
- * function of this class.
- */
- template <int dim, int spacedim>
- static
- void
- colorize_hyper_rectangle (Triangulation<dim,spacedim> &tria);
+ private:
+ /**
+ * Perform the action specified
+ * by the @p colorize flag of
+ * the hyper_rectangle()
+ * function of this class.
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ colorize_hyper_rectangle (Triangulation<dim,spacedim> &tria);
- /**
- * Perform the action specified
- * by the @p colorize flag of
- * the
- * subdivided_hyper_rectangle()
- * function of this class. This
- * function is singled out
- * because it is dimension
- * specific.
- */
- template <int dim>
- static
- void
- colorize_subdivided_hyper_rectangle (Triangulation<dim> &tria,
- const Point<dim> &p1,
- const Point<dim> &p2,
- const double epsilon);
+ /**
+ * Perform the action specified
+ * by the @p colorize flag of
+ * the
+ * subdivided_hyper_rectangle()
+ * function of this class. This
+ * function is singled out
+ * because it is dimension
+ * specific.
+ */
+ template <int dim>
+ static
+ void
+ colorize_subdivided_hyper_rectangle (Triangulation<dim> &tria,
+ const Point<dim> &p1,
+ const Point<dim> &p2,
+ const double epsilon);
- /**
- * Assign boundary number zero to
- * the inner shell boundary and 1
- * to the outer.
- */
- template<int dim>
- static
- void
- colorize_hyper_shell (Triangulation<dim>& tria,
- const Point<dim>& center,
- const double inner_radius,
- const double outer_radius);
+ /**
+ * Assign boundary number zero to
+ * the inner shell boundary and one
+ * to the outer.
+ */
+ template<int dim>
+ static
+ void
+ colorize_hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius);
- /**
- * Assign boundary number zero the inner
- * shell boundary, one to the outer shell
- * boundary, two to the face with x=0,
- * three to the face with y=0, four to
- * the face with z=0.
- */
- template<int dim>
- static
- void
- colorize_quarter_hyper_shell(Triangulation<dim> & tria,
- const Point<dim>& center,
- const double inner_radius,
- const double outer_radius);
+ /**
+ * Assign boundary number zero to the inner
+ * shell boundary, one to the outer shell
+ * boundary, two to the face with x=0,
+ * three to the face with y=0, four to
+ * the face with z=0.
+ */
+ template<int dim>
+ static
+ void
+ colorize_quarter_hyper_shell(Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius);
- /**
- * Solve the Laplace equation for
- * @p laplace_transformation
- * function for one of the
- * @p dim space
- * dimensions. Externalized into
- * a function of its own in order
- * to allow parallel execution.
- */
- static
- void
- laplace_solve (const SparseMatrix<double> &S,
- const std::map<unsigned int,double> &m,
- Vector<double> &u);
+ /**
+ * Solve the Laplace equation for
+ * @p laplace_transformation
+ * function for one of the
+ * @p dim space
+ * dimensions. Externalized into
+ * a function of its own in order
+ * to allow parallel execution.
+ */
+ static
+ void
+ laplace_solve (const SparseMatrix<double> &S,
+ const std::map<unsigned int,double> &m,
+ Vector<double> &u);
};
template <int dim, int spacedim=dim>
class GridIn
{
- public:
- /**
- * List of possible mesh input
- * formats. These values are used
- * when calling the function
- * read() in order to determine
- * the actual reader to be
- * called.
- */
- enum Format
- {
- /// Use GridIn::default_format stored in this object
- Default,
- /// Use read_unv()
- unv,
- /// Use read_ucd()
- ucd,
- /// Use read_dbmesh()
- dbmesh,
- /// Use read_xda()
- xda,
- /// Use read_msh()
- msh,
- /// Use read_netcdf()
- netcdf,
- /// Use read_tecplot()
- tecplot
- };
-
- /**
- * Constructor.
- */
- GridIn ();
-
- /**
- * Attach this triangulation
- * to be fed with the grid data.
- */
- void attach_triangulation (Triangulation<dim,spacedim> &tria);
-
- /**
- * Read from the given stream. If
- * no format is given,
- * GridIn::Format::Default is
- * used.
- */
- void read (std::istream &in, Format format=Default);
-
- /**
- * Open the file given by the
- * string and call the previous
- * function read(). This function
- * uses the PathSearch mechanism
- * to find files. The file class
- * used is <code>MESH</code>.
- */
- void read (const std::string &in, Format format=Default);
-
- /**
- * Read grid data from an unv
- * file as generated by the
- * Salome mesh generator.
- * Numerical data is ignored.
- *
- * Note the comments on
- * generating this file format in
- * the general documentation of
- * this class.
- */
- void read_unv(std::istream &in);
-
- /**
- * Read grid data from an ucd file.
- * Numerical data is ignored.
- */
- void read_ucd (std::istream &in);
-
- /**
- * Read grid data from a file
- * containing data in the DB mesh
- * format.
- */
- void read_dbmesh (std::istream &in);
-
- /**
- * Read grid data from a file
- * containing data in the XDA
- * format.
- */
- void read_xda (std::istream &in);
-
- /**
- * Read grid data from an msh
- * file, either version 1 or
- * version 2 of that file
- * format. The GMSH formats are
- * documented at
- * http://www.geuz.org/gmsh/ .
- *
- * @note The input function of
- * deal.II does not distinguish
- * between newline and other
- * whitespace. Therefore, deal.II
- * will be able to read files in
- * a slightly more general format
- * than Gmsh.
- */
- void read_msh (std::istream &in);
-
- /**
- * Read grid data from a NetCDF
- * file. The only data format
- * currently supported is the
- * <tt>TAU grid format</tt>.
- *
- * This function requires the
- * library to be linked with the
- * NetCDF library.
- */
- void read_netcdf (const std::string &filename);
-
- /**
- * Read grid data from a file containing
- * tecplot ASCII data. This also works in
- * the absence of any tecplot
- * installation.
- */
- void read_tecplot (std::istream &in);
-
- /**
- * Returns the standard suffix
- * for a file in this format.
- */
- static std::string default_suffix (const Format format);
-
- /**
- * Return the enum Format for the
- * format name.
- */
- static Format parse_format (const std::string &format_name);
-
- /**
- * Return a list of implemented input
- * formats. The different names are
- * separated by vertical bar signs (<tt>`|'</tt>)
- * as used by the ParameterHandler
- * classes.
- */
- static std::string get_format_names ();
-
- /**
- * Exception
- */
- DeclException1(ExcUnknownSectionType,
- int,
- << "The section type <" << arg1 << "> in an UNV "
- << "input file is not implemented.");
-
- /**
- * Exception
- */
- DeclException1(ExcUnknownElementType,
- int,
- << "The element type <" << arg1 << "> in an UNV "
- << "input file is not implemented.");
-
- /**
- * Exception
- */
- DeclException1 (ExcUnknownIdentifier,
- std::string,
- << "The identifier <" << arg1 << "> as name of a "
- << "part in an UCD input file is unknown or the "
- << "respective input routine is not implemented."
- << "(Maybe the space dimension of triangulation and "
- << "input file do not match?");
- /**
- * Exception
- */
- DeclException0 (ExcNoTriangulationSelected);
- /**
- * Exception
- */
- DeclException2 (ExcInvalidVertexIndex,
- int, int,
- << "Trying to access invalid vertex index " << arg2
- << " while creating cell " << arg1);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidDBMeshFormat);
- /**
- * Exception
- */
- DeclException1 (ExcInvalidDBMESHInput,
- std::string,
- << "The string <" << arg1 << "> is not recognized at the present"
- << " position of a DB Mesh file.");
-
- /**
- * Exception
- */
- DeclException1 (ExcDBMESHWrongDimension,
- int,
- << "The specified dimension " << arg1
- << " is not the same as that of the triangulation to be created.");
-
- DeclException1 (ExcInvalidGMSHInput,
- std::string,
- << "The string <" << arg1 << "> is not recognized at the present"
- << " position of a Gmsh Mesh file.");
-
- DeclException1 (ExcGmshUnsupportedGeometry,
- int,
- << "The Element Identifier <" << arg1 << "> is not "
- << "supported in the Deal.II Library.\n"
- << "Supported elements are: \n"
- << "ELM-TYPE\n"
- << "1 Line (2 nodes, 1 edge).\n"
- << "3 Quadrilateral (4 nodes, 4 edges).\n"
- << "5 Hexahedron (8 nodes, 12 edges, 6 faces).\n"
- << "15 Point (1 node, ignored when read)");
-
-
- DeclException0 (ExcGmshNoCellInformation);
- protected:
- /**
- * Store address of the triangulation to
- * be fed with the data read in.
- */
- SmartPointer<Triangulation<dim,spacedim>,GridIn<dim,spacedim> > tria;
-
- /**
- * This function can write the
- * raw cell data objects created
- * by the <tt>read_*</tt> functions in
- * Gnuplot format to a
- * stream. This is sometimes
- * handy if one would like to see
- * what actually was created, if
- * it is known that the data is
- * not correct in some way, but
- * the Triangulation class
- * refuses to generate a
- * triangulation because of these
- * errors. In particular, the
- * output of this class writes
- * out the cell numbers along
- * with the direction of the
- * faces of each cell. In
- * particular the latter
- * information is needed to
- * verify whether the cell data
- * objects follow the
- * requirements of the ordering
- * of cells and their faces,
- * i.e. that all faces need to
- * have unique directions and
- * specified orientations with
- * respect to neighboring cells
- * (see the documentations to
- * this class and the
- * GridReordering class).
- *
- * The output of this function
- * consists of vectors for each
- * line bounding the cells
- * indicating the direction it
- * has with respect to the
- * orientation of this cell, and
- * the cell number. The whole
- * output is in a form such that
- * it can be read in by Gnuplot
- * and generate the full plot
- * without further ado by the
- * user.
- */
- static void debug_output_grid (const std::vector<CellData<dim> > &cells,
- const std::vector<Point<spacedim> > &vertices,
- std::ostream &out);
-
- private:
-
- /**
- * Skip empty lines in the input
- * stream, i.e. lines that
- * contain either nothing or only
- * whitespace.
- */
- static void skip_empty_lines (std::istream &in);
-
- /**
- * Skip lines of comment that
- * start with the indicated
- * character (e.g. <tt>#</tt>)
- * following the point where the
- * given input stream presently
- * is. After the call to this
- * function, the stream is at the
- * start of the first line after
- * the comment lines, or at the
- * same position as before if
- * there were no lines of
- * comments.
- */
- static void skip_comment_lines (std::istream &in,
- const char comment_start);
-
- /**
- * This function does the nasty work (due
- * to very lax conventions and different
- * versions of the tecplot format) of
- * extracting the important parameters from
- * a tecplot header, contained in the
- * string @p header. The other variables
- * are output variables, their value has no
- * influence on the function execution..
- */
- static void parse_tecplot_header(std::string &header,
- std::vector<unsigned int> &tecplot2deal,
- unsigned int &n_vars,
- unsigned int &n_vertices,
- unsigned int &n_cells,
- std::vector<unsigned int> &IJK,
- bool &structured,
- bool &blocked);
-
- /**
- * Input format used by read() if
- * no format is given.
- */
- Format default_format;
+ public:
+ /**
+ * List of possible mesh input
+ * formats. These values are used
+ * when calling the function
+ * read() in order to determine
+ * the actual reader to be
+ * called.
+ */
+ enum Format
+ {
+ /// Use GridIn::default_format stored in this object
+ Default,
+ /// Use read_unv()
+ unv,
+ /// Use read_ucd()
+ ucd,
+ /// Use read_dbmesh()
+ dbmesh,
+ /// Use read_xda()
+ xda,
+ /// Use read_msh()
+ msh,
+ /// Use read_netcdf()
+ netcdf,
+ /// Use read_tecplot()
+ tecplot
+ };
+
+ /**
+ * Constructor.
+ */
+ GridIn ();
+
+ /**
+ * Attach this triangulation
+ * to be fed with the grid data.
+ */
+ void attach_triangulation (Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Read from the given stream. If
+ * no format is given,
+ * GridIn::Format::Default is
+ * used.
+ */
+ void read (std::istream &in, Format format=Default);
+
+ /**
+ * Open the file given by the
+ * string and call the previous
+ * function read(). This function
+ * uses the PathSearch mechanism
+ * to find files. The file class
+ * used is <code>MESH</code>.
+ */
+ void read (const std::string &in, Format format=Default);
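The typical reading sequence looks like the following sketch (the file name is an assumption):

// Assumes <deal.II/grid/grid_in.h> and <fstream> are included and
// "using namespace dealii;" is in effect.
Triangulation<2> tria;
GridIn<2> grid_in;
grid_in.attach_triangulation (tria);
std::ifstream input_file ("mesh.msh");
grid_in.read (input_file, GridIn<2>::msh);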
+
+ /**
+ * Read grid data from an unv
+ * file as generated by the
+ * Salome mesh generator.
+ * Numerical data is ignored.
+ *
+ * Note the comments on
+ * generating this file format in
+ * the general documentation of
+ * this class.
+ */
+ void read_unv(std::istream &in);
+
+ /**
+ * Read grid data from an ucd file.
+ * Numerical data is ignored.
+ */
+ void read_ucd (std::istream &in);
+
+ /**
+ * Read grid data from a file
+ * containing data in the DB mesh
+ * format.
+ */
+ void read_dbmesh (std::istream &in);
+
+ /**
+ * Read grid data from a file
+ * containing data in the XDA
+ * format.
+ */
+ void read_xda (std::istream &in);
+
+ /**
+ * Read grid data from an msh
+ * file, either version 1 or
+ * version 2 of that file
+ * format. The GMSH formats are
+ * documented at
+ * http://www.geuz.org/gmsh/ .
+ *
+ * @note The input function of
+ * deal.II does not distinguish
+ * between newline and other
+ * whitespace. Therefore, deal.II
+ * will be able to read files in
+ * a slightly more general format
+ * than Gmsh.
+ */
+ void read_msh (std::istream &in);
+
+ /**
+ * Read grid data from a NetCDF
+ * file. The only data format
+ * currently supported is the
+ * <tt>TAU grid format</tt>.
+ *
+ * This function requires the
+ * library to be linked with the
+ * NetCDF library.
+ */
+ void read_netcdf (const std::string &filename);
+
+ /**
+ * Read grid data from a file containing
+ * tecplot ASCII data. This also works in
+ * the absence of any tecplot
+ * installation.
+ */
+ void read_tecplot (std::istream &in);
+
+ /**
+ * Return the standard suffix
+ * for a file in this format.
+ */
+ static std::string default_suffix (const Format format);
+
+ /**
+ * Return the enum Format for the
+ * format name.
+ */
+ static Format parse_format (const std::string &format_name);
+
+ /**
+ * Return a list of implemented input
+ * formats. The different names are
+ * separated by vertical bar signs (<tt>`|'</tt>)
+ * as used by the ParameterHandler
+ * classes.
+ */
+ static std::string get_format_names ();
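A small sketch of how these helpers fit together (the format name would typically come from a parameter file):

// Translate a format name into the Format enum and query the
// customary file suffix for it.
GridIn<2>::Format format = GridIn<2>::parse_format ("msh");
const std::string suffix = GridIn<2>::default_suffix (format);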
+
+ /**
+ * Exception
+ */
+ DeclException1(ExcUnknownSectionType,
+ int,
+ << "The section type <" << arg1 << "> in an UNV "
+ << "input file is not implemented.");
+
+ /**
+ * Exception
+ */
+ DeclException1(ExcUnknownElementType,
+ int,
+ << "The element type <" << arg1 << "> in an UNV "
+ << "input file is not implemented.");
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcUnknownIdentifier,
+ std::string,
+ << "The identifier <" << arg1 << "> as name of a "
+ << "part in an UCD input file is unknown or the "
+ << "respective input routine is not implemented."
+ << "(Maybe the space dimension of triangulation and "
+ << "input file do not match?");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNoTriangulationSelected);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidVertexIndex,
+ int, int,
+ << "Trying to access invalid vertex index " << arg2
+ << " while creating cell " << arg1);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidDBMeshFormat);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidDBMESHInput,
+ std::string,
+ << "The string <" << arg1 << "> is not recognized at the present"
+ << " position of a DB Mesh file.");
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcDBMESHWrongDimension,
+ int,
+ << "The specified dimension " << arg1
+ << " is not the same as that of the triangulation to be created.");
+
+ DeclException1 (ExcInvalidGMSHInput,
+ std::string,
+ << "The string <" << arg1 << "> is not recognized at the present"
+ << " position of a Gmsh Mesh file.");
+
+ DeclException1 (ExcGmshUnsupportedGeometry,
+ int,
+ << "The Element Identifier <" << arg1 << "> is not "
+ << "supported in the Deal.II Library.\n"
+ << "Supported elements are: \n"
+ << "ELM-TYPE\n"
+ << "1 Line (2 nodes, 1 edge).\n"
+ << "3 Quadrilateral (4 nodes, 4 edges).\n"
+ << "5 Hexahedron (8 nodes, 12 edges, 6 faces).\n"
+ << "15 Point (1 node, ignored when read)");
+
+
+ DeclException0 (ExcGmshNoCellInformation);
+ protected:
+ /**
+ * Store address of the triangulation to
+ * be fed with the data read in.
+ */
+ SmartPointer<Triangulation<dim,spacedim>,GridIn<dim,spacedim> > tria;
+
+ /**
+ * This function can write the
+ * raw cell data objects created
+ * by the <tt>read_*</tt> functions in
+ * Gnuplot format to a
+ * stream. This is sometimes
+ * handy if one would like to see
+ * what actually was created, if
+ * it is known that the data is
+ * not correct in some way, but
+ * the Triangulation class
+ * refuses to generate a
+ * triangulation because of these
+ * errors. In particular, the
+ * output of this class writes
+ * out the cell numbers along
+ * with the direction of the
+ * faces of each cell. The
+ * latter
+ * information is needed to
+ * verify whether the cell data
+ * objects follow the
+ * requirements of the ordering
+ * of cells and their faces,
+ * i.e. that all faces need to
+ * have unique directions and
+ * specified orientations with
+ * respect to neighboring cells
+ * (see the documentation of
+ * this class and the
+ * GridReordering class).
+ *
+ * The output of this function
+ * consists of vectors for each
+ * line bounding the cells
+ * indicating the direction it
+ * has with respect to the
+ * orientation of this cell, and
+ * the cell number. The whole
+ * output is in a form such that
+ * it can be read in by Gnuplot
+ * and generate the full plot
+ * without further ado by the
+ * user.
+ */
+ static void debug_output_grid (const std::vector<CellData<dim> > &cells,
+ const std::vector<Point<spacedim> > &vertices,
+ std::ostream &out);
+
+ private:
+
+ /**
+ * Skip empty lines in the input
+ * stream, i.e. lines that
+ * contain either nothing or only
+ * whitespace.
+ */
+ static void skip_empty_lines (std::istream &in);
+
+ /**
+ * Skip lines of comment that
+ * start with the indicated
+ * character (e.g. <tt>#</tt>)
+ * following the point where the
+ * given input stream presently
+ * is. After the call to this
+ * function, the stream is at the
+ * start of the first line after
+ * the comment lines, or at the
+ * same position as before if
+ * there were no lines of
+ * comments.
+ */
+ static void skip_comment_lines (std::istream &in,
+ const char comment_start);
+
+ /**
+ * This function does the nasty work (due
+ * to very lax conventions and different
+ * versions of the tecplot format) of
+ * extracting the important parameters from
+ * a tecplot header, contained in the
+ * string @p header. The other variables
+ * are output variables; their values have no
+ * influence on the function's execution.
+ */
+ static void parse_tecplot_header(std::string &header,
+ std::vector<unsigned int> &tecplot2deal,
+ unsigned int &n_vars,
+ unsigned int &n_vertices,
+ unsigned int &n_cells,
+ std::vector<unsigned int> &IJK,
+ bool &structured,
+ bool &blocked);
+
+ /**
+ * Input format used by read() if
+ * no format is given.
+ */
+ Format default_format;
};
};
- /**
- * An enriched quad with information about how the mesh fits together
- * so that we can move around the mesh efficiently.
- *
- * @author Michael Anderson, 2003
- */
+ /**
+ * An enriched quad with information about how the mesh fits together
+ * so that we can move around the mesh efficiently.
+ *
+ * @author Michael Anderson, 2003
+ */
class MQuad
{
- public:
- /**
- * v0 - v3 are indexes of the
- * vertices of the quad, s0 -
- * s3 are indexes for the
- * sides of the quad
- */
- MQuad (const unsigned int v0,
- const unsigned int v1,
- const unsigned int v2,
- const unsigned int v3,
- const unsigned int s0,
- const unsigned int s1,
- const unsigned int s2,
- const unsigned int s3,
- const CellData<2> &cd);
-
- /**
- * Stores the vertex numbers
- */
- unsigned int v[4];
- /**
- * Stores the side numbers
- */
- unsigned int side[4];
-
- /**
- * Copy of the @p CellData object
- * from which we construct the
- * data of this object.
- */
- CellData<2> original_cell_data;
+ public:
+ /**
+ * v0 - v3 are indices of the
+ * vertices of the quad, s0 -
+ * s3 are indices for the
+ * sides of the quad
+ */
+ MQuad (const unsigned int v0,
+ const unsigned int v1,
+ const unsigned int v2,
+ const unsigned int v3,
+ const unsigned int s0,
+ const unsigned int s1,
+ const unsigned int s2,
+ const unsigned int s3,
+ const CellData<2> &cd);
+
+ /**
+ * Stores the vertex numbers
+ */
+ unsigned int v[4];
+ /**
+ * Stores the side numbers
+ */
+ unsigned int side[4];
+
+ /**
+ * Copy of the @p CellData object
+ * from which we construct the
+ * data of this object.
+ */
+ CellData<2> original_cell_data;
};
- /**
- * The enriched side class containing connectivity information.
- * Orientation is from v0 to v1; Initially this should have v0<v1.
- * After global orientation could be either way.
- *
- * @author Michael Anderson, 2003
- */
+ /**
+ * The enriched side class containing connectivity information.
+ * Orientation is from v0 to v1; initially this should have v0<v1.
+ * After global orientation it could be either way.
+ *
+ * @author Michael Anderson, 2003
+ */
struct MSide
{
- /**
- * Constructor.
- */
- MSide (const unsigned int initv0,
- const unsigned int initv1);
-
- /**
- * Return whether the sides
- * are equal, even if their
- * ends are reversed.
- */
- bool operator==(const MSide& s2) const;
-
- /**
- * Return the opposite.
- */
- bool operator!=(const MSide& s2) const;
-
- unsigned int v0;
- unsigned int v1;
- unsigned int Q0;
- unsigned int Q1;
-
- /**
- * Local side numbers on quads 0 and 1.
- */
- unsigned int lsn0, lsn1;
- bool Oriented;
-
- /**
- * This class makes a MSide have v0<v1
- */
- struct SideRectify;
-
- /**
- * Provides a side ordering,
- * s1<s2, without assuming
- * v0<v1 in either of the
- * sides.
- */
- struct SideSortLess;
+ /**
+ * Constructor.
+ */
+ MSide (const unsigned int initv0,
+ const unsigned int initv1);
+
+ /**
+ * Return whether the sides
+ * are equal, even if their
+ * ends are reversed.
+ */
+ bool operator==(const MSide &s2) const;
+
+ /**
+ * Return the opposite.
+ */
+ bool operator!=(const MSide &s2) const;
+
+ unsigned int v0;
+ unsigned int v1;
+ unsigned int Q0;
+ unsigned int Q1;
+
+ /**
+ * Local side numbers on quads 0 and 1.
+ */
+ unsigned int lsn0, lsn1;
+ bool Oriented;
+
+ /**
+ * This class makes an MSide have v0<v1
+ */
+ struct SideRectify;
+
+ /**
+ * Provides a side ordering,
+ * s1<s2, without assuming
+ * v0<v1 in either of the
+ * sides.
+ */
+ struct SideSortLess;
};
const unsigned int vertex);
- /**
- * Find and return an iterator to
- * the active cell that surrounds
- * a given point @p ref. The
- * type of the first parameter
- * may be either
- * Triangulation,
- * DoFHandler, or
- * MGDoFHandler, i.e. we
- * can find the cell around a
- * point for iterators into each
- * of these classes.
- *
- * This is solely a wrapper function
- * for the @p interpolate function
- * given below,
- * providing backward compatibility.
- * A Q1 mapping is used for the
- * boundary, and the iterator to
- * the cell in which the point
- * resides is returned.
- *
- * It is recommended to use the
- * other version of this function,
- * as it simultaneously delivers the
- * local coordinate of the given point
- * without additional computational cost.
- */
+ /**
+ * Find and return an iterator to
+ * the active cell that surrounds
+ * a given point @p ref. The
+ * type of the first parameter
+ * may be either
+ * Triangulation,
+ * DoFHandler, or
+ * MGDoFHandler, i.e. we
+ * can find the cell around a
+ * point for iterators into each
+ * of these classes.
+ *
+ * This is solely a wrapper function
+ * for the @p interpolate function
+ * given below,
+ * providing backward compatibility.
+ * A Q1 mapping is used for the
+ * boundary, and the iterator to
+ * the cell in which the point
+ * resides is returned.
+ *
+ * It is recommended to use the
+ * other version of this function,
+ * as it simultaneously delivers the
+ * local coordinate of the given point
+ * without additional computational cost.
+ */
template <int dim, template <int,int> class Container, int spacedim>
typename Container<dim,spacedim>::active_cell_iterator
- find_active_cell_around_point (const Container<dim,spacedim> &container,
+ find_active_cell_around_point (const Container<dim,spacedim> &container,
const Point<spacedim> &p);
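A hedged usage sketch (assuming, as for the other functions referenced in this header, that the function lives in namespace GridTools, and that a DoFHandler<2> object "dof_handler" already exists):

Point<2> p (0.3, 0.7);
DoFHandler<2>::active_cell_iterator cell
  = GridTools::find_active_cell_around_point (dof_handler, p);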
- /**
- * Find and return an iterator to
- * the active cell that surrounds
- * a given point @p p. The
- * type of the first parameter
- * may be either
- * Triangulation,
- * DoFHandler, hp::DoFHandler, or
- * MGDoFHandler, i.e., we
- * can find the cell around a
- * point for iterators into each
- * of these classes.
- *
- * The algorithm used in this
- * function proceeds by first
- * looking for vertex located
- * closest to the given point, see
- * find_closest_vertex(). Secondly,
- * all adjacent cells to this point
- * are found in the mesh, see
- * find_cells_adjacent_to_vertex().
- * Lastly, for each of these cells,
- * it is tested whether the point is
- * inside. This check is performed
- * using arbitrary boundary mappings.
- * Still, it is possible that due
- * to roundoff errors, the point
- * cannot be located exactly inside
- * the unit cell. In this case,
- * even points at a very small
- * distance outside the unit cell
- * are allowed.
- *
- * If a point lies on the
- * boundary of two or more cells,
- * then the algorithm tries to identify
- * the cell that is of highest
- * refinement level.
- *
- * The function returns an
- * iterator to the cell, as well
- * as the local position of the
- * point inside the unit
- * cell. This local position
- * might be located slightly
- * outside an actual unit cell,
- * due to numerical roundoff.
- * Therefore, the point returned
- * by this function should
- * be projected onto the unit cell,
- * using GeometryInfo::project_to_unit_cell.
- * This is not automatically performed
- * by the algorithm.
- */
+ /**
+ * Find and return an iterator to
+ * the active cell that surrounds
+ * a given point @p p. The
+ * type of the first parameter
+ * may be either
+ * Triangulation,
+ * DoFHandler, hp::DoFHandler, or
+ * MGDoFHandler, i.e., we
+ * can find the cell around a
+ * point for iterators into each
+ * of these classes.
+ *
+ * The algorithm used in this
+ * function proceeds by first
+ * looking for the vertex located
+ * closest to the given point, see
+ * find_closest_vertex(). Secondly,
+ * all adjacent cells to this point
+ * are found in the mesh, see
+ * find_cells_adjacent_to_vertex().
+ * Lastly, for each of these cells,
+ * it is tested whether the point is
+ * inside. This check is performed
+ * using arbitrary boundary mappings.
+ * Still, it is possible that due
+ * to roundoff errors, the point
+ * cannot be located exactly inside
+ * the unit cell. In this case,
+ * even points at a very small
+ * distance outside the unit cell
+ * are allowed.
+ *
+ * If a point lies on the
+ * boundary of two or more cells,
+ * then the algorithm tries to identify
+ * the cell that is of highest
+ * refinement level.
+ *
+ * The function returns an
+ * iterator to the cell, as well
+ * as the local position of the
+ * point inside the unit
+ * cell. This local position
+ * might be located slightly
+ * outside an actual unit cell,
+ * due to numerical roundoff.
+ * Therefore, the point returned
+ * by this function should
+ * be projected onto the unit cell,
+ * using GeometryInfo::project_to_unit_cell.
+ * This is not automatically performed
+ * by the algorithm.
+ */
template <int dim, template<int, int> class Container, int spacedim>
std::pair<typename Container<dim,spacedim>::active_cell_iterator, Point<dim> >
find_active_cell_around_point (const Mapping<dim,spacedim> &mapping,
const SparsityPattern &cell_connection_graph,
Triangulation<dim,spacedim> &triangulation);
- /**
- * For each active cell, return in the
- * output array to which subdomain (as
- * given by the <tt>cell->subdomain_id()</tt>
- * function) it belongs. The output array
- * is supposed to have the right size
- * already when calling this function.
- *
- * This function returns the association
- * of each cell with one subdomain. If
- * you are looking for the association of
- * each @em DoF with a subdomain, use the
- * <tt>DoFTools::get_subdomain_association</tt>
- * function.
- */
+ /**
+ * For each active cell, return in the
+ * output array to which subdomain (as
+ * given by the <tt>cell->subdomain_id()</tt>
+ * function) it belongs. The output array
+ * is supposed to have the right size
+ * already when calling this function.
+ *
+ * This function returns the association
+ * of each cell with one subdomain. If
+ * you are looking for the association of
+ * each @em DoF with a subdomain, use the
+ * <tt>DoFTools::get_subdomain_association</tt>
+ * function.
+ */
template <int dim, int spacedim>
void
- get_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
+ get_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
std::vector<types::subdomain_id> &subdomain);
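A minimal sketch (assuming a partitioned Triangulation<2> named "triangulation" and that this function lives in namespace GridTools):

std::vector<types::subdomain_id> subdomain_of_cell (triangulation.n_active_cells());
GridTools::get_subdomain_association (triangulation, subdomain_of_cell);
// subdomain_of_cell[c] now holds the subdomain id of active cell c.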
- /**
- * Count how many cells are uniquely
- * associated with the given @p subdomain
- * index.
- *
- * This function may return zero
- * if there are no cells with the
- * given @p subdomain index. This
- * can happen, for example, if
- * you try to partition a coarse
- * mesh into more partitions (one
- * for each processor) than there
- * are cells in the mesh.
- *
- * This function returns the number of
- * cells associated with one
- * subdomain. If you are looking for the
- * association of @em DoFs with this
- * subdomain, use the
- * <tt>DoFTools::count_dofs_with_subdomain_association</tt>
- * function.
- */
+ /**
+ * Count how many cells are uniquely
+ * associated with the given @p subdomain
+ * index.
+ *
+ * This function may return zero
+ * if there are no cells with the
+ * given @p subdomain index. This
+ * can happen, for example, if
+ * you try to partition a coarse
+ * mesh into more partitions (one
+ * for each processor) than there
+ * are cells in the mesh.
+ *
+ * This function returns the number of
+ * cells associated with one
+ * subdomain. If you are looking for the
+ * association of @em DoFs with this
+ * subdomain, use the
+ * <tt>DoFTools::count_dofs_with_subdomain_association</tt>
+ * function.
+ */
template <int dim, int spacedim>
unsigned int
count_cells_with_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
template <typename G>
class TriaObjects
{
- public:
- /**
- * Constructor resetting some data.
- */
- TriaObjects();
-
- /**
- * Vector of the objects belonging to
- * this level. The index of the object
- * equals the index in this container.
- */
- std::vector<G> cells;
- /**
- * Index of the even children of an object.
- * Since when objects are refined, all
- * children are created at the same
- * time, they are appended to the list
- * at least in pairs after each other.
- * We therefore only store the index
- * of the even children, the uneven
- * follow immediately afterwards.
- *
- * If an object has no children, -1 is
- * stored in this list. An object is
- * called active if it has no
- * children. The function
- * TriaAccessorBase::has_children()
- * tests for this.
- */
- std::vector<int> children;
-
- /**
- * Store the refinement
- * case each of the
- * cells is refined
- * with. This vector
- * might be replaced by
- * vector<vector<bool> >
- * (dim, vector<bool>
- * (n_cells)) which is
- * more memory efficient.
- */
- std::vector<RefinementCase<G::dimension> > refinement_cases;
-
- /**
- * Vector storing whether an object is
- * used in the @p cells vector.
- *
- * Since it is difficult to delete
- * elements in a @p vector, when an
- * element is not needed any more
- * (e.g. after derefinement), it is
- * not deleted from the list, but
- * rather the according @p used flag
- * is set to @p false.
- */
- std::vector<bool> used;
-
- /**
- * Make available a field for user data,
- * one bit per object. This field is usually
- * used when an operation runs over all
- * cells and needs information whether
- * another cell (e.g. a neighbor) has
- * already been processed.
- *
- * You can clear all used flags using
- * dealii::Triangulation::clear_user_flags().
- */
- std::vector<bool> user_flags;
-
-
- /**
- * We use this union to store
- * boundary and material
- * data. Because only one one
- * out of these two is
- * actually needed here, we
- * use an union.
- */
- struct BoundaryOrMaterialId
+ public:
+ /**
+ * Constructor resetting some data.
+ */
+ TriaObjects();
+
+ /**
+ * Vector of the objects belonging to
+ * this level. The index of the object
+ * equals the index in this container.
+ */
+ std::vector<G> cells;
+ /**
+ * Index of the even children of an object.
+ * Since when objects are refined, all
+ * children are created at the same
+ * time, they are appended to the list
+ * at least in pairs after each other.
+ * We therefore only store the index
+ * of the even children; the odd ones
+ * follow immediately afterwards.
+ *
+ * If an object has no children, -1 is
+ * stored in this list. An object is
+ * called active if it has no
+ * children. The function
+ * TriaAccessorBase::has_children()
+ * tests for this.
+ */
+ std::vector<int> children;
+
+ /**
+ * Store the refinement
+ * case each of the
+ * cells is refined
+ * with. This vector
+ * might be replaced by
+ * vector<vector<bool> >
+ * (dim, vector<bool>
+ * (n_cells)) which is
+ * more memory efficient.
+ */
+ std::vector<RefinementCase<G::dimension> > refinement_cases;
+
+ /**
+ * Vector storing whether an object is
+ * used in the @p cells vector.
+ *
+ * Since it is difficult to delete
+ * elements in a @p vector, when an
+ * element is not needed any more
+ * (e.g. after derefinement), it is
+ * not deleted from the list, but
+ * rather the corresponding @p used flag
+ * is set to @p false.
+ */
+ std::vector<bool> used;
+
+ /**
+ * Make available a field for user data,
+ * one bit per object. This field is usually
+ * used when an operation runs over all
+ * cells and needs information whether
+ * another cell (e.g. a neighbor) has
+ * already been processed.
+ *
+ * You can clear all used flags using
+ * dealii::Triangulation::clear_user_flags().
+ */
+ std::vector<bool> user_flags;
+
+
+ /**
+ * We use this union to store
+ * boundary and material
+ * data. Because only one
+ * of the two is
+ * actually needed here, we
+ * use a union.
+ */
+ struct BoundaryOrMaterialId
+ {
+ union
{
- union
- {
- types::boundary_id boundary_id;
- types::material_id material_id;
- };
-
-
- /**
- * Default constructor.
- */
- BoundaryOrMaterialId ();
-
- /**
- * Return the size of objects
- * of this kind.
- */
- static
- std::size_t memory_consumption ();
-
- /**
- * Read or write the data
- * of this object to or
- * from a stream for the
- * purpose of
- * serialization
- */
- template <class Archive>
- void serialize(Archive & ar,
- const unsigned int version);
+ types::boundary_id boundary_id;
+ types::material_id material_id;
};
- /**
- * Store boundary and material data. For
- * example, in one dimension, this field
- * stores the material id of a line, which
- * is a number between 0 and
- * numbers::invalid_material_id-1. In more
- * than one dimension, lines have no
- * material id, but they may be at the
- * boundary; then, we store the
- * boundary indicator in this field,
- * which denotes to which part of the
- * boundary this line belongs and which
- * boundary conditions hold on this
- * part. The boundary indicator also
- * is a number between zero and
- * numbers::internal_face_boundary_id-1;
- * the id numbers::internal_face_boundary_id
- * is reserved for lines
- * in the interior and may be used
- * to check whether a line is at the
- * boundary or not, which otherwise
- * is not possible if you don't know
- * which cell it belongs to.
- */
- std::vector<BoundaryOrMaterialId> boundary_or_material_id;
-
- /**
- * Assert that enough space
- * is allocated to
- * accommodate
- * <code>new_objs_in_pairs</code>
- * new objects, stored in
- * pairs, plus
- * <code>new_obj_single</code>
- * stored individually.
- * This function does not
- * only call
- * <code>vector::reserve()</code>,
- * but does really append
- * the needed elements.
- *
- * In 2D e.g. refined lines have to be
- * stored in pairs, whereas new lines in the
- * interior of refined cells can be stored as
- * single lines.
- */
- void reserve_space (const unsigned int new_objs_in_pairs,
- const unsigned int new_objs_single = 0);
-
- /**
- * Return an iterator to the
- * next free slot for a
- * single object. This
- * function is only used by
- * dealii::Triangulation::execute_refinement()
- * in 3D.
- *
- * @warning Interestingly,
- * this function is not used
- * for 1D or 2D
- * triangulations, where it
- * seems the authors of the
- * refinement function insist
- * on reimplementing its
- * contents.
- *
- * @todo This function is
- * not instantiated for the
- * codim-one case
- */
- template <int dim, int spacedim>
- dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
- next_free_single_object (const dealii::Triangulation<dim,spacedim> &tria);
-
- /**
- * Return an iterator to the
- * next free slot for a pair
- * of objects. This
- * function is only used by
- * dealii::Triangulation::execute_refinement()
- * in 3D.
- *
- * @warning Interestingly,
- * this function is not used
- * for 1D or 2D
- * triangulations, where it
- * seems the authors of the
- * refinement function insist
- * on reimplementing its
- * contents.
- *
- * @todo This function is
- * not instantiated for the
- * codim-one case
- */
- template <int dim, int spacedim>
- dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
- next_free_pair_object (const dealii::Triangulation<dim,spacedim> &tria);
-
- /**
- * Return an iterator to the
- * next free slot for a pair
- * of hexes. Only implemented
- * for
- * <code>G=Hexahedron</code>.
- */
- template <int dim, int spacedim>
- typename dealii::Triangulation<dim,spacedim>::raw_hex_iterator
- next_free_hex (const dealii::Triangulation<dim,spacedim> &tria,
- const unsigned int level);
-
- /**
- * Clear all the data contained in this object.
- */
- void clear();
-
- /**
- * The orientation of the
- * face number <code>face</code>
- * of the cell with number
- * <code>cell</code>. The return
- * value is <code>true</code>, if
- * the normal vector points
- * the usual way
- * (GeometryInfo::unit_normal_orientation)
- * and <code>false</code> else.
- *
- * The result is always
- * <code>true</code> in this
- * class, but derived classes
- * will reimplement this.
- *
- * @warning There is a bug in
- * the class hierarchy right
- * now. Avoid ever calling
- * this function through a
- * reference, since you might
- * end up with the base class
- * function instead of the
- * derived class. Still, we
- * do not want to make it
- * virtual for efficiency
- * reasons.
- */
- bool face_orientation(const unsigned int cell, const unsigned int face) const;
-
-
- /**
- * Access to user pointers.
- */
- void*& user_pointer(const unsigned int i);
-
- /**
- * Read-only access to user pointers.
- */
- const void* user_pointer(const unsigned int i) const;
-
- /**
- * Access to user indices.
- */
- unsigned int& user_index(const unsigned int i);
-
- /**
- * Read-only access to user pointers.
- */
- unsigned int user_index(const unsigned int i) const;
-
- /**
- * Reset user data to zero.
- */
- void clear_user_data(const unsigned int i);
-
- /**
- * Clear all user pointers or
- * indices and reset their
- * type, such that the next
- * access may be aither or.
- */
- void clear_user_data();
-
- /**
- * Clear all user flags.
- */
- void clear_user_flags();
-
- /**
- * Check the memory consistency of the
- * different containers. Should only be
- * called with the prepro flag @p DEBUG
- * set. The function should be called from
- * the functions of the higher
- * TriaLevel classes.
- */
- void monitor_memory (const unsigned int true_dimension) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Read or write the data of this object to or
- * from a stream for the purpose of serialization
- */
+
+
+ /**
+ * Default constructor.
+ */
+ BoundaryOrMaterialId ();
+
+ /**
+ * Return the size of objects
+ * of this kind.
+ */
+ static
+ std::size_t memory_consumption ();
+
+ /**
+ * Read or write the data
+ * of this object to or
+ * from a stream for the
+ * purpose of
+ * serialization
+ */
template <class Archive>
- void serialize(Archive & ar,
+ void serialize(Archive &ar,
const unsigned int version);
-
- /**
- * Exception
- */
- DeclException3 (ExcMemoryWasted,
- char*, int, int,
- << "The container " << arg1 << " contains "
- << arg2 << " elements, but it`s capacity is "
- << arg3 << ".");
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException2 (ExcMemoryInexact,
- int, int,
- << "The containers have sizes " << arg1 << " and "
- << arg2 << ", which is not as expected.");
-
- /**
- * Exception
- */
- DeclException2 (ExcWrongIterator,
- char*, char*,
- << "You asked for the next free " << arg1 << "_iterator, "
- "but you can only ask for " << arg2 <<"_iterators.");
-
- /**
- * dealii::Triangulation objects can
- * either access a user
- * pointer or a user
- * index. What you tried to
- * do is trying to access one
- * of those after using the
- * other.
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcPointerIndexClash);
-
- protected:
- /**
- * Counter for next_free_single_* functions
- */
- unsigned int next_free_single;
-
- /**
- * Counter for next_free_pair_* functions
- */
- unsigned int next_free_pair;
-
- /**
- * Bool flag for next_free_single_* functions
- */
- bool reverse_order_next_free_single;
-
- /**
- * The data type storing user
- * pointers or user indices.
- */
- struct UserData
+ };
+ /**
+ * Store boundary and material data. For
+ * example, in one dimension, this field
+ * stores the material id of a line, which
+ * is a number between 0 and
+ * numbers::invalid_material_id-1. In more
+ * than one dimension, lines have no
+ * material id, but they may be at the
+ * boundary; then, we store the
+ * boundary indicator in this field,
+ * which denotes to which part of the
+ * boundary this line belongs and which
+ * boundary conditions hold on this
+ * part. The boundary indicator also
+ * is a number between zero and
+ * numbers::internal_face_boundary_id-1;
+ * the id numbers::internal_face_boundary_id
+ * is reserved for lines
+ * in the interior and may be used
+ * to check whether a line is at the
+ * boundary or not, which otherwise
+ * is not possible if you don't know
+ * which cell it belongs to.
+ */
+ std::vector<BoundaryOrMaterialId> boundary_or_material_id;
+
+ /**
+ * Assert that enough space
+ * is allocated to
+ * accommodate
+ * <code>new_objs_in_pairs</code>
+ * new objects, stored in
+ * pairs, plus
+ * <code>new_obj_single</code>
+ * stored individually.
+ * This function does not
+ * only call
+ * <code>vector::reserve()</code>,
+ * but really appends
+ * the needed elements.
+ *
+ * In 2D e.g. refined lines have to be
+ * stored in pairs, whereas new lines in the
+ * interior of refined cells can be stored as
+ * single lines.
+ */
+ void reserve_space (const unsigned int new_objs_in_pairs,
+ const unsigned int new_objs_single = 0);
+
+ /**
+ * Return an iterator to the
+ * next free slot for a
+ * single object. This
+ * function is only used by
+ * dealii::Triangulation::execute_refinement()
+ * in 3D.
+ *
+ * @warning Interestingly,
+ * this function is not used
+ * for 1D or 2D
+ * triangulations, where it
+ * seems the authors of the
+ * refinement function insist
+ * on reimplementing its
+ * contents.
+ *
+ * @todo This function is
+ * not instantiated for the
+ * codim-one case
+ */
+ template <int dim, int spacedim>
+ dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
+ next_free_single_object (const dealii::Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Return an iterator to the
+ * next free slot for a pair
+ * of objects. This
+ * function is only used by
+ * dealii::Triangulation::execute_refinement()
+ * in 3D.
+ *
+ * @warning Interestingly,
+ * this function is not used
+ * for 1D or 2D
+ * triangulations, where it
+ * seems the authors of the
+ * refinement function insist
+ * on reimplementing its
+ * contents.
+ *
+ * @todo This function is
+ * not instantiated for the
+ * codim-one case
+ */
+ template <int dim, int spacedim>
+ dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
+ next_free_pair_object (const dealii::Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Return an iterator to the
+ * next free slot for a pair
+ * of hexes. Only implemented
+ * for
+ * <code>G=Hexahedron</code>.
+ */
+ template <int dim, int spacedim>
+ typename dealii::Triangulation<dim,spacedim>::raw_hex_iterator
+ next_free_hex (const dealii::Triangulation<dim,spacedim> &tria,
+ const unsigned int level);
+
+ /**
+ * Clear all the data contained in this object.
+ */
+ void clear();
+
+ /**
+ * The orientation of the
+ * face number <code>face</code>
+ * of the cell with number
+ * <code>cell</code>. The return
+ * value is <code>true</code> if
+ * the normal vector points
+ * the usual way
+ * (GeometryInfo::unit_normal_orientation)
+ * and <code>false</code> otherwise.
+ *
+ * The result is always
+ * <code>true</code> in this
+ * class, but derived classes
+ * will reimplement this.
+ *
+ * @warning There is a bug in
+ * the class hierarchy right
+ * now. Avoid ever calling
+ * this function through a
+ * reference, since you might
+ * end up with the base class
+ * function instead of the
+ * derived class. Still, we
+ * do not want to make it
+ * virtual for efficiency
+ * reasons.
+ */
+ bool face_orientation(const unsigned int cell, const unsigned int face) const;
+
+
+ /**
+ * Access to user pointers.
+ */
+ void *&user_pointer(const unsigned int i);
+
+ /**
+ * Read-only access to user pointers.
+ */
+ const void *user_pointer(const unsigned int i) const;
+
+ /**
+ * Access to user indices.
+ */
+ unsigned int &user_index(const unsigned int i);
+
+ /**
+ * Read-only access to user indices.
+ */
+ unsigned int user_index(const unsigned int i) const;
+
+ /**
+ * Reset user data to zero.
+ */
+ void clear_user_data(const unsigned int i);
+
+ /**
+ * Clear all user pointers or
+ * indices and reset their
+ * type, such that the next
+ * access may be either of the two.
+ */
+ void clear_user_data();
+
+ /**
+ * Clear all user flags.
+ */
+ void clear_user_flags();
+
+ /**
+ * Check the memory consistency of the
+ * different containers. Should only be
+ * called with the preprocessor flag @p DEBUG
+ * set. The function should be called from
+ * the functions of the higher
+ * TriaLevel classes.
+ */
+ void monitor_memory (const unsigned int true_dimension) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Read or write the data of this object to or
+ * from a stream for the purpose of serialization
+ */
+ template <class Archive>
+ void serialize(Archive &ar,
+ const unsigned int version);
+
+ /**
+ * Exception
+ */
+ DeclException3 (ExcMemoryWasted,
+ char *, int, int,
+ << "The container " << arg1 << " contains "
+ << arg2 << " elements, but it`s capacity is "
+ << arg3 << ".");
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException2 (ExcMemoryInexact,
+ int, int,
+ << "The containers have sizes " << arg1 << " and "
+ << arg2 << ", which is not as expected.");
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcWrongIterator,
+ char *, char *,
+ << "You asked for the next free " << arg1 << "_iterator, "
+ "but you can only ask for " << arg2 <<"_iterators.");
+
+ /**
+ * dealii::Triangulation objects can
+ * either access a user
+ * pointer or a user
+ * index. What you tried to
+ * do is to access one of
+ * those after using the
+ * other.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcPointerIndexClash);
+
+ protected:
+ /**
+ * Counter for next_free_single_* functions
+ */
+ unsigned int next_free_single;
+
+ /**
+ * Counter for next_free_pair_* functions
+ */
+ unsigned int next_free_pair;
+
+ /**
+ * Bool flag for next_free_single_* functions
+ */
+ bool reverse_order_next_free_single;
+
+ /**
+ * The data type storing user
+ * pointers or user indices.
+ */
+ struct UserData
+ {
+ union
{
- union
- {
- /// The entry used as user
- /// pointer.
- void* p;
- /// The entry used as user
- /// index.
- unsigned int i;
- };
-
- /**
- * Default constructor.
- */
- UserData()
- {
- p = 0;
- }
-
- /**
- * Write the data of this object
- * to a stream for the purpose of
- * serialization.
- */
- template <class Archive>
- void serialize (Archive & ar, const unsigned int version);
+ /// The entry used as user
+ /// pointer.
+ void *p;
+ /// The entry used as user
+ /// index.
+ unsigned int i;
};
- /**
- * Enum descibing the
- * possible types of
- * userdata.
- */
- enum UserDataType
+ /**
+ * Default constructor.
+ */
+ UserData()
{
- /// No userdata used yet.
- data_unknown,
- /// UserData contains pointers.
- data_pointer,
- /// UserData contains indices.
- data_index
- };
+ p = 0;
+ }
-
- /**
- * Pointer which is not used by the
- * library but may be accessed and set
- * by the user to handle data local to
- * a line/quad/etc.
- */
- std::vector<UserData> user_data;
- /**
- * In order to avoid
- * confusion between user
- * pointers and indices, this
- * enum is set by the first
- * function accessing either
- * and subsequent access will
- * not be allowed to change
- * the type of data accessed.
- */
- mutable UserDataType user_data_type;
+ /**
+ * Write the data of this object
+ * to a stream for the purpose of
+ * serialization.
+ */
+ template <class Archive>
+ void serialize (Archive &ar, const unsigned int version);
+ };
+
+ /**
+ * Enum describing the
+ * possible types of
+ * user data.
+ */
+ enum UserDataType
+ {
+ /// No userdata used yet.
+ data_unknown,
+ /// UserData contains pointers.
+ data_pointer,
+ /// UserData contains indices.
+ data_index
+ };
+
+
+ /**
+ * User data (a pointer or an index per
+ * object) which is not used by the
+ * library but may be accessed and set
+ * by the user to handle data local to
+ * a line/quad/etc.
+ */
+ std::vector<UserData> user_data;
+ /**
+ * In order to avoid
+ * confusion between user
+ * pointers and indices, this
+ * enum is set by the first
+ * function accessing either
+ * and subsequent access will
+ * not be allowed to change
+ * the type of data accessed.
+ */
+ mutable UserDataType user_data_type;
};
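The user-data mechanism declared above is perhaps easiest to see in a short sketch. The following is illustrative only, not the library's actual implementation; it assumes the members declared in this excerpt (user_data, user_data_type, ExcPointerIndexClash) and shows how the first access can fix whether the UserData union is read as a pointer or as an index, so that mixing the two afterwards trips the exception:

template <typename G>
void *&
TriaObjects<G>::user_pointer (const unsigned int i)
{
  // The first access decides the interpretation of the union. A later
  // call to user_index() on the same container would then fail the
  // analogous assertion with ExcPointerIndexClash.
  if (user_data_type == data_unknown)
    user_data_type = data_pointer;
  Assert (user_data_type == data_pointer, ExcPointerIndexClash ());
  Assert (i < user_data.size (), ExcIndexRange (i, 0, user_data.size ()));

  return user_data[i].p;
}

clear_user_data() resets user_data_type to data_unknown, which is why the next access after clearing may again be of either kind.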
- /**
- * For hexahedrons the data of TriaObjects needs to be extended, as we can obtain faces
- * (quads) in non-standard-orientation, therefore we declare a class TriaObjectsHex, which
- * additionally contains a bool-vector of the face-orientations.
- */
+ /**
+ * For hexahedra, the data of TriaObjects needs to be extended, since faces
+ * (quads) can occur in non-standard orientation; we therefore declare a class TriaObjectsHex,
+ * which additionally contains a bool vector of the face orientations.
+ */
class TriaObjectsHex : public TriaObjects<TriaObject<3> >
{
template <int dim, int spacedim=dim>
class FEValues : public dealii::internal::hp::FEValuesBase<dim,dim,dealii::FEValues<dim,spacedim> >
{
- public:
+ public:
- static const unsigned int dimension = dim;
+ static const unsigned int dimension = dim;
- static const unsigned int space_dimension = spacedim;
+ static const unsigned int space_dimension = spacedim;
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEValues (const dealii::hp::MappingCollection<dim,spacedim> &mapping_collection,
- const dealii::hp::FECollection<dim,spacedim> &fe_collection,
- const dealii::hp::QCollection<dim> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FEValues (const dealii::hp::MappingCollection<dim,spacedim> &mapping_collection,
- const dealii::hp::FECollection<dim,spacedim> &fe_collection,
++ const dealii::hp::FECollection<dim,spacedim> &fe_collection,
+ const dealii::hp::QCollection<dim> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters, and choose a
- * @p MappingQ1 object for the
- * mapping object.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters, and choose a
+ * @p MappingQ1 object for the
+ * mapping object.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FEValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Reinitialize the object for
- * the given cell.
- *
- * After the call, you can get
- * an FEValues object using the
- * get_present_fe_values()
- * function that corresponds to
- * the present cell. For this
- * FEValues object, we use the
- * additional arguments
- * described below to determine
- * which finite element,
- * mapping, and quadrature
- * formula to use. They are
- * order in such a way that the
- * arguments one may want to
- * change most frequently come
- * first. The rules for these
- * arguments are as follows:
- *
- * If the @p fe_index argument
- * to this function is left at
- * its default value, then we
- * use that finite element
- * within the hp::FECollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>. Consequently,
- * the hp::FECollection
- * argument given to this
- * object should really be the
- * same as that used in the
- * construction of the
- * hp::DofHandler associated
- * with the present cell. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>.
- *
- * If the @p q_index argument
- * is left at its default
- * value, then we use that
- * quadrature formula within
- * the hp::QCollection passed
- * to the constructor of this
- * class with index given by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite element. In
- * this case, there should be a
- * corresponding quadrature
- * formula for each finite
- * element in the
- * hp::FECollection. As a
- * special case, if the
- * quadrature collection
- * contains only a single
- * element (a frequent case if
- * one wants to use the same
- * quadrature object for all
- * finite elements in an hp
- * discretization, even if that
- * may not be the most
- * efficient), then this single
- * quadrature is used unless a
- * different value for this
- * argument is specified. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>
- * or the choice for the single
- * quadrature.
- *
- * If the @p mapping_index
- * argument is left at its
- * default value, then we use
- * that mapping object within
- * the hp::MappingCollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite
- * element. As above, if the
- * mapping collection contains
- * only a single element (a
- * frequent case if one wants
- * to use a MappingQ1 object
- * for all finite elements in
- * an hp discretization), then
- * this single mapping is used
- * unless a different value for
- * this argument is specified.
- */
- void
- reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Reinitialize the object for
+ * the given cell.
+ *
+ * After the call, you can get
+ * an FEValues object using the
+ * get_present_fe_values()
+ * function that corresponds to
+ * the present cell. For this
+ * FEValues object, we use the
+ * additional arguments
+ * described below to determine
+ * which finite element,
+ * mapping, and quadrature
+ * formula to use. They are
+ * ordered in such a way that the
+ * arguments one may want to
+ * change most frequently come
+ * first. The rules for these
+ * arguments are as follows:
+ *
+ * If the @p fe_index argument
+ * to this function is left at
+ * its default value, then we
+ * use that finite element
+ * within the hp::FECollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>. Consequently,
+ * the hp::FECollection
+ * argument given to this
+ * object should really be the
+ * same as that used in the
+ * construction of the
+ * hp::DoFHandler associated
+ * with the present cell. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>.
+ *
+ * If the @p q_index argument
+ * is left at its default
+ * value, then we use that
+ * quadrature formula within
+ * the hp::QCollection passed
+ * to the constructor of this
+ * class with index given by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite element. In
+ * this case, there should be a
+ * corresponding quadrature
+ * formula for each finite
+ * element in the
+ * hp::FECollection. As a
+ * special case, if the
+ * quadrature collection
+ * contains only a single
+ * element (a frequent case if
+ * one wants to use the same
+ * quadrature object for all
+ * finite elements in an hp
+ * discretization, even if that
+ * may not be the most
+ * efficient), then this single
+ * quadrature is used unless a
+ * different value for this
+ * argument is specified. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>
+ * or the choice for the single
+ * quadrature.
+ *
+ * If the @p mapping_index
+ * argument is left at its
+ * default value, then we use
+ * that mapping object within
+ * the hp::MappingCollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite
+ * element. As above, if the
+ * mapping collection contains
+ * only a single element (a
+ * frequent case if one wants
+ * to use a MappingQ1 object
+ * for all finite elements in
+ * an hp discretization), then
+ * this single mapping is used
+ * unless a different value for
+ * this argument is specified.
+ */
+ void
+ reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * doesn't make sense for
- * triangulation iterators,
- * this function chooses the
- * zero-th finite element,
- * mapping, and quadrature
- * object from the relevant
- * constructions passed to the
- * constructor of this
- * object. The only exception
- * is if you specify a value
- * different from the default
- * value for any of these last
- * three arguments.
- */
- void
- reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFhandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * doesn't make sense for
+ * triangulation iterators,
+ * this function chooses the
+ * zero-th finite element,
+ * mapping, and quadrature
+ * object from the relevant
+ * constructions passed to the
+ * constructor of this
+ * object. The only exception
+ * is if you specify a value
+ * different from the default
+ * value for any of these last
+ * three arguments.
+ */
+ void
+ reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
};
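To make the reinit() rules above concrete, here is a hedged usage sketch; fe_collection, quadrature_collection and dof_handler are assumed to be an hp::FECollection<dim>, an hp::QCollection<dim> and an hp::DoFHandler<dim> that were set up consistently elsewhere:

hp::FEValues<dim> hp_fe_values (fe_collection,
                                quadrature_collection,
                                update_values | update_JxW_values);

typename hp::DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
for (; cell != endc; ++cell)
  {
    // All index arguments are left at their defaults, so the finite
    // element, quadrature formula, and mapping with index
    // cell->active_fe_index() are picked from the collections.
    hp_fe_values.reinit (cell);

    const FEValues<dim> &fe_values = hp_fe_values.get_present_fe_values ();
    // ... assemble local contributions using fe_values ...
  }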
template <int dim, int spacedim=dim>
class FEFaceValues : public dealii::internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> >
{
- public:
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEFaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ public:
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FEFaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
++ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters, and choose a
- * @p MappingQ1 object for the
- * mapping object.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters, and choose a
+ * @p MappingQ1 object for the
+ * mapping object.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
- FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
++ FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Reinitialize the object for
- * the given cell and face.
- *
- * After the call, you can get
- * an FEFaceValues object using the
- * get_present_fe_values()
- * function that corresponds to
- * the present cell. For this
- * FEFaceValues object, we use the
- * additional arguments
- * described below to determine
- * which finite element,
- * mapping, and quadrature
- * formula to use. They are
- * order in such a way that the
- * arguments one may want to
- * change most frequently come
- * first. The rules for these
- * arguments are as follows:
- *
- * If the @p fe_index argument
- * to this function is left at
- * its default value, then we
- * use that finite element
- * within the hp::FECollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>. Consequently,
- * the hp::FECollection
- * argument given to this
- * object should really be the
- * same as that used in the
- * construction of the
- * hp::DofHandler associated
- * with the present cell. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>.
- *
- * If the @p q_index argument
- * is left at its default
- * value, then we use that
- * quadrature formula within
- * the hp::QCollection passed
- * to the constructor of this
- * class with index given by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite element. In
- * this case, there should be a
- * corresponding quadrature
- * formula for each finite
- * element in the
- * hp::FECollection. As a
- * special case, if the
- * quadrature collection
- * contains only a single
- * element (a frequent case if
- * one wants to use the same
- * quadrature object for all
- * finite elements in an hp
- * discretization, even if that
- * may not be the most
- * efficient), then this single
- * quadrature is used unless a
- * different value for this
- * argument is specified. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>
- * or the choice for the single
- * quadrature.
- *
- * If the @p mapping_index
- * argument is left at its
- * default value, then we use
- * that mapping object within
- * the hp::MappingCollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite
- * element. As above, if the
- * mapping collection contains
- * only a single element (a
- * frequent case if one wants
- * to use a MappingQ1 object
- * for all finite elements in
- * an hp discretization), then
- * this single mapping is used
- * unless a different value for
- * this argument is specified.
- */
- void
- reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Reinitialize the object for
+ * the given cell and face.
+ *
+ * After the call, you can get
+ * an FEFaceValues object using the
+ * get_present_fe_values()
+ * function that corresponds to
+ * the present cell. For this
+ * FEFaceValues object, we use the
+ * additional arguments
+ * described below to determine
+ * which finite element,
+ * mapping, and quadrature
+ * formula to use. They are
+ * ordered in such a way that the
+ * arguments one may want to
+ * change most frequently come
+ * first. The rules for these
+ * arguments are as follows:
+ *
+ * If the @p fe_index argument
+ * to this function is left at
+ * its default value, then we
+ * use that finite element
+ * within the hp::FECollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>. Consequently,
+ * the hp::FECollection
+ * argument given to this
+ * object should really be the
+ * same as that used in the
+ * construction of the
+ * hp::DoFHandler associated
+ * with the present cell. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>.
+ *
+ * If the @p q_index argument
+ * is left at its default
+ * value, then we use that
+ * quadrature formula within
+ * the hp::QCollection passed
+ * to the constructor of this
+ * class with index given by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite element. In
+ * this case, there should be a
+ * corresponding quadrature
+ * formula for each finite
+ * element in the
+ * hp::FECollection. As a
+ * special case, if the
+ * quadrature collection
+ * contains only a single
+ * element (a frequent case if
+ * one wants to use the same
+ * quadrature object for all
+ * finite elements in an hp
+ * discretization, even if that
+ * may not be the most
+ * efficient), then this single
+ * quadrature is used unless a
+ * different value for this
+ * argument is specified. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>
+ * or the choice for the single
+ * quadrature.
+ *
+ * If the @p mapping_index
+ * argument is left at its
+ * default value, then we use
+ * that mapping object within
+ * the hp::MappingCollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite
+ * element. As above, if the
+ * mapping collection contains
+ * only a single element (a
+ * frequent case if one wants
+ * to use a MappingQ1 object
+ * for all finite elements in
+ * an hp discretization), then
+ * this single mapping is used
+ * unless a different value for
+ * this argument is specified.
+ */
+ void
+ reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * doesn't make sense for
- * triangulation iterators,
- * this function chooses the
- * zero-th finite element,
- * mapping, and quadrature
- * object from the relevant
- * constructions passed to the
- * constructor of this
- * object. The only exception
- * is if you specify a value
- * different from the default
- * value for any of these last
- * three arguments.
- */
- void
- reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * doesn't make sense for
+ * triangulation iterators,
+ * this function chooses the
+ * zero-th finite element,
+ * mapping, and quadrature
+ * object from the relevant
+ * constructions passed to the
+ * constructor of this
+ * object. The only exception
+ * is if you specify a value
+ * different from the default
+ * value for any of these last
+ * three arguments.
+ */
+ void
+ reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
};
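The face variant follows the same pattern; a hedged sketch of visiting boundary faces, with fe_collection, face_quadrature_collection and dof_handler again assumed to exist:

hp::FEFaceValues<dim> hp_fe_face_values (fe_collection,
                                         face_quadrature_collection,
                                         update_values | update_JxW_values);

typename hp::DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
for (; cell != endc; ++cell)
  for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
    if (cell->at_boundary (face))
      {
        // With the trailing index arguments at their defaults,
        // cell->active_fe_index() again selects the collection entries.
        hp_fe_face_values.reinit (cell, face);

        const FEFaceValues<dim> &fe_face_values =
          hp_fe_face_values.get_present_fe_values ();
        // ... integrate boundary terms using fe_face_values ...
      }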
template <int dim, int spacedim=dim>
class FESubfaceValues : public dealii::internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> >
{
- public:
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FESubfaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ public:
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FESubfaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
++ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters, and choose a
- * @p MappingQ1 object for the
- * mapping object.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters, and choose a
+ * @p MappingQ1 object for the
+ * mapping object.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Reinitialize the object for
- * the given cell, face, and subface.
- *
- * After the call, you can get
- * an FESubfaceValues object using the
- * get_present_fe_values()
- * function that corresponds to
- * the present cell. For this
- * FESubfaceValues object, we use the
- * additional arguments
- * described below to determine
- * which finite element,
- * mapping, and quadrature
- * formula to use. They are
- * order in such a way that the
- * arguments one may want to
- * change most frequently come
- * first. The rules for these
- * arguments are as follows:
- *
- * If the @p q_index argument
- * is left at its default
- * value, then we use that
- * quadrature formula within
- * the hp::QCollection passed
- * to the constructor of this
- * class with index given by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite element. In
- * this case, there should be a
- * corresponding quadrature
- * formula for each finite
- * element in the
- * hp::FECollection. As a
- * special case, if the
- * quadrature collection
- * contains only a single
- * element (a frequent case if
- * one wants to use the same
- * quadrature object for all
- * finite elements in an hp
- * discretization, even if that
- * may not be the most
- * efficient), then this single
- * quadrature is used unless a
- * different value for this
- * argument is specified. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>
- * or the choice for the single
- * quadrature.
- *
- * If the @p mapping_index
- * argument is left at its
- * default value, then we use
- * that mapping object within
- * the hp::MappingCollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite
- * element. As above, if the
- * mapping collection contains
- * only a single element (a
- * frequent case if one wants
- * to use a MappingQ1 object
- * for all finite elements in
- * an hp discretization), then
- * this single mapping is used
- * unless a different value for
- * this argument is specified.
- */
- void
- reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Reinitialize the object for
+ * the given cell, face, and subface.
+ *
+ * After the call, you can get
+ * an FESubfaceValues object using the
+ * get_present_fe_values()
+ * function that corresponds to
+ * the present cell. For this
+ * FESubfaceValues object, we use the
+ * additional arguments
+ * described below to determine
+ * which finite element,
+ * mapping, and quadrature
+ * formula to use. They are
+ * ordered in such a way that the
+ * arguments one may want to
+ * change most frequently come
+ * first. The rules for these
+ * arguments are as follows:
+ *
+ * If the @p q_index argument
+ * is left at its default
+ * value, then we use that
+ * quadrature formula within
+ * the hp::QCollection passed
+ * to the constructor of this
+ * class with index given by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite element. In
+ * this case, there should be a
+ * corresponding quadrature
+ * formula for each finite
+ * element in the
+ * hp::FECollection. As a
+ * special case, if the
+ * quadrature collection
+ * contains only a single
+ * element (a frequent case if
+ * one wants to use the same
+ * quadrature object for all
+ * finite elements in an hp
+ * discretization, even if that
+ * may not be the most
+ * efficient), then this single
+ * quadrature is used unless a
+ * different value for this
+ * argument is specified. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>
+ * or the choice for the single
+ * quadrature.
+ *
+ * If the @p mapping_index
+ * argument is left at its
+ * default value, then we use
+ * that mapping object within
+ * the hp::MappingCollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite
+ * element. As above, if the
+ * mapping collection contains
+ * only a single element (a
+ * frequent case if one wants
+ * to use a MappingQ1 object
+ * for all finite elements in
+ * an hp discretization), then
+ * this single mapping is used
+ * unless a different value for
+ * this argument is specified.
+ */
+ void
+ reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFhandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * doesn't make sense for
- * triangulation iterators,
- * this function chooses the
- * zero-th finite element,
- * mapping, and quadrature
- * object from the relevant
- * constructions passed to the
- * constructor of this
- * object. The only exception
- * is if you specify a value
- * different from the default
- * value for any of these last
- * three arguments.
- */
- void
- reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * doesn't make sense for
+ * triangulation iterators,
+ * this function chooses the
+ * zero-th finite element,
+ * mapping, and quadrature
+ * object from the relevant
+ * constructions passed to the
+ * constructor of this
+ * object. The only exception
+ * is if you specify a value
+ * different from the default
+ * value for any of these last
+ * three arguments.
+ */
+ void
+ reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
};
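FESubfaceValues is typically needed on faces whose neighbor is more refined, where the coarse cell's face has to be evaluated one child (subface) at a time. A hedged fragment, using the same assumed collections and a cell/face pair for which cell->face(face)->has_children() holds:

hp::FESubfaceValues<dim> hp_fe_subface_values (fe_collection,
                                               face_quadrature_collection,
                                               update_values | update_JxW_values);

for (unsigned int subface = 0;
     subface < cell->face (face)->n_children (); ++subface)
  {
    hp_fe_subface_values.reinit (cell, face, subface);

    const FESubfaceValues<dim> &fe_subface_values =
      hp_fe_subface_values.get_present_fe_values ();
    // ... couple the coarse cell to the neighbor's children here ...
  }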
}
--- /dev/null
-
+//---------------------------------------------------------------------------
+// $Id: linear_algebra.h 27260 2012-10-31 14:38:43Z heister $
+//
+// Copyright (C) 2008, 2009, 2010, 2012 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__abstract_linear_algebra_h
+#define __deal2__abstract_linear_algebra_h
+
+#include <deal.II/base/config.h>
+
+
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/precondition.h>
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace LinearAlgebraDealII
+{
+ typedef Vector<double> Vector;
+ typedef BlockVector<double> BlockVector;
+
+ typedef SparseMatrix<double> SparseMatrix;
+
+ typedef PreconditionSSOR<SparseMatrix > PreconditionSSOR;
- class Vector {void compress();};
- class BlockVector {void compress();};
++
+}
+
+
+// Dummy classes. These are used to check that your program
+// is compatible with all
+// linear algebra classes. In other words,
+// this is the minimal interface.
+// TODO: should we move this into tests/ only?
+namespace LinearAlgebraDummy
+{
- class SparseMatrix { void compress();};
++ class Vector
++ {
++ void compress();
++ };
++ class BlockVector
++ {
++ void compress();
++ };
+
-
++ class SparseMatrix
++ {
++ void compress();
++ };
+
+ class PreconditionSSOR {};
- namespace LinearAlgebraPETSc
- {
- using namespace dealii;
++
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+
+#ifdef DEAL_II_USE_PETSC
+
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
+#include <deal.II/lac/petsc_precondition.h>
+
+DEAL_II_NAMESPACE_OPEN
+
- typedef PETScWrappers::PreconditionSSOR PreconditionSSOR;
-
++namespace LinearAlgebraPETSc
++{
++ using namespace dealii;
+
+ typedef PETScWrappers::Vector Vector;
+ typedef PETScWrappers::BlockVector BlockVector;
+
+ typedef PETScWrappers::SparseMatrix SparseMatrix;
- namespace MPI
- {
-
++ typedef PETScWrappers::PreconditionSSOR PreconditionSSOR;
++
++
++ namespace MPI
++ {
+
- }
-
+ /**
+ * Typedef for the vector type used.
+ */
+ typedef PETScWrappers::MPI::Vector Vector;
+
+ /**
+ * Typedef for the type used to describe vectors that
+ * consist of multiple blocks.
+ */
+ typedef PETScWrappers::MPI::BlockVector BlockVector;
+
+ /**
+ * Typedef for the sparse matrix type used.
+ */
+ typedef PETScWrappers::MPI::SparseMatrix SparseMatrix;
+
+ /**
+ * Typedef for the type used to describe sparse matrices that
+ * consist of multiple blocks.
+ */
+ typedef PETScWrappers::MPI::BlockSparseMatrix BlockSparseMatrix;
+
+ /**
+ * Typedef for the AMG preconditioner type used for the
+ * top left block of the Stokes matrix.
+ */
+ typedef PETScWrappers::PreconditionBoomerAMG PreconditionAMG;
+
+ /**
+ * Typedef for the Incomplete Cholesky preconditioner used
+ * for other blocks of the system matrix.
+ */
+ typedef PETScWrappers::PreconditionICC PreconditionIC;
+
+ /**
+ * Typedef for the Incomplete LU decomposition preconditioner used
+ * for other blocks of the system matrix.
+ */
+ typedef PETScWrappers::PreconditionILU PreconditionILU;
- namespace LinearAlgebraTrilinos
+ }
++
++}
+DEAL_II_NAMESPACE_CLOSE
+
+
+#endif // DEAL_II_USE_PETSC
+
+#ifdef DEAL_II_USE_TRILINOS
+
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/trilinos_block_vector.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_precondition.h>
+#include <deal.II/lac/trilinos_solver.h>
+
+DEAL_II_NAMESPACE_OPEN
+
- using namespace dealii;
-
- typedef TrilinosWrappers::Vector Vector;
++namespace LinearAlgebraTrilinos
++{
++ using namespace dealii;
++
++ typedef TrilinosWrappers::Vector Vector;
++
++ namespace MPI
+ {
- namespace MPI
- {
-
+
- }
-
+ /**
+ * Typedef for the vector type used.
+ */
+ typedef TrilinosWrappers::MPI::Vector Vector;
+
+ /**
+ * Typedef for the type used to describe vectors that
+ * consist of multiple blocks.
+ */
+ typedef TrilinosWrappers::MPI::BlockVector BlockVector;
+
+ /**
+ * Typedef for the sparse matrix type used.
+ */
+ typedef TrilinosWrappers::SparseMatrix SparseMatrix;
+
+ /**
+ * Typedef for the type used to describe sparse matrices that
+ * consist of multiple blocks.
+ */
+ typedef TrilinosWrappers::BlockSparseMatrix BlockSparseMatrix;
+
+ /**
+ * Typedef for the AMG preconditioner type used for the
+ * top left block of the Stokes matrix.
+ */
+ typedef TrilinosWrappers::PreconditionAMG PreconditionAMG;
+
+ /**
+ * Typedef for the Incomplete Cholesky preconditioner used
+ * for other blocks of the system matrix.
+ */
+ typedef TrilinosWrappers::PreconditionIC PreconditionIC;
+
+ /**
+ * Typedef for the Incomplete LU decomposition preconditioner used
+ * for other blocks of the system matrix.
+ */
+ typedef TrilinosWrappers::PreconditionILU PreconditionILU;
+ }
+
++}
++
+DEAL_II_NAMESPACE_CLOSE
+
+
+#endif // DEAL_II_USE_TRILINOS
+
+
+
+#endif
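The point of the parallel LinearAlgebraDealII, LinearAlgebraPETSc and LinearAlgebraTrilinos namespaces is that application code can be written against a single alias and switched between backends in one place. A hedged sketch of how a program might do this (the preprocessor symbol USE_PETSC_LA is an assumption of this example, not something defined by the header):

// Select a backend once; everything else refers only to the alias LA.
namespace LA
{
#ifdef USE_PETSC_LA
  using namespace dealii::LinearAlgebraPETSc;
#else
  using namespace dealii::LinearAlgebraTrilinos;
#endif
}

LA::MPI::Vector           solution;
LA::MPI::SparseMatrix     system_matrix;
LA::MPI::PreconditionAMG  preconditioner;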
template <typename MatrixType>
class BlockMatrixBase : public Subscriptor
{
- public:
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef MatrixType BlockType;
-
- /**
- * Type of matrix entries. In analogy to
- * the STL container classes.
- */
- typedef typename BlockType::value_type value_type;
- typedef value_type *pointer;
- typedef const value_type *const_pointer;
- typedef value_type &reference;
- typedef const value_type &const_reference;
- typedef std::size_t size_type;
-
- typedef
- MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, false> >
- iterator;
-
- typedef
- MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, true> >
- const_iterator;
-
-
- /**
- * Default constructor.
- */
- BlockMatrixBase ();
-
- /**
- * Copy the given matrix to this
- * one. The operation throws an
- * error if the sparsity patterns
- * of the two involved matrices
- * do not point to the same
- * object, since in this case the
- * copy operation is
- * cheaper. Since this operation
- * is nonetheless not for free, we
- * do not make it available
- * through operator=(), since
- * this may lead to unwanted
- * usage, e.g. in copy arguments
- * to functions, which should
- * really be arguments by
- * reference.
- *
- * The source matrix may be a
- * matrix of arbitrary type, as
- * long as its data type is
- * convertible to the data type
- * of this matrix.
- *
- * The function returns a
- * reference to <tt>this</tt>.
- */
- template <class BlockMatrixType>
- BlockMatrixBase &
- copy_from (const BlockMatrixType &source);
-
- /**
- * Access the block with the
- * given coordinates.
- */
- BlockType &
- block (const unsigned int row,
- const unsigned int column);
-
-
- /**
- * Access the block with the
- * given coordinates. Version for
- * constant objects.
- */
- const BlockType &
- block (const unsigned int row,
- const unsigned int column) const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int n () const;
-
-
- /**
- * Return the number of blocks in
- * a column. Returns zero if no
- * sparsity pattern is presently
- * associated to this matrix.
- */
- unsigned int n_block_rows () const;
-
- /**
- * Return the number of blocks in
- * a row. Returns zero if no
- * sparsity pattern is presently
- * associated to this matrix.
- */
- unsigned int n_block_cols () const;
-
- /**
- * Set the element <tt>(i,j)</tt>
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const value_type value);
-
- /**
- * Set all elements given in a
- * FullMatrix into the sparse matrix
- * locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number>
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number>
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number>
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- template <typename number>
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values = false);
-
- /**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const value_type value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number>
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number>
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number>
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Return the value of the entry
- * (i,j). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the wanted element does not
- * exist in the matrix.
- */
- value_type operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry <tt>(i,j)</tt>. The only
- * difference is that if this
- * entry does not exist in the
- * sparsity pattern, then instead
- * of raising an exception, zero
- * is returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- */
- value_type el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal element in
- * the <i>i</i>th row. This function
- * throws an error if the matrix is not
- * quadratic and also if the diagonal
- * blocks of the matrix are not
- * quadratic.
- *
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
- */
- value_type diag_element (const unsigned int i) const;
-
- /**
- * Call the compress() function on all
- * the subblocks of the matrix.
- *
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Multiply the entire matrix by a
- * fixed factor.
- */
- BlockMatrixBase & operator *= (const value_type factor);
-
- /**
- * Divide the entire matrix by a
- * fixed factor.
- */
- BlockMatrixBase & operator /= (const value_type factor);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>. This
- * function throws an error if the
- * sparsity patterns of the two involved
- * matrices do not point to the same
- * object, since in this case the
- * operation is cheaper.
- *
- * The source matrix may be a sparse
- * matrix over an arbitrary underlying
- * scalar type, as long as its data type
- * is convertible to the data type of
- * this matrix.
- */
- template <class BlockMatrixType>
- void add (const value_type factor,
- const BlockMatrixType &matrix);
-
-
- /**
- * Adding Matrix-vector
- * multiplication. Add $M*src$ on
- * $dst$ with $M$ being this
- * matrix.
- */
- template <class BlockVectorType>
- void vmult_add (BlockVectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- */
- template <class BlockVectorType>
- void Tvmult_add (BlockVectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Return the norm of the vector
- * <i>v</i> with respect to the
- * norm induced by this matrix,
- * i.e. <i>v<sup>T</sup>Mv</i>. This
- * is useful, e.g. in the finite
- * element context, where the
- * <i>L<sup>2</sup></i>-norm of a
- * function equals the matrix
- * norm with respect to the mass
- * matrix of the vector
- * representing the nodal values
- * of the finite element
- * function. Note that even
- * though the function's name
- * might suggest something
- * different, for historic
- * reasons not the norm but its
- * square is returned, as defined
- * above by the scalar product.
- *
- * Obviously, the matrix needs to
- * be square for this operation.
- */
- template <class BlockVectorType>
- value_type
- matrix_norm_square (const BlockVectorType &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- */
- template <class BlockVectorType>
- value_type
- matrix_scalar_product (const BlockVectorType &u,
- const BlockVectorType &v) const;
-
- /**
- * Compute the residual
- * <i>r=b-Ax</i>. Write the
- * residual into <tt>dst</tt>.
- */
- template <class BlockVectorType>
- value_type residual (BlockVectorType &dst,
- const BlockVectorType &x,
- const BlockVectorType &b) const;
-
- /**
- * STL-like iterator with the
- * first entry.
- */
- iterator begin ();
-
- /**
- * Final iterator.
- */
- iterator end ();
-
- /**
- * STL-like iterator with the
- * first entry of row <tt>r</tt>.
- */
- iterator begin (const unsigned int r);
-
- /**
- * Final iterator of row <tt>r</tt>.
- */
- iterator end (const unsigned int r);
- /**
- * STL-like iterator with the
- * first entry.
- */
- const_iterator begin () const;
-
- /**
- * Final iterator.
- */
- const_iterator end () const;
-
- /**
- * STL-like iterator with the
- * first entry of row <tt>r</tt>.
- */
- const_iterator begin (const unsigned int r) const;
-
- /**
- * Final iterator of row <tt>r</tt>.
- */
- const_iterator end (const unsigned int r) const;
-
- /**
- * Return a reference to the underlying
- * BlockIndices data of the rows.
- */
- const BlockIndices & get_row_indices () const;
-
- /**
- * Return a reference to the underlying
- * BlockIndices data of the columns.
- */
- const BlockIndices & get_column_indices () const;
-
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object. Note that only the memory
- * reserved on the current processor is
- * returned in case this is called in
- * an MPI-based program.
- */
- std::size_t memory_consumption () const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- //@}
- protected:
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- *
- * This calls clear for all
- * sub-matrices and then resets this
- * object to have no blocks at all.
- *
- * This function is protected
- * since it may be necessary to
- * release additional structures.
- * A derived class can make it
- * public again, if it is
- * sufficient.
- */
- void clear ();
-
- /**
- * Index arrays for rows and columns.
- */
- BlockIndices row_block_indices;
- BlockIndices column_block_indices;
-
- /**
- * Array of sub-matrices.
- */
- Table<2,SmartPointer<BlockType, BlockMatrixBase<MatrixType> > > sub_objects;
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- *
- * Derived classes should call this
- * function whenever the size of the
- * sub-objects has changed and the @p
- * X_block_indices arrays need to be
- * updated.
- *
- * Note that this function is not public
- * since not all derived classes need to
- * export its interface. For example, for
- * the usual deal.II SparseMatrix class,
- * the sizes are implicitly determined
- * whenever reinit() is called, and
- * individual blocks cannot be
- * resized. For that class, this function
- * therefore does not have to be
- * public. On the other hand, for the
- * PETSc classes, there is no associated
- * sparsity pattern object that
- * determines the block sizes, and for
- * these the function needs to be
- * publicly available. These classes
- * therefore export this function.
- */
- void collect_sizes ();
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType>
- void vmult_block_block (BlockVectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void vmult_block_nonblock (BlockVectorType &dst,
- const VectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void vmult_nonblock_block (VectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class VectorType>
- void vmult_nonblock_nonblock (VectorType &dst,
- const VectorType &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType>
- void Tvmult_block_block (BlockVectorType &dst,
+ public:
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef MatrixType BlockType;
+
+ /**
+ * Type of matrix entries. In analogy to
+ * the STL container classes.
+ */
+ typedef typename BlockType::value_type value_type;
+ typedef value_type *pointer;
+ typedef const value_type *const_pointer;
+ typedef value_type &reference;
+ typedef const value_type &const_reference;
+ typedef std::size_t size_type;
+
+ typedef
+ MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, false> >
+ iterator;
+
+ typedef
+ MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, true> >
+ const_iterator;
+
+
+ /**
+ * Default constructor.
+ */
+ BlockMatrixBase ();
+
+ /**
+ * Copy the given matrix to this
+ * one. The operation throws an
+ * error if the sparsity patterns
+ * of the two involved matrices
+ * do not point to the same
+ * object, since in this case the
+ * copy operation is
+ * cheaper. Since this operation
+ * is nonetheless not for free, we
+ * do not make it available
+ * through operator=(), since
+ * this may lead to unwanted
+ * usage, e.g. in copy arguments
+ * to functions, which should
+ * really be arguments by
+ * reference.
+ *
+ * The source matrix may be a
+ * matrix of arbitrary type, as
+ * long as its data type is
+ * convertible to the data type
+ * of this matrix.
+ *
+ * The function returns a
+ * reference to <tt>this</tt>.
+ */
+ template <class BlockMatrixType>
+ BlockMatrixBase &
+ copy_from (const BlockMatrixType &source);
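
As an illustration of copy_from(), here is a minimal sketch under the assumption that both block matrices were reinit()ed with the very same sparsity pattern object; the names are made up.

BlockSparseMatrix<double> A, B;
// ... A.reinit (pattern); B.reinit (pattern); assemble into A ...
B.copy_from (A);   // allowed: both matrices point to the same pattern object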
+
+ /**
+ * Access the block with the
+ * given coordinates.
+ */
+ BlockType &
+ block (const unsigned int row,
+ const unsigned int column);
+
+
+ /**
+ * Access the block with the
+ * given coordinates. Version for
+ * constant objects.
+ */
+ const BlockType &
+ block (const unsigned int row,
+ const unsigned int column) const;
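
For a 2x2 (e.g. Stokes-like) system stored in a deal.II BlockSparseMatrix<double>, the block() accessors might be used as in the following sketch; system_matrix, tmp and pressure are illustrative names of suitably sized objects.

const double a_norm = system_matrix.block(0,0).l1_norm ();  // work on the top left block
system_matrix.block(0,1).vmult (tmp, pressure);             // apply the coupling block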
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int n () const;
+
+
+ /**
+ * Return the number of blocks in
+ * a column. Returns zero if no
+ * sparsity pattern is presently
+ * associated to this matrix.
+ */
+ unsigned int n_block_rows () const;
+
+ /**
+ * Return the number of blocks in
+ * a row. Returns zero if no
+ * sparsity pattern is presently
+ * associated to this matrix.
+ */
+ unsigned int n_block_cols () const;
+
+ /**
+ * Set the element <tt>(i,j)</tt>
+ * to <tt>value</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const value_type value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix into the sparse matrix
+ * locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number>
+ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number>
+ void set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number>
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements to values
+ * given by <tt>values</tt> in a
+ * given row in columns given by
+ * col_indices into the sparse
+ * matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ template <typename number>
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add <tt>value</tt> to the
+ * element (<i>i,j</i>). Throws
+ * an error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const value_type value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number>
+ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number>
+ void add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number>
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number>
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
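
The add() overloads above are what a typical finite element assembly loop calls. A minimal sketch under the usual assumptions (a DoFHandler named dof_handler, a finite element named fe, and a global block matrix system_matrix that has been reinit()ed with a matching sparsity pattern):

FullMatrix<double>        cell_matrix (fe.dofs_per_cell, fe.dofs_per_cell);
std::vector<unsigned int> local_dof_indices (fe.dofs_per_cell);

typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
for (; cell!=endc; ++cell)
  {
    cell_matrix = 0;
    // ... integrate the local contributions into cell_matrix ...
    cell->get_dof_indices (local_dof_indices);
    system_matrix.add (local_dof_indices, cell_matrix);  // quadratic variant above
  }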
+
+ /**
+ * Return the value of the entry
+ * (i,j). This may be an
+ * expensive operation and you
+ * should always take care where
+ * to call this function. In
+ * order to avoid abuse, this
+ * function throws an exception
+ * if the wanted element does not
+ * exist in the matrix.
+ */
+ value_type operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This function is mostly like
+ * operator()() in that it
+ * returns the value of the
+ * matrix entry <tt>(i,j)</tt>. The only
+ * difference is that if this
+ * entry does not exist in the
+ * sparsity pattern, then instead
+ * of raising an exception, zero
+ * is returned. While this may be
+ * convenient in some cases, note
+ * that it is simple to write
+ * algorithms that are slow
+ * compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ */
+ value_type el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal element in
+ * the <i>i</i>th row. This function
+ * throws an error if the matrix is not
+ * quadratic and also if the diagonal
+ * blocks of the matrix are not
+ * quadratic.
+ *
+ * This function is considerably
+ * faster than the operator()(),
+ * since for quadratic matrices, the
+ * diagonal entry may be the
+ * first to be stored in each row
+ * and access therefore does not
+ * involve searching for the
+ * right column number.
+ */
+ value_type diag_element (const unsigned int i) const;
+
+ /**
+ * Call the compress() function on all
+ * the subblocks of the matrix.
+ *
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
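
For the distributed (PETSc- or Trilinos-backed) block matrices, the set()/add() calls above must be followed by a compress() before the matrix is used. A minimal sketch, reusing the illustrative names from the assembly example:

system_matrix.add (local_dof_indices, cell_matrix);
// ... further add() calls on other cells ...
system_matrix.compress (VectorOperation::add);  // exchange off-processor entries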
+
+ /**
+ * Multiply the entire matrix by a
+ * fixed factor.
+ */
+ BlockMatrixBase &operator *= (const value_type factor);
+
+ /**
+ * Divide the entire matrix by a
+ * fixed factor.
+ */
+ BlockMatrixBase &operator /= (const value_type factor);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix <tt>factor*matrix</tt>
+ * is added to <tt>this</tt>. This
+ * function throws an error if the
+ * sparsity patterns of the two involved
+ * matrices do not point to the same
+ * object, since in this case the
+ * operation is cheaper.
+ *
+ * The source matrix may be a sparse
+ * matrix over an arbitrary underlying
+ * scalar type, as long as its data type
+ * is convertible to the data type of
+ * this matrix.
+ */
+ template <class BlockMatrixType>
+ void add (const value_type factor,
+ const BlockMatrixType &matrix);
+
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add $M*src$ on
+ * $dst$ with $M$ being this
+ * matrix.
+ */
+ template <class BlockVectorType>
+ void vmult_add (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ */
+ template <class BlockVectorType>
+ void Tvmult_add (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Return the norm of the vector
+ * <i>v</i> with respect to the
+ * norm induced by this matrix,
+ * i.e. <i>v<sup>T</sup>Mv</i>. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * <i>L<sup>2</sup></i>-norm of a
+ * function equals the matrix
+ * norm with respect to the mass
+ * matrix of the vector
+ * representing the nodal values
+ * of the finite element
+ * function. Note that even
+ * though the function's name
+ * might suggest something
+ * different, for historic
+ * reasons not the norm but its
+ * square is returned, as defined
+ * above by the scalar product.
+ *
+ * Obviously, the matrix needs to
+ * be square for this operation.
+ */
+ template <class BlockVectorType>
+ value_type
+ matrix_norm_square (const BlockVectorType &v) const;
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ */
+ template <class BlockVectorType>
+ value_type
+ matrix_scalar_product (const BlockVectorType &u,
+ const BlockVectorType &v) const;
+
+ /**
+ * Compute the residual
+ * <i>r=b-Ax</i>. Write the
+ * residual into <tt>dst</tt>.
+ */
+ template <class BlockVectorType>
+ value_type residual (BlockVectorType &dst,
+ const BlockVectorType &x,
+ const BlockVectorType &b) const;
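
A minimal sketch of the three functions above; mass_matrix, system_matrix, u, v, rhs and tmp are assumed block objects of matching sizes. Remember that matrix_norm_square() returns the square of the induced norm, and that residual() writes b-Au into its first argument (the returned value being, as for the non-block matrices, the l2 norm of that residual).

const double u_mass_norm = std::sqrt (mass_matrix.matrix_norm_square (u)); // |u|_M
const double uv_product  = mass_matrix.matrix_scalar_product (u, v);       // (u, Mv)
const double res_l2      = system_matrix.residual (tmp, u, rhs);           // tmp = b - A u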
+
+ /**
+ * STL-like iterator with the
+ * first entry.
+ */
+ iterator begin ();
+
+ /**
+ * Final iterator.
+ */
+ iterator end ();
+
+ /**
+ * STL-like iterator with the
+ * first entry of row <tt>r</tt>.
+ */
+ iterator begin (const unsigned int r);
+
+ /**
+ * Final iterator of row <tt>r</tt>.
+ */
+ iterator end (const unsigned int r);
+ /**
+ * STL-like iterator with the
+ * first entry.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Final iterator.
+ */
+ const_iterator end () const;
+
+ /**
+ * STL-like iterator with the
+ * first entry of row <tt>r</tt>.
+ */
+ const_iterator begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>.
+ */
+ const_iterator end (const unsigned int r) const;
+
+ /**
+ * Return a reference to the underlying
+ * BlockIndices data of the rows.
+ */
+ const BlockIndices &get_row_indices () const;
+
+ /**
+ * Return a reference to the underlying
+ * BlockIndices data of the columns.
+ */
+ const BlockIndices &get_column_indices () const;
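
As an illustration, the row BlockIndices object can translate a global row index into a (block row, index within block) pair, just as the accessor code further down does; global_row and system_matrix are illustrative names.

const std::pair<unsigned int,unsigned int>
  row = system_matrix.get_row_indices().global_to_local (global_row);
// row.first  : the block row,  row.second : the row index within that block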
+
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object. Note that only the memory
+ * reserved on the current processor is
+ * returned in case this is called in
+ * an MPI-based program.
+ */
+ std::size_t memory_consumption () const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ //@}
+ protected:
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ *
+ * This calls clear for all
+ * sub-matrices and then resets this
+ * object to have no blocks at all.
+ *
+ * This function is protected
+ * since it may be necessary to
+ * release additional structures.
+ * A derived class can make it
+ * public again, if it is
+ * sufficient.
+ */
+ void clear ();
+
+ /**
+ * Index arrays for rows and columns.
+ */
+ BlockIndices row_block_indices;
+ BlockIndices column_block_indices;
+
+ /**
+ * Array of sub-matrices.
+ */
+ Table<2,SmartPointer<BlockType, BlockMatrixBase<MatrixType> > > sub_objects;
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ *
+ * Derived classes should call this
+ * function whenever the size of the
+ * sub-objects has changed and the @p
+ * X_block_indices arrays need to be
+ * updated.
+ *
+ * Note that this function is not public
+ * since not all derived classes need to
+ * export its interface. For example, for
+ * the usual deal.II SparseMatrix class,
+ * the sizes are implicitly determined
+ * whenever reinit() is called, and
+ * individual blocks cannot be
+ * resized. For that class, this function
+ * therefore does not have to be
+ * public. On the other hand, for the
+ * PETSc classes, there is no associated
+ * sparsity pattern object that
+ * determines the block sizes, and for
+ * these the function needs to be
+ * publicly available. These classes
+ * therefore export this function.
+ */
+ void collect_sizes ();
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType>
+ void vmult_block_block (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
+ void vmult_block_nonblock (BlockVectorType &dst,
+ const VectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
+ void vmult_nonblock_block (VectorType &dst,
const BlockVectorType &src) const;
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void Tvmult_block_nonblock (BlockVectorType &dst,
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class VectorType>
+ void vmult_nonblock_nonblock (VectorType &dst,
const VectorType &src) const;
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void Tvmult_nonblock_block (VectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class VectorType>
- void Tvmult_nonblock_nonblock (VectorType &dst,
- const VectorType &src) const;
-
-
- protected:
-
- /**
- * Some matrix types, in particular PETSc,
- * need to synchronize set and add
- * operations. This has to be done for all
- * matrices in the BlockMatrix.
- * This routine prepares adding of elements
- * by notifying all blocks. Called by all
- * internal routines before adding
- * elements.
- */
- void prepare_add_operation();
-
- /**
- * Notifies all blocks to let them prepare
- * for setting elements, see
- * prepare_add_operation().
- */
- void prepare_set_operation();
-
-
- private:
- /**
- * Temporary vector for counting the
- * elements written into the
- * individual blocks when doing a
- * collective add or set.
- */
- std::vector<unsigned int> counter_within_block;
-
- /**
- * Temporary vector for column
- * indices on each block when writing
- * local to global data on each
- * sparse matrix.
- */
- std::vector<std::vector<unsigned int> > column_indices;
-
- /**
- * Temporary vector for storing the
- * local values (they need to be
- * reordered when writing local to
- * global).
- */
- std::vector<std::vector<double> > column_values;
-
-
- /**
- * Make the iterator class a
- * friend. We have to work around
- * a compiler bug here again.
- */
- template <typename, bool>
- friend class BlockMatrixIterators::Accessor;
-
- template <typename>
- friend class MatrixIterator;
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType>
+ void Tvmult_block_block (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
- void Tvmult_block_nonblock (BlockVectorType &dst,
++ void Tvmult_block_nonblock (BlockVectorType &dst,
+ const VectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
+ void Tvmult_nonblock_block (VectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class VectorType>
+ void Tvmult_nonblock_nonblock (VectorType &dst,
+ const VectorType &src) const;
+
+
+ protected:
+
+ /**
+ * Some matrix types, in particular PETSc,
+ * need to synchronize set and add
+ * operations. This has to be done for all
+ * matrices in the BlockMatrix.
+ * This routine prepares adding of elements
+ * by notifying all blocks. Called by all
+ * internal routines before adding
+ * elements.
+ */
+ void prepare_add_operation();
+
+ /**
+ * Notifies all blocks to let them prepare
+ * for setting elements, see
+ * prepare_add_operation().
+ */
+ void prepare_set_operation();
+
+
+ private:
+ /**
+ * Temporary vector for counting the
+ * elements written into the
+ * individual blocks when doing a
+ * collective add or set.
+ */
+ std::vector<unsigned int> counter_within_block;
+
+ /**
+ * Temporary vector for column
+ * indices on each block when writing
+ * local to global data on each
+ * sparse matrix.
+ */
+ std::vector<std::vector<unsigned int> > column_indices;
+
+ /**
+ * Temporary vector for storing the
+ * local values (they need to be
+ * reordered when writing local to
+ * global).
+ */
+ std::vector<std::vector<double> > column_values;
+
+
+ /**
+ * Make the iterator class a
+ * friend. We have to work around
+ * a compiler bug here again.
+ */
+ template <typename, bool>
+ friend class BlockMatrixIterators::Accessor;
+
+ template <typename>
+ friend class MatrixIterator;
};
template <class BlockMatrix>
inline
Accessor<BlockMatrix, true>::Accessor (
- const BlockMatrix *matrix,
+ const BlockMatrix *matrix,
const unsigned int row,
const unsigned int col)
- :
- matrix(matrix),
- base_iterator(matrix->block(0,0).begin())
+ :
+ matrix(matrix),
+ base_iterator(matrix->block(0,0).begin())
{
Assert(col==0, ExcNotImplemented());
template <class BlockMatrix>
inline
Accessor<BlockMatrix, false>::Accessor (
- BlockMatrix *matrix,
+ BlockMatrix *matrix,
const unsigned int row,
const unsigned int col)
- :
- matrix(matrix),
- base_iterator(matrix->block(0,0).begin())
+ :
+ matrix(matrix),
+ base_iterator(matrix->block(0,0).begin())
{
Assert(col==0, ExcNotImplemented());
- // check if this is a regular row or
- // the end of the matrix
+ // check if this is a regular row or
+ // the end of the matrix
if (row < matrix->m())
{
const std::pair<unsigned int,unsigned int> indices
template <class MatrixType>
template <class BlockVectorType,
- class VectorType>
+ class VectorType>
void
BlockMatrixBase<MatrixType>::
-vmult_block_nonblock (BlockVectorType &dst,
+vmult_block_nonblock (BlockVectorType &dst,
const VectorType &src) const
{
Assert (dst.n_blocks() == n_block_rows(),
template <class MatrixType>
template <class BlockVectorType,
- class VectorType>
+ class VectorType>
void
BlockMatrixBase<MatrixType>::
-Tvmult_block_nonblock (BlockVectorType &dst,
+Tvmult_block_nonblock (BlockVectorType &dst,
const VectorType &src) const
{
Assert (dst.n_blocks() == n_block_cols(),
template <typename number>
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix<number> >
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix<number> > BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef typename BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef typename BaseClass::value_type value_type;
- typedef typename BaseClass::pointer pointer;
- typedef typename BaseClass::const_pointer const_pointer;
- typedef typename BaseClass::reference reference;
- typedef typename BaseClass::const_reference const_reference;
- typedef typename BaseClass::size_type size_type;
- typedef typename BaseClass::iterator iterator;
- typedef typename BaseClass::const_iterator const_iterator;
-
- /**
- * @name Constructors and initialization
- */
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix<number> > BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef typename BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef typename BaseClass::value_type value_type;
+ typedef typename BaseClass::pointer pointer;
+ typedef typename BaseClass::const_pointer const_pointer;
+ typedef typename BaseClass::reference reference;
+ typedef typename BaseClass::const_reference const_reference;
+ typedef typename BaseClass::size_type size_type;
+ typedef typename BaseClass::iterator iterator;
+ typedef typename BaseClass::const_iterator const_iterator;
+
+ /**
+ * @name Constructors and initialization
+ */
//@{
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Constructor. Takes the given
- * matrix sparsity structure to
- * represent the sparsity pattern
- * of this matrix. You can change
- * the sparsity pattern later on
- * by calling the reinit()
- * function.
- *
- * This constructor initializes
- * all sub-matrices with the
- * sub-sparsity pattern within
- * the argument.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit() is not called
- * with a new sparsity structure.
- */
- BlockSparseMatrix (const BlockSparsityPattern &sparsity);
-
- /**
- * Destructor.
- */
- virtual ~BlockSparseMatrix ();
-
-
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the block
- * matrices need to be the same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keeps the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- *
- * This calls SparseMatrix::clear on all
- * sub-matrices and then resets this
- * object to have no blocks at all.
- */
- void clear ();
-
- /**
- * Reinitialize the sparse matrix
- * with the given sparsity
- * pattern. The latter tells the
- * matrix how many nonzero
- * elements there need to be
- * reserved.
- *
- * Basically, this function only
- * calls SparseMatrix::reinit() of the
- * sub-matrices with the block
- * sparsity patterns of the
- * parameter.
- *
- * The elements of the matrix are
- * set to zero by this function.
- */
- virtual void reinit (const BlockSparsityPattern &sparsity);
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Constructor. Takes the given
+ * matrix sparsity structure to
+ * represent the sparsity pattern
+ * of this matrix. You can change
+ * the sparsity pattern later on
+ * by calling the reinit()
+ * function.
+ *
+ * This constructor initializes
+ * all sub-matrices with the
+ * sub-sparsity pattern within
+ * the argument.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit() is not called
+ * with a new sparsity structure.
+ */
+ BlockSparseMatrix (const BlockSparsityPattern &sparsity);
+
+ /**
+ * Destructor.
+ */
+ virtual ~BlockSparseMatrix ();
+
+
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the block
+ * matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this does usually not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keeps the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ *
+ * This calls SparseMatrix::clear on all
+ * sub-matrices and then resets this
+ * object to have no blocks at all.
+ */
+ void clear ();
+
+ /**
+ * Reinitialize the sparse matrix
+ * with the given sparsity
+ * pattern. The latter tells the
+ * matrix how many nonzero
+ * elements there need to be
+ * reserved.
+ *
+ * Basically, this function only
+ * calls SparseMatrix::reinit() of the
+ * sub-matrices with the block
+ * sparsity patterns of the
+ * parameter.
+ *
+ * The elements of the matrix are
+ * set to zero by this function.
+ */
+ virtual void reinit (const BlockSparsityPattern &sparsity);
//@}
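
A minimal sketch of initializing such a matrix by hand for a 2x2 block system; n_u, n_p and max_couplings are illustrative sizes, and in real programs the pattern is usually built from a DoFHandler instead.

BlockSparsityPattern pattern (2, 2);
pattern.block(0,0).reinit (n_u, n_u, max_couplings);
pattern.block(0,1).reinit (n_u, n_p, max_couplings);
pattern.block(1,0).reinit (n_p, n_u, max_couplings);
pattern.block(1,1).reinit (n_p, n_p, max_couplings);
pattern.collect_sizes ();
// ... add the required entries, then pattern.compress() ...

BlockSparseMatrix<double> system_matrix (pattern);  // or: system_matrix.reinit (pattern)
// all entries are zero afterwards; "system_matrix = 0;" resets them later on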
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return whether the object is
- * empty. It is empty if either
- * both dimensions are zero or no
- * BlockSparsityPattern is
- * associated.
- */
- bool empty () const;
-
- /**
- * Return the number of entries
- * in a specific row.
- */
- unsigned int get_row_length (const unsigned int row) const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the number of actually
- * nonzero elements. Just counts the
- * number of actually nonzero elements
- * (with absolute value larger than
- * threshold) of all the blocks.
- */
- unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const;
-
- /**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
- *
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
- */
- const BlockSparsityPattern &
- get_sparsity_pattern () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return whether the object is
+ * empty. It is empty if either
+ * both dimensions are zero or no
+ * BlockSparsityPattern is
+ * associated.
+ */
+ bool empty () const;
+
+ /**
+ * Return the number of entries
+ * in a specific row.
+ */
+ unsigned int get_row_length (const unsigned int row) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the number of actually
+ * nonzero elements. Just counts the
+ * number of actually nonzero elements
+ * (with absolute value larger than
+ * threshold) of all the blocks.
+ */
+ unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const;
+
+ /**
+ * Return a (constant) reference
+ * to the underlying sparsity
+ * pattern of this matrix.
+ *
+ * Though the return value is
+ * declared <tt>const</tt>, you
+ * should be aware that it may
+ * change if you call any
+ * nonconstant function of
+ * objects which operate on it.
+ */
+ const BlockSparsityPattern &
+ get_sparsity_pattern () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
//@}
- /**
- * @name Multiplications
- */
+ /**
+ * @name Multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- template <typename block_number>
- void vmult (BlockVector<block_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- template <typename block_number,
- typename nonblock_number>
- void vmult (BlockVector<block_number> &dst,
- const Vector<nonblock_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- template <typename block_number,
- typename nonblock_number>
- void vmult (Vector<nonblock_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- template <typename nonblock_number>
- void vmult (Vector<nonblock_number> &dst,
- const Vector<nonblock_number> &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- template <typename block_number>
- void Tvmult (BlockVector<block_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- template <typename block_number,
- typename nonblock_number>
- void Tvmult (BlockVector<block_number> &dst,
- const Vector<nonblock_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- template <typename block_number,
- typename nonblock_number>
- void Tvmult (Vector<nonblock_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- template <typename nonblock_number>
- void Tvmult (Vector<nonblock_number> &dst,
- const Vector<nonblock_number> &src) const;
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ template <typename block_number>
+ void vmult (BlockVector<block_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ template <typename block_number,
+ typename nonblock_number>
+ void vmult (BlockVector<block_number> &dst,
+ const Vector<nonblock_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ template <typename block_number,
+ typename nonblock_number>
+ void vmult (Vector<nonblock_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ template <typename nonblock_number>
+ void vmult (Vector<nonblock_number> &dst,
+ const Vector<nonblock_number> &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ template <typename block_number>
+ void Tvmult (BlockVector<block_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ template <typename block_number,
+ typename nonblock_number>
- void Tvmult (BlockVector<block_number> &dst,
++ void Tvmult (BlockVector<block_number> &dst,
+ const Vector<nonblock_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ template <typename block_number,
+ typename nonblock_number>
+ void Tvmult (Vector<nonblock_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ template <typename nonblock_number>
+ void Tvmult (Vector<nonblock_number> &dst,
+ const Vector<nonblock_number> &src) const;
//@}
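// Editorial sketch (not part of the patch): the vmult()/Tvmult() overloads
// above in use, assuming `A` is an initialized BlockSparseMatrix<double> and
// `x`, `y` are BlockVector<double> objects sized to match its block structure:
//
//   A.vmult  (y, x);   // y = A   * x
//   A.Tvmult (x, y);   // x = A^T * y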
- /**
- * @name Preconditioning methods
- */
+ /**
+ * @name Preconditioning methods
+ */
//@{
- /**
- * Apply the Jacobi
- * preconditioner, which
- * multiplies every element of
- * the <tt>src</tt> vector by the
- * inverse of the respective
- * diagonal element and
- * multiplies the result with the
- * relaxation parameter
- * <tt>omega</tt>.
- *
- * All diagonal blocks must be
- * square matrices for this
- * operation.
- */
- template <class BlockVectorType>
- void precondition_Jacobi (BlockVectorType &dst,
- const BlockVectorType &src,
- const number omega = 1.) const;
-
- /**
- * Apply the Jacobi
- * preconditioner to a simple vector.
- *
- * The matrix must be a single
- * square block for this.
- */
- template <typename number2>
- void precondition_Jacobi (Vector<number2> &dst,
- const Vector<number2> &src,
- const number omega = 1.) const;
+ /**
+ * Apply the Jacobi
+ * preconditioner, which
+ * multiplies every element of
+ * the <tt>src</tt> vector by the
+ * inverse of the respective
+ * diagonal element and
+ * multiplies the result with the
+ * relaxation parameter
+ * <tt>omega</tt>.
+ *
+ * All diagonal blocks must be
+ * square matrices for this
+ * operation.
+ */
+ template <class BlockVectorType>
+ void precondition_Jacobi (BlockVectorType &dst,
+ const BlockVectorType &src,
+ const number omega = 1.) const;
+
+ /**
+ * Apply the Jacobi
+ * preconditioner to a simple vector.
+ *
+ * The matrix must be a single
+ * square block for this.
+ */
+ template <typename number2>
+ void precondition_Jacobi (Vector<number2> &dst,
+ const Vector<number2> &src,
+ const number omega = 1.) const;
//@}
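// Editorial sketch (not part of the patch): one Jacobi preconditioning step,
// assuming `A` is an initialized BlockSparseMatrix<double> with square
// diagonal blocks and `r`, `z` are matching BlockVector<double> objects:
//
//   A.precondition_Jacobi (z, r, 1.0);   // z = omega * diag(A)^{-1} * r, omega = 1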
- /**
- * @name Input/Output
- */
+ /**
+ * @name Input/Output
+ */
//@{
- /**
- * Print the matrix in the usual
- * format, i.e. as a matrix and
- * not as a list of nonzero
- * elements. For better
- * readability, elements not in
- * the matrix are displayed as
- * empty space, while matrix
- * elements which are explicitly
- * set to zero are displayed as
- * such.
- *
- * The parameters allow for a
- * flexible setting of the output
- * format: <tt>precision</tt> and
- * <tt>scientific</tt> are used
- * to determine the number
- * format, where <tt>scientific =
- * false</tt> means fixed point
- * notation. A zero entry for
- * <tt>width</tt> makes the
- * function compute a width, but
- * it may be changed to a
- * positive value, if output is
- * crude.
- *
- * Additionally, a character for
- * an empty value may be
- * specified.
- *
- * Finally, the whole matrix can
- * be multiplied with a common
- * denominator to produce more
- * readable output, even
- * integers.
- *
- * @attention This function may
- * produce <b>large</b> amounts
- * of output if applied to a
- * large matrix!
- */
- void print_formatted (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const unsigned int width = 0,
- const char *zero_string = " ",
- const double denominator = 1.) const;
+ /**
+ * Print the matrix in the usual
+ * format, i.e. as a matrix and
+ * not as a list of nonzero
+ * elements. For better
+ * readability, elements not in
+ * the matrix are displayed as
+ * empty space, while matrix
+ * elements which are explicitly
+ * set to zero are displayed as
+ * such.
+ *
+ * The parameters allow for a
+ * flexible setting of the output
+ * format: <tt>precision</tt> and
+ * <tt>scientific</tt> are used
+ * to determine the number
+ * format, where <tt>scientific =
+ * false</tt> means fixed point
+ * notation. A zero entry for
+ * <tt>width</tt> makes the
+ * function compute a width, but
+ * it may be changed to a
+ * positive value, if output is
+ * crude.
+ *
+ * Additionally, a character for
+ * an empty value may be
+ * specified.
+ *
+ * Finally, the whole matrix can
+ * be multiplied with a common
+ * denominator to produce more
+ * readable output, even
+ * integers.
+ *
+ * @attention This function may
+ * produce <b>large</b> amounts
+ * of output if applied to a
+ * large matrix!
+ */
+ void print_formatted (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const unsigned int width = 0,
+ const char *zero_string = " ",
+ const double denominator = 1.) const;
//@}
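// Editorial sketch (not part of the patch): writing the matrix in a readable
// fixed-point format with 6 digits, assuming `A` is an initialized
// BlockSparseMatrix<double> and <iostream> is available:
//
//   A.print_formatted (std::cout, 6, false);   // precision 6, fixed point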
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException0 (ExcBlockDimensionMismatch);
- //@}
-
- private:
- /**
- * Pointer to the block sparsity
- * pattern used for this
- * matrix. In order to guarantee
- * that it is not deleted while
- * still in use, we subscribe to
- * it using the SmartPointer
- * class.
- */
- SmartPointer<const BlockSparsityPattern,BlockSparseMatrix<number> > sparsity_pattern;
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcBlockDimensionMismatch);
+ //@}
+
+ private:
+ /**
+ * Pointer to the block sparsity
+ * pattern used for this
+ * matrix. In order to guarantee
+ * that it is not deleted while
+ * still in use, we subscribe to
+ * it using the SmartPointer
+ * class.
+ */
+ SmartPointer<const BlockSparsityPattern,BlockSparseMatrix<number> > sparsity_pattern;
};
template <typename number>
class ChunkSparseMatrix : public virtual Subscriptor
{
- public:
- /**
- * Type of matrix entries. In analogy to
- * the STL container classes.
- */
- typedef number value_type;
-
- /**
- * Declare a type that has holds
- * real-valued numbers with the
- * same precision as the template
- * argument to this class. If the
- * template argument of this
- * class is a real data type,
- * then real_type equals the
- * template argument. If the
- * template argument is a
- * std::complex type then
- * real_type equals the type
- * underlying the complex
- * numbers.
- *
- * This typedef is used to
- * represent the return type of
- * norms.
- */
- typedef typename numbers::NumberTraits<number>::real_type real_type;
-
- /**
- * A structure that describes some of the
- * traits of this class in terms of its
- * run-time behavior. Some other classes
- * (such as the block matrix classes)
- * that take one or other of the matrix
- * classes as its template parameters can
- * tune their behavior based on the
- * variables in this class.
- */
- struct Traits
- {
- /**
- * It is safe to elide additions of
- * zeros to individual elements of
- * this matrix.
- */
- static const bool zero_addition_can_be_elided = true;
- };
-
- /**
- * @name Constructors and initalization.
- */
+ public:
+ /**
+ * Type of matrix entries. In analogy to
+ * the STL container classes.
+ */
+ typedef number value_type;
+
+ /**
+ * Declare a type that holds
+ * real-valued numbers with the
+ * same precision as the template
+ * argument to this class. If the
+ * template argument of this
+ * class is a real data type,
+ * then real_type equals the
+ * template argument. If the
+ * template argument is a
+ * std::complex type then
+ * real_type equals the type
+ * underlying the complex
+ * numbers.
+ *
+ * This typedef is used to
+ * represent the return type of
+ * norms.
+ */
+ typedef typename numbers::NumberTraits<number>::real_type real_type;
+
+ /**
+ * A structure that describes some of the
+ * traits of this class in terms of its
+ * run-time behavior. Some other classes
+ * (such as the block matrix classes)
+ * that take one or other of the matrix
+ * classes as its template parameters can
+ * tune their behavior based on the
+ * variables in this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is safe to elide additions of
+ * zeros to individual elements of
+ * this matrix.
+ */
+ static const bool zero_addition_can_be_elided = true;
+ };
+
+ /**
+ * @name Constructors and initialization.
+ */
//@{
- /**
- * Constructor; initializes the matrix to
- * be empty, without any structure, i.e.
- * the matrix is not usable at all. This
- * constructor is therefore only useful
- * for matrices which are members of a
- * class. All other matrices should be
- * created at a point in the data flow
- * where all necessary information is
- * available.
- *
- * You have to initialize
- * the matrix before usage with
- * reinit(const ChunkSparsityPattern&).
- */
- ChunkSparseMatrix ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the matrix
- * to be copied is empty. This is for the
- * same reason as for the
- * ChunkSparsityPattern, see there for the
- * details.
- *
- * If you really want to copy a whole
- * matrix, you can do so by using the
- * copy_from() function.
- */
- ChunkSparseMatrix (const ChunkSparseMatrix &);
-
- /**
- * Constructor. Takes the given
- * matrix sparsity structure to
- * represent the sparsity pattern
- * of this matrix. You can change
- * the sparsity pattern later on
- * by calling the reinit(const
- * ChunkSparsityPattern&) function.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * ChunkSparsityPattern&) is not
- * called with a new sparsity
- * pattern.
- *
- * The constructor is marked
- * explicit so as to disallow
- * that someone passes a sparsity
- * pattern in place of a sparse
- * matrix to some function, where
- * an empty matrix would be
- * generated then.
- */
- explicit ChunkSparseMatrix (const ChunkSparsityPattern &sparsity);
-
- /**
- * Copy constructor: initialize
- * the matrix with the identity
- * matrix. This constructor will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- ChunkSparseMatrix (const ChunkSparsityPattern &sparsity,
- const IdentityMatrix &id);
-
- /**
- * Destructor. Free all memory, but do not
- * release the memory of the sparsity
- * structure.
- */
- virtual ~ChunkSparseMatrix ();
-
- /**
- * Copy operator. Since copying
- * entire sparse matrices is a
- * very expensive operation, we
- * disallow doing so except for
- * the special case of empty
- * matrices of size zero. This
- * doesn't seem particularly
- * useful, but is exactly what
- * one needs if one wanted to
- * have a
- * <code>std::vector@<ChunkSparseMatrix@<double@>
- * @></code>: in that case, one
- * can create a vector (which
- * needs the ability to copy
- * objects) of empty matrices
- * that are then later filled
- * with something useful.
- */
- ChunkSparseMatrix<number>& operator = (const ChunkSparseMatrix<number> &);
-
- /**
- * Copy operator: initialize
- * the matrix with the identity
- * matrix. This operator will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- ChunkSparseMatrix<number> &
- operator= (const IdentityMatrix &id);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- ChunkSparseMatrix & operator = (const double d);
-
- /**
- * Reinitialize the sparse matrix
- * with the given sparsity
- * pattern. The latter tells the
- * matrix how many nonzero
- * elements there need to be
- * reserved.
- *
- * Regarding memory allocation,
- * the same applies as said
- * above.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * ChunkSparsityPattern &) is not
- * called with a new sparsity
- * structure.
- *
- * The elements of the matrix are
- * set to zero by this function.
- */
- virtual void reinit (const ChunkSparsityPattern &sparsity);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- */
- virtual void clear ();
+ /**
+ * Constructor; initializes the matrix to
+ * be empty, without any structure, i.e.
+ * the matrix is not usable at all. This
+ * constructor is therefore only useful
+ * for matrices which are members of a
+ * class. All other matrices should be
+ * created at a point in the data flow
+ * where all necessary information is
+ * available.
+ *
+ * You have to initialize
+ * the matrix before usage with
+ * reinit(const ChunkSparsityPattern&).
+ */
+ ChunkSparseMatrix ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the matrix
+ * to be copied is empty. This is for the
+ * same reason as for the
+ * ChunkSparsityPattern, see there for the
+ * details.
+ *
+ * If you really want to copy a whole
+ * matrix, you can do so by using the
+ * copy_from() function.
+ */
+ ChunkSparseMatrix (const ChunkSparseMatrix &);
+
+ /**
+ * Constructor. Takes the given
+ * matrix sparsity structure to
+ * represent the sparsity pattern
+ * of this matrix. You can change
+ * the sparsity pattern later on
+ * by calling the reinit(const
+ * ChunkSparsityPattern&) function.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * ChunkSparsityPattern&) is not
+ * called with a new sparsity
+ * pattern.
+ *
+ * The constructor is marked
+ * explicit so as to disallow
+ * accidentally passing a sparsity
+ * pattern in place of a sparse
+ * matrix to some function, where
+ * an empty matrix would then be
+ * generated.
+ */
+ explicit ChunkSparseMatrix (const ChunkSparsityPattern &sparsity);
+
+ /**
+ * Copy constructor: initialize
+ * the matrix with the identity
+ * matrix. This constructor will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ ChunkSparseMatrix (const ChunkSparsityPattern &sparsity,
- const IdentityMatrix &id);
++ const IdentityMatrix &id);
+
+ /**
+ * Destructor. Free all memory, but do not
+ * release the memory of the sparsity
+ * structure.
+ */
+ virtual ~ChunkSparseMatrix ();
+
+ /**
+ * Copy operator. Since copying
+ * entire sparse matrices is a
+ * very expensive operation, we
+ * disallow doing so except for
+ * the special case of empty
+ * matrices of size zero. This
+ * doesn't seem particularly
+ * useful, but is exactly what
+ * one needs if one wanted to
+ * have a
+ * <code>std::vector@<ChunkSparseMatrix@<double@>
+ * @></code>: in that case, one
+ * can create a vector (which
+ * needs the ability to copy
+ * objects) of empty matrices
+ * that are then later filled
+ * with something useful.
+ */
+ ChunkSparseMatrix<number> &operator = (const ChunkSparseMatrix<number> &);
+
+ /**
+ * Copy operator: initialize
+ * the matrix with the identity
+ * matrix. This operator will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ ChunkSparseMatrix<number> &
- operator= (const IdentityMatrix &id);
++ operator= (const IdentityMatrix &id);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ ChunkSparseMatrix &operator = (const double d);
+
+ /**
+ * Reinitialize the sparse matrix
+ * with the given sparsity
+ * pattern. The latter tells the
+ * matrix how many nonzero
+ * elements there need to be
+ * reserved.
+ *
+ * Regarding memory allocation,
+ * the same applies as said
+ * above.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * ChunkSparsityPattern &) is not
+ * called with a new sparsity
+ * structure.
+ *
+ * The elements of the matrix are
+ * set to zero by this function.
+ */
+ virtual void reinit (const ChunkSparsityPattern &sparsity);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ */
+ virtual void clear ();
//@}
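// Editorial sketch (not part of the patch): minimal use of the constructors
// and initialization functions above, assuming a compressed
// ChunkSparsityPattern `csp` exists; the statements are meant for function
// scope:
//
//   ChunkSparseMatrix<double> M (csp);   // attach to the pattern at construction
//   M.reinit (csp);                      // or attach / re-attach later
//   M = 0;                               // zero the entries, keep the pattern
//   M.clear ();                          // forget the pattern again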
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return whether the object is
- * empty. It is empty if either
- * both dimensions are zero or no
- * ChunkSparsityPattern is
- * associated.
- */
- bool empty () const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int n () const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the number of actually
- * nonzero elements of this
- * matrix.
- *
- * Note, that this function does
- * (in contrary to
- * n_nonzero_elements()) not
- * count all entries of the
- * sparsity pattern but only the
- * ones that are nonzero.
- */
- unsigned int n_actually_nonzero_elements () const;
-
- /**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
- *
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
- */
- const ChunkSparsityPattern & get_sparsity_pattern () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return whether the object is
+ * empty. It is empty if either
+ * both dimensions are zero or no
+ * ChunkSparsityPattern is
+ * associated.
+ */
+ bool empty () const;
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the number of actually
+ * nonzero elements of this
+ * matrix.
+ *
+ * Note that, in contrast to
+ * n_nonzero_elements(), this
+ * function does not count all
+ * entries of the sparsity pattern
+ * but only the ones that are
+ * nonzero.
+ */
+ unsigned int n_actually_nonzero_elements () const;
+
+ /**
+ * Return a (constant) reference
+ * to the underlying sparsity
+ * pattern of this matrix.
+ *
+ * Though the return value is
+ * declared <tt>const</tt>, you
+ * should be aware that it may
+ * change if you call any
+ * nonconstant function of
+ * objects which operate on it.
+ */
+ const ChunkSparsityPattern &get_sparsity_pattern () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
//@}
- /**
- * @name Modifying entries
- */
+ /**
+ * @name Modifying entries
+ */
//@{
- /**
- * Set the element (<i>i,j</i>)
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const number value);
-
- /**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const number value);
-
- /**
- * Multiply the entire matrix by a
- * fixed factor.
- */
- ChunkSparseMatrix & operator *= (const number factor);
-
- /**
- * Divide the entire matrix by a
- * fixed factor.
- */
- ChunkSparseMatrix & operator /= (const number factor);
-
- /**
- * Symmetrize the matrix by
- * forming the mean value between
- * the existing matrix and its
- * transpose, $A = \frac 12(A+A^T)$.
- *
- * This operation assumes that
- * the underlying sparsity
- * pattern represents a symmetric
- * object. If this is not the
- * case, then the result of this
- * operation will not be a
- * symmetric matrix, since it
- * only explicitly symmetrizes
- * by looping over the lower left
- * triangular part for efficiency
- * reasons; if there are entries
- * in the upper right triangle,
- * then these elements are missed
- * in the
- * symmetrization. Symmetrization
- * of the sparsity pattern can be
- * obtain by
- * ChunkSparsityPattern::symmetrize().
- */
- void symmetrize ();
-
- /**
- * Copy the given matrix to this
- * one. The operation throws an
- * error if the sparsity patterns
- * of the two involved matrices
- * do not point to the same
- * object, since in this case the
- * copy operation is
- * cheaper. Since this operation
- * is notheless not for free, we
- * do not make it available
- * through <tt>operator =</tt>,
- * since this may lead to
- * unwanted usage, e.g. in copy
- * arguments to functions, which
- * should really be arguments by
- * reference.
- *
- * The source matrix may be a matrix
- * of arbitrary type, as long as its
- * data type is convertible to the
- * data type of this matrix.
- *
- * The function returns a reference to
- * <tt>*this</tt>.
- */
- template <typename somenumber>
- ChunkSparseMatrix<number> &
- copy_from (const ChunkSparseMatrix<somenumber> &source);
-
- /**
- * This function is complete
- * analogous to the
- * ChunkSparsityPattern::copy_from()
- * function in that it allows to
- * initialize a whole matrix in
- * one step. See there for more
- * information on argument types
- * and their meaning. You can
- * also find a small example on
- * how to use this function
- * there.
- *
- * The only difference to the
- * cited function is that the
- * objects which the inner
- * iterator points to need to be
- * of type <tt>std::pair<unsigned
- * int, value</tt>, where
- * <tt>value</tt> needs to be
- * convertible to the element
- * type of this class, as
- * specified by the
- * <tt>number</tt> template
- * argument.
- *
- * Previous content of the matrix
- * is overwritten. Note that the
- * entries specified by the input
- * parameters need not
- * necessarily cover all elements
- * of the matrix. Elements not
- * covered remain untouched.
- */
- template <typename ForwardIterator>
- void copy_from (const ForwardIterator begin,
- const ForwardIterator end);
-
- /**
- * Copy the nonzero entries of a
- * full matrix into this
- * object. Previous content is
- * deleted. Note that the
- * underlying sparsity pattern
- * must be appropriate to hold
- * the nonzero entries of the
- * full matrix.
- */
- template <typename somenumber>
- void copy_from (const FullMatrix<somenumber> &matrix);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>. This
- * function throws an error if the
- * sparsity patterns of the two involved
- * matrices do not point to the same
- * object, since in this case the
- * operation is cheaper.
- *
- * The source matrix may be a sparse
- * matrix over an arbitrary underlying
- * scalar type, as long as its data type
- * is convertible to the data type of
- * this matrix.
- */
- template <typename somenumber>
- void add (const number factor,
- const ChunkSparseMatrix<somenumber> &matrix);
+ /**
+ * Set the element (<i>i,j</i>)
+ * to <tt>value</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const number value);
+
+ /**
+ * Add <tt>value</tt> to the
+ * element (<i>i,j</i>). Throws
+ * an error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const number value);
+
+ /**
+ * Multiply the entire matrix by a
+ * fixed factor.
+ */
+ ChunkSparseMatrix &operator *= (const number factor);
+
+ /**
+ * Divide the entire matrix by a
+ * fixed factor.
+ */
+ ChunkSparseMatrix &operator /= (const number factor);
+
+ /**
+ * Symmetrize the matrix by
+ * forming the mean value between
+ * the existing matrix and its
+ * transpose, $A = \frac 12(A+A^T)$.
+ *
+ * This operation assumes that
+ * the underlying sparsity
+ * pattern represents a symmetric
+ * object. If this is not the
+ * case, then the result of this
+ * operation will not be a
+ * symmetric matrix, since it
+ * only explicitly symmetrizes
+ * by looping over the lower left
+ * triangular part for efficiency
+ * reasons; if there are entries
+ * in the upper right triangle,
+ * then these elements are missed
+ * in the
+ * symmetrization. Symmetrization
+ * of the sparsity pattern can be
+ * obtained by
+ * ChunkSparsityPattern::symmetrize().
+ */
+ void symmetrize ();
+
+ /**
+ * Copy the given matrix to this
+ * one. The operation throws an
+ * error if the sparsity patterns
+ * of the two involved matrices
+ * do not point to the same
+ * object, since in this case the
+ * copy operation is
+ * cheaper. Since this operation
+ * is nonetheless not for free, we
+ * do not make it available
+ * through <tt>operator =</tt>,
+ * since this may lead to
+ * unwanted usage, e.g. in copy
+ * arguments to functions, which
+ * should really be arguments by
+ * reference.
+ *
+ * The source matrix may be a matrix
+ * of arbitrary type, as long as its
+ * data type is convertible to the
+ * data type of this matrix.
+ *
+ * The function returns a reference to
+ * <tt>*this</tt>.
+ */
+ template <typename somenumber>
+ ChunkSparseMatrix<number> &
+ copy_from (const ChunkSparseMatrix<somenumber> &source);
+
+ /**
+ * This function is completely
+ * analogous to the
+ * ChunkSparsityPattern::copy_from()
+ * function in that it allows to
+ * initialize a whole matrix in
+ * one step. See there for more
+ * information on argument types
+ * and their meaning. You can
+ * also find a small example on
+ * how to use this function
+ * there.
+ *
+ * The only difference to the
+ * cited function is that the
+ * objects which the inner
+ * iterator points to need to be
+ * of type <tt>std::pair<unsigned
+ * int, value></tt>, where
+ * <tt>value</tt> needs to be
+ * convertible to the element
+ * type of this class, as
+ * specified by the
+ * <tt>number</tt> template
+ * argument.
+ *
+ * Previous content of the matrix
+ * is overwritten. Note that the
+ * entries specified by the input
+ * parameters need not
+ * necessarily cover all elements
+ * of the matrix. Elements not
+ * covered remain untouched.
+ */
+ template <typename ForwardIterator>
+ void copy_from (const ForwardIterator begin,
+ const ForwardIterator end);
+
+ /**
+ * Copy the nonzero entries of a
+ * full matrix into this
+ * object. Previous content is
+ * deleted. Note that the
+ * underlying sparsity pattern
+ * must be appropriate to hold
+ * the nonzero entries of the
+ * full matrix.
+ */
+ template <typename somenumber>
+ void copy_from (const FullMatrix<somenumber> &matrix);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix <tt>factor*matrix</tt>
+ * is added to <tt>this</tt>. This
+ * function throws an error if the
+ * sparsity patterns of the two involved
+ * matrices do not point to the same
+ * object, since in this case the
+ * operation is cheaper.
+ *
+ * The source matrix may be a sparse
+ * matrix over an arbitrary underlying
+ * scalar type, as long as its data type
+ * is convertible to the data type of
+ * this matrix.
+ */
+ template <typename somenumber>
+ void add (const number factor,
+ const ChunkSparseMatrix<somenumber> &matrix);
//@}
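// Editorial sketch (not part of the patch): modifying entries of an
// initialized ChunkSparseMatrix<double> `M` whose pattern contains (0,0) and
// (0,1); `F` is a FullMatrix<double> whose nonzero entries fit the pattern:
//
//   M.set (0, 0, 2.0);    // overwrite the (0,0) entry
//   M.add (0, 1, -1.0);   // add to the existing (0,1) entry
//   M *= 0.5;             // scale the whole matrix
//   M.copy_from (F);      // take over the nonzero entries of a full matrix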
- /**
- * @name Entry Access
- */
+ /**
+ * @name Entry Access
+ */
//@{
- /**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the required element does
- * not exist in the matrix.
- *
- * In case you want a function
- * that returns zero instead (for
- * entries that are not in the
- * sparsity pattern of the
- * matrix), use the el()
- * function.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry (<i>i,j</i>). The
- * only difference is that if
- * this entry does not exist in
- * the sparsity pattern, then
- * instead of raising an
- * exception, zero is
- * returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic (see
- * ChunkSparsityPattern::optimize_diagonal()).
- *
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
- */
- number diag_element (const unsigned int i) const;
-
- /**
- * Same as above, but return a
- * writeable reference. You're
- * sure you know what you do?
- */
- number & diag_element (const unsigned int i);
+ /**
+ * Return the value of the entry
+ * (<i>i,j</i>). This may be an
+ * expensive operation and you
+ * should always take care where
+ * to call this function. In
+ * order to avoid abuse, this
+ * function throws an exception
+ * if the required element does
+ * not exist in the matrix.
+ *
+ * In case you want a function
+ * that returns zero instead (for
+ * entries that are not in the
+ * sparsity pattern of the
+ * matrix), use the el()
+ * function.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
+ number operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This function is mostly like
+ * operator()() in that it
+ * returns the value of the
+ * matrix entry (<i>i,j</i>). The
+ * only difference is that if
+ * this entry does not exist in
+ * the sparsity pattern, then
+ * instead of raising an
+ * exception, zero is
+ * returned. While this may be
+ * convenient in some cases, note
+ * that it is simple to write
+ * algorithms that are slow
+ * compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
+ number el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic (see
+ * ChunkSparsityPattern::optimize_diagonal()).
+ *
+ * This function is considerably
+ * faster than the operator()(),
+ * since for quadratic matrices, the
+ * diagonal entry may be the
+ * first to be stored in each row
+ * and access therefore does not
+ * involve searching for the
+ * right column number.
+ */
+ number diag_element (const unsigned int i) const;
+
+ /**
+ * Same as above, but return a
+ * writable reference. Be sure
+ * you know what you are doing.
+ */
+ number &diag_element (const unsigned int i);
//@}
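// Editorial sketch (not part of the patch): reading entries of an
// initialized, quadratic ChunkSparseMatrix<double> `M`:
//
//   const double a01 = M (0, 1);             // throws if (0,1) is not in the pattern
//   const double b01 = M.el (0, 1);          // returns zero instead of throwing
//   const double d0  = M.diag_element (0);   // fast access to the diagonal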
- /**
- * @name Matrix vector multiplications
- */
+ /**
+ * @name Matrix vector multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this
- * matrix. This function does the
- * same as vmult() but takes
- * the transposed matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
- * i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to be
- * quadratic for this operation, and for
- * the result to actually be a norm it
- * also needs to be either real symmetric
- * or complex hermitian.
- *
- * The underlying template types of both
- * this matrix and the given vector
- * should either both be real or
- * complex-valued, but not mixed, for
- * this function to make sense.
- */
- template <typename somenumber>
- somenumber matrix_norm_square (const Vector<somenumber> &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- */
- template <typename somenumber>
- somenumber matrix_scalar_product (const Vector<somenumber> &u,
- const Vector<somenumber> &v) const;
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to be
- * <i>r=b-Mx</i>. Write the
- * residual into
- * <tt>dst</tt>. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and destination
- * <i>dst</i> must not be the same
- * vector.
- */
- template <typename somenumber>
- somenumber residual (Vector<somenumber> &dst,
- const Vector<somenumber> &x,
- const Vector<somenumber> &b) const;
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does the
+ * same as vmult() but takes
+ * the transposed matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix,
+ * i.e. $\left(v,Mv\right)$. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to be
+ * quadratic for this operation, and for
+ * the result to actually be a norm it
+ * also needs to be either real symmetric
+ * or complex hermitian.
+ *
+ * The underlying template types of both
+ * this matrix and the given vector
+ * should either both be real or
+ * complex-valued, but not mixed, for
+ * this function to make sense.
+ */
+ template <typename somenumber>
+ somenumber matrix_norm_square (const Vector<somenumber> &v) const;
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ */
+ template <typename somenumber>
+ somenumber matrix_scalar_product (const Vector<somenumber> &u,
+ const Vector<somenumber> &v) const;
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to be
+ * <i>r=b-Mx</i>. Write the
+ * residual into
+ * <tt>dst</tt>. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and destination
+ * <i>dst</i> must not be the same
+ * vector.
+ */
+ template <typename somenumber>
+ somenumber residual (Vector<somenumber> &dst,
+ const Vector<somenumber> &x,
+ const Vector<somenumber> &b) const;
//@}
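// Editorial sketch (not part of the patch): matrix-vector products and a
// residual, assuming `M` is an initialized, quadratic ChunkSparseMatrix<double>
// and `x`, `b`, `r`, `y` are Vector<double> objects of matching size:
//
//   M.vmult (y, x);                             // y  = M * x
//   M.vmult_add (y, x);                         // y += M * x
//   const double norm = M.residual (r, x, b);   // r = b - M*x, returns |r|_2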
- /**
- * @name Matrix norms
- */
+ /**
+ * @name Matrix norms
+ */
//@{
- /**
- * Return the l1-norm of the matrix, that is
- * $|M|_1=max_{all columns j}\sum_{all
- * rows i} |M_ij|$,
- * (max. sum of columns).
- * This is the
- * natural matrix norm that is compatible
- * to the l1-norm for vectors, i.e.
- * $|Mv|_1\leq |M|_1 |v|_1$.
- * (cf. Haemmerlin-Hoffmann : Numerische Mathematik)
- */
- real_type l1_norm () const;
-
- /**
- * Return the linfty-norm of the
- * matrix, that is
- * $|M|_infty=max_{all rows i}\sum_{all
- * columns j} |M_ij|$,
- * (max. sum of rows).
- * This is the
- * natural matrix norm that is compatible
- * to the linfty-norm of vectors, i.e.
- * $|Mv|_infty \leq |M|_infty |v|_infty$.
- * (cf. Haemmerlin-Hoffmann : Numerische Mathematik)
- */
- real_type linfty_norm () const;
-
- /**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
- * matrix.
- */
- real_type frobenius_norm () const;
+ /**
+ * Return the l1-norm of the matrix, that is
+ * $|M|_1 = \max_j \sum_i |M_{ij}|$
+ * (maximum of the column sums).
+ * This is the
+ * natural matrix norm that is compatible
+ * with the l1-norm for vectors, i.e.
+ * $|Mv|_1 \leq |M|_1 |v|_1$.
+ * (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Return the linfty-norm of the
+ * matrix, that is
+ * $|M|_\infty = \max_i \sum_j |M_{ij}|$
+ * (maximum of the row sums).
+ * This is the
+ * natural matrix norm that is compatible
+ * with the linfty-norm of vectors, i.e.
+ * $|Mv|_\infty \leq |M|_\infty |v|_\infty$.
+ * (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return the Frobenius norm of the
+ * matrix, i.e. the square root of the
+ * sum of squares of all entries in the
+ * matrix.
+ */
+ real_type frobenius_norm () const;
//@}
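// Editorial sketch (not part of the patch): the three norms above for an
// initialized ChunkSparseMatrix<double> `M`:
//
//   const double n1   = M.l1_norm ();         // maximum column sum
//   const double ninf = M.linfty_norm ();     // maximum row sum
//   const double nf   = M.frobenius_norm ();  // sqrt of the sum of squared entries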
- /**
- * @name Preconditioning methods
- */
+ /**
+ * @name Preconditioning methods
+ */
//@{
- /**
- * Apply the Jacobi
- * preconditioner, which
- * multiplies every element of
- * the <tt>src</tt> vector by the
- * inverse of the respective
- * diagonal element and
- * multiplies the result with the
- * relaxation factor <tt>omega</tt>.
- */
- template <typename somenumber>
- void precondition_Jacobi (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number omega = 1.) const;
-
- /**
- * Apply SSOR preconditioning to
- * <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_SSOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Apply SOR preconditioning
- * matrix to <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_SOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Apply transpose SOR
- * preconditioning matrix to
- * <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_TSOR (Vector<somenumber> &dst,
+ /**
+ * Apply the Jacobi
+ * preconditioner, which
+ * multiplies every element of
+ * the <tt>src</tt> vector by the
+ * inverse of the respective
+ * diagonal element and
+ * multiplies the result with the
+ * relaxation factor <tt>omega</tt>.
+ */
+ template <typename somenumber>
+ void precondition_Jacobi (Vector<somenumber> &dst,
const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Perform SSOR preconditioning
- * in-place. Apply the
- * preconditioner matrix without
- * copying to a second vector.
- * <tt>omega</tt> is the relaxation
- * parameter.
- */
- template <typename somenumber>
- void SSOR (Vector<somenumber> &v,
- const number omega = 1.) const;
-
- /**
- * Perform an SOR preconditioning
- * in-place. <tt>omega</tt> is
- * the relaxation parameter.
- */
- template <typename somenumber>
- void SOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Apply SSOR preconditioning to
+ * <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_SSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Apply SOR preconditioning
+ * matrix to <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_SOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Apply transpose SOR
+ * preconditioning matrix to
+ * <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_TSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Perform SSOR preconditioning
+ * in-place. Apply the
+ * preconditioner matrix without
+ * copying to a second vector.
+ * <tt>omega</tt> is the relaxation
+ * parameter.
+ */
+ template <typename somenumber>
+ void SSOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Perform an SOR preconditioning
+ * in-place. <tt>omega</tt> is
+ * the relaxation parameter.
+ */
+ template <typename somenumber>
+ void SOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a transpose SOR
+ * preconditioning in-place.
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TSOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a permuted SOR
+ * preconditioning in-place.
+ *
+ * The standard SOR method is
+ * applied in the order
+ * prescribed by <tt>permutation</tt>,
+ * that is, first the row
+ * <tt>permutation[0]</tt>, then
+ * <tt>permutation[1]</tt> and so
+ * on. For efficiency reasons,
+ * the permutation as well as its
+ * inverse are required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void PSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
+ const number om = 1.) const;
+
+ /**
+ * Perform a transposed permuted SOR
+ * preconditioning in-place.
+ *
+ * The transposed SOR method is
+ * applied in the order
+ * prescribed by
+ * <tt>permutation</tt>, that is,
+ * first the row
+ * <tt>permutation[m()-1]</tt>,
+ * then
+ * <tt>permutation[m()-2]</tt>
+ * and so on. For efficiency
+ * reasons, the permutation as
+ * well as its inverse are
+ * required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TPSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
const number om = 1.) const;
- /**
- * Perform a transpose SOR
- * preconditioning in-place.
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TSOR (Vector<somenumber> &v,
- const number om = 1.) const;
-
- /**
- * Perform a permuted SOR
- * preconditioning in-place.
- *
- * The standard SOR method is
- * applied in the order
- * prescribed by <tt>permutation</tt>,
- * that is, first the row
- * <tt>permutation[0]</tt>, then
- * <tt>permutation[1]</tt> and so
- * on. For efficiency reasons,
- * the permutation as well as its
- * inverse are required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void PSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Perform a transposed permuted SOR
- * preconditioning in-place.
- *
- * The transposed SOR method is
- * applied in the order
- * prescribed by
- * <tt>permutation</tt>, that is,
- * first the row
- * <tt>permutation[m()-1]</tt>,
- * then
- * <tt>permutation[m()-2]</tt>
- * and so on. For efficiency
- * reasons, the permutation as
- * well as its inverse are
- * required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TPSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Do one SOR step on <tt>v</tt>.
- * Performs a direct SOR step
- * with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void SOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one adjoint SOR step on
- * <tt>v</tt>. Performs a direct
- * TSOR step with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void TSOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one SSOR step on
- * <tt>v</tt>. Performs a direct
- * SSOR step with right hand side
- * <tt>b</tt> by performing TSOR
- * after SOR.
- */
- template <typename somenumber>
- void SSOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
+ /**
+ * Do one SOR step on <tt>v</tt>.
+ * Performs a direct SOR step
+ * with right hand side
+ * <tt>b</tt>.
+ */
+ template <typename somenumber>
+ void SOR_step (Vector<somenumber> &v,
+ const Vector<somenumber> &b,
+ const number om = 1.) const;
+
+ /**
+ * Do one adjoint SOR step on
+ * <tt>v</tt>. Performs a direct
+ * TSOR step with right hand side
+ * <tt>b</tt>.
+ */
+ template <typename somenumber>
+ void TSOR_step (Vector<somenumber> &v,
+ const Vector<somenumber> &b,
+ const number om = 1.) const;
+
+ /**
+ * Do one SSOR step on
+ * <tt>v</tt>. Performs a direct
+ * SSOR step with right hand side
+ * <tt>b</tt> by performing TSOR
+ * after SOR.
+ */
+ template <typename somenumber>
+ void SSOR_step (Vector<somenumber> &v,
+ const Vector<somenumber> &b,
+ const number om = 1.) const;
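+
+ // A rough usage sketch for the relaxation steps declared above: a
+ // hand-rolled SSOR iteration. The matrix <tt>A</tt>, the right hand
+ // side <tt>b</tt> and the loop are assumptions for illustration, not
+ // part of this class:
+ //
+ //   Vector<double> x (A.m());
+ //   for (unsigned int sweep = 0; sweep < 10; ++sweep)
+ //     A.SSOR_step (x, b, 1.2);   // relaxation parameter omega = 1.2
+ //
+ // In practice such calls would usually be wrapped in a preconditioner
+ // object that is handed to an iterative solver.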
//@}
- /**
- * @name Input/Output
- */
+ /**
+ * @name Input/Output
+ */
//@{
- /**
- * Print the matrix to the given
- * stream, using the format
- * <tt>(line,col) value</tt>,
- * i.e. one nonzero entry of the
- * matrix per line.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the matrix in the usual
- * format, i.e. as a matrix and
- * not as a list of nonzero
- * elements. For better
- * readability, elements not in
- * the matrix are displayed as
- * empty space, while matrix
- * elements which are explicitly
- * set to zero are displayed as
- * such.
- *
- * The parameters allow for a
- * flexible setting of the output
- * format: <tt>precision</tt> and
- * <tt>scientific</tt> are used
- * to determine the number
- * format, where <tt>scientific =
- * false</tt> means fixed point
- * notation. A zero entry for
- * <tt>width</tt> makes the
- * function compute a width, but
- * it may be changed to a
- * positive value, if output is
- * crude.
- *
- * Additionally, a character for
- * an empty value may be
- * specified.
- *
- * Finally, the whole matrix can
- * be multiplied with a common
- * denominator to produce more
- * readable output, even
- * integers.
- *
- * @attention This function may
- * produce <b>large</b> amounts
- * of output if applied to a
- * large matrix!
- */
- void print_formatted (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const unsigned int width = 0,
- const char *zero_string = " ",
- const double denominator = 1.) const;
-
- /**
- * Print the actual pattern of
- * the matrix. For each entry
- * with an absolute value larger
- * than threshold, a '*' is
- * printed, a ':' for every value
- * smaller and a '.' for every
- * entry not allocated.
- */
- void print_pattern(std::ostream& out,
- const double threshold = 0.) const;
-
- /**
- * Write the data of this object
- * en bloc to a file. This is
- * done in a binary mode, so the
- * output is neither readable by
- * humans nor (probably) by other
- * computers using a different
- * operating system of number
- * format.
- *
- * The purpose of this function
- * is that you can swap out
- * matrices and sparsity pattern
- * if you are short of memory,
- * want to communicate between
- * different programs, or allow
- * objects to be persistent
- * across different runs of the
- * program.
- */
- void block_write (std::ostream &out) const;
-
- /**
- * Read data that has previously
- * been written by block_write()
- * from a file. This is done
- * using the inverse operations
- * to the above function, so it
- * is reasonably fast because the
- * bitstream is not interpreted
- * except for a few numbers up
- * front.
- *
- * The object is resized on this
- * operation, and all previous
- * contents are lost. Note,
- * however, that no checks are
- * performed whether new data and
- * the underlying ChunkSparsityPattern
- * object fit together. It is
- * your responsibility to make
- * sure that the sparsity pattern
- * and the data to be read match.
- *
- * A primitive form of error
- * checking is performed which
- * will recognize the bluntest
- * attempts to interpret some
- * data as a matrix stored
- * bitwise to a file that wasn't
- * actually created that way, but
- * not more.
- */
- void block_read (std::istream &in);
+ /**
+ * Print the matrix to the given
+ * stream, using the format
+ * <tt>(line,col) value</tt>,
+ * i.e. one nonzero entry of the
+ * matrix per line.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the matrix in the usual
+ * format, i.e. as a matrix and
+ * not as a list of nonzero
+ * elements. For better
+ * readability, elements not in
+ * the matrix are displayed as
+ * empty space, while matrix
+ * elements which are explicitly
+ * set to zero are displayed as
+ * such.
+ *
+ * The parameters allow for a
+ * flexible setting of the output
+ * format: <tt>precision</tt> and
+ * <tt>scientific</tt> are used
+ * to determine the number
+ * format, where <tt>scientific =
+ * false</tt> means fixed point
+ * notation. A zero value for
+ * <tt>width</tt> makes the
+ * function compute a suitable
+ * width itself, but it may be
+ * set to a positive value if
+ * the automatic choice produces
+ * crude output.
+ *
+ * Additionally, a character for
+ * an empty value may be
+ * specified.
+ *
+ * Finally, the whole matrix can
+ * be multiplied with a common
+ * denominator to produce more
+ * readable output, even
+ * integers.
+ *
+ * @attention This function may
+ * produce <b>large</b> amounts
+ * of output if applied to a
+ * large matrix!
+ */
+ void print_formatted (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const unsigned int width = 0,
+ const char *zero_string = " ",
+ const double denominator = 1.) const;
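+
+ // For illustration, a hedged example of the two print flavors above;
+ // the matrix <tt>A</tt> is assumed to be filled already:
+ //
+ //   A.print (std::cout);                 // one "(row,col) value" per line
+ //   A.print_formatted (std::cout,
+ //                      2,                // two digits of precision
+ //                      false,            // fixed point, not scientific
+ //                      0,                // let the function choose a width
+ //                      "~");             // print '~' for entries not
+ //                                        // stored in the sparsity pattern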
+
+ /**
+ * Print the actual pattern of
+ * the matrix. For each entry
+ * with an absolute value larger
+ * than threshold, a '*' is
+ * printed, a ':' for every value
+ * smaller and a '.' for every
+ * entry not allocated.
+ */
+ void print_pattern(std::ostream &out,
+ const double threshold = 0.) const;
+
+ /**
+ * Write the data of this object
+ * en bloc to a file. This is
+ * done in a binary mode, so the
+ * output is neither readable by
+ * humans nor (probably) by other
+ * computers using a different
+ * operating system or number
+ * format.
+ *
+ * The purpose of this function
+ * is that you can swap out
+ * matrices and sparsity pattern
+ * if you are short of memory,
+ * want to communicate between
+ * different programs, or allow
+ * objects to be persistent
+ * across different runs of the
+ * program.
+ */
+ void block_write (std::ostream &out) const;
+
+ /**
+ * Read data that has previously
+ * been written by block_write()
+ * from a file. This is done
+ * using the inverse operations
+ * to the above function, so it
+ * is reasonably fast because the
+ * bitstream is not interpreted
+ * except for a few numbers up
+ * front.
+ *
+ * The object is resized on this
+ * operation, and all previous
+ * contents are lost. Note,
+ * however, that no checks are
+ * performed whether new data and
+ * the underlying ChunkSparsityPattern
+ * object fit together. It is
+ * your responsibility to make
+ * sure that the sparsity pattern
+ * and the data to be read match.
+ *
+ * A primitive form of error
+ * checking is performed which
+ * will recognize the bluntest
+ * attempts to interpret some
+ * data as a matrix stored
+ * bitwise to a file that wasn't
+ * actually created that way, but
+ * not more.
+ */
+ void block_read (std::istream &in);
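+
+ // A minimal round-trip sketch for block_write()/block_read(). The file
+ // name is arbitrary, and <tt>sparsity_pattern</tt> is assumed to be the
+ // ChunkSparsityPattern that <tt>A</tt> was built with, since these
+ // functions perform no such check themselves:
+ //
+ //   {
+ //     std::ofstream out ("matrix.dat", std::ios::binary);
+ //     A.block_write (out);
+ //   }
+ //   ChunkSparseMatrix<double> B (sparsity_pattern);
+ //   {
+ //     std::ifstream in ("matrix.dat", std::ios::binary);
+ //     B.block_read (in);
+ //   }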
//@}
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException2 (ExcInvalidIndex,
- int, int,
- << "The entry with index <" << arg1 << ',' << arg2
- << "> does not exist.");
- /**
- * Exception
- */
- DeclException1 (ExcInvalidIndex1,
- int,
- << "The index " << arg1 << " is not in the allowed range.");
- /**
- * Exception
- */
- DeclException0 (ExcDifferentChunkSparsityPatterns);
- /**
- * Exception
- */
- DeclException2 (ExcIteratorRange,
- int, int,
- << "The iterators denote a range of " << arg1
- << " elements, but the given number of rows was " << arg2);
- /**
- * Exception
- */
- DeclException0 (ExcSourceEqualsDestination);
- //@}
- private:
- /**
- * Pointer to the sparsity
- * pattern used for this
- * matrix. In order to guarantee
- * that it is not deleted while
- * still in use, we subscribe to
- * it using the SmartPointer
- * class.
- */
- SmartPointer<const ChunkSparsityPattern,ChunkSparseMatrix<number> > cols;
-
- /**
- * Array of values for all the
- * nonzero entries. The position
- * within the matrix, i.e. the
- * row and column number for a
- * given entry can only be
- * deduced using the sparsity
- * pattern. The same holds for
- * the more common operation of
- * finding an entry by its
- * coordinates.
- */
- number *val;
-
- /**
- * Allocated size of #val. This
- * can be larger than the
- * actually used part if the size
- * of the matrix was reduced
- * somewhen in the past by
- * associating a sparsity pattern
- * with a smaller size to this
- * object, using the reinit()
- * function.
- */
- unsigned int max_len;
-
- /**
- * Return the location of entry
- * $(i,j)$ within the val array.
- */
- unsigned int compute_location (const unsigned int i,
- const unsigned int j) const;
-
- // make all other sparse matrices
- // friends
- template <typename somenumber> friend class ChunkSparseMatrix;
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidIndex,
+ int, int,
+ << "The entry with index <" << arg1 << ',' << arg2
+ << "> does not exist.");
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidIndex1,
+ int,
+ << "The index " << arg1 << " is not in the allowed range.");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcDifferentChunkSparsityPatterns);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcIteratorRange,
+ int, int,
+ << "The iterators denote a range of " << arg1
+ << " elements, but the given number of rows was " << arg2);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcSourceEqualsDestination);
+ //@}
+ private:
+ /**
+ * Pointer to the sparsity
+ * pattern used for this
+ * matrix. In order to guarantee
+ * that it is not deleted while
+ * still in use, we subscribe to
+ * it using the SmartPointer
+ * class.
+ */
+ SmartPointer<const ChunkSparsityPattern,ChunkSparseMatrix<number> > cols;
+
+ /**
+ * Array of values for all the
+ * nonzero entries. The position
+ * within the matrix, i.e. the
+ * row and column number for a
+ * given entry can only be
+ * deduced using the sparsity
+ * pattern. The same holds for
+ * the more common operation of
+ * finding an entry by its
+ * coordinates.
+ */
+ number *val;
+
+ /**
+ * Allocated size of #val. This
+ * can be larger than the
+ * actually used part if the size
+ * of the matrix was reduced
+ * at some point in the past by
+ * associating a sparsity pattern
+ * with a smaller size to this
+ * object, using the reinit()
+ * function.
+ */
+ unsigned int max_len;
+
+ /**
+ * Return the location of entry
+ * $(i,j)$ within the val array.
+ */
+ unsigned int compute_location (const unsigned int i,
+ const unsigned int j) const;
+
+ // make all other sparse matrices
+ // friends
+ template <typename somenumber> friend class ChunkSparseMatrix;
};
/*@}*/
template <typename number>
ChunkSparseMatrix<number>::ChunkSparseMatrix (const ChunkSparsityPattern &c,
- const IdentityMatrix &id)
+ const IdentityMatrix &id)
- :
- cols(0, "ChunkSparseMatrix"),
- val(0),
- max_len(0)
+ :
+ cols(0, "ChunkSparseMatrix"),
+ val(0),
+ max_len(0)
{
Assert (c.n_rows() == id.m(), ExcDimensionMismatch (c.n_rows(), id.m()));
Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n()));
*/
class ConstraintMatrix : public Subscriptor
{
- public:
- /**
- * An enum that describes what should
- * happen if the two ConstraintMatrix
- * objects involved in a call to the
- * merge() function happen to have
- * constraints on the same degrees of
- * freedom.
- */
- enum MergeConflictBehavior
- {
- /**
- * Throw an exception if the two
- * objects concerned have
- * conflicting constraints on the
- * same degree of freedom.
- */
- no_conflicts_allowed,
-
- /**
- * In an operation
- * <code>cm1.merge(cm2)</code>, if
- * <code>cm1</code> and
- * <code>cm2</code> have
- * constraints on the same degree
- * of freedom, take the one from
- * <code>cm1</code>.
- */
- left_object_wins,
-
- /**
- * In an operation
- * <code>cm1.merge(cm2)</code>, if
- * <code>cm1</code> and
- * <code>cm2</code> have
- * constraints on the same degree
- * of freedom, take the one from
- * <code>cm2</code>.
- */
- right_object_wins
- };
-
- /**
- * Constructor. The supplied IndexSet
- * defines which indices might be
- * constrained inside this
- * ConstraintMatrix. In a calculation
- * with a
- * parallel::distributed::DoFHandler one
- * should use locally_relevant_dofs. The
- * IndexSet allows the ConstraintMatrix
- * to safe memory. Otherwise internal
- * data structures for all possible
- * indices will be created.
- */
- ConstraintMatrix (const IndexSet & local_constraints = IndexSet());
-
- /**
- * Copy constructor
- */
- ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
-
- /**
- * Reinit the ConstraintMatrix object and
- * supply an IndexSet with lines that may
- * be constrained. This function is only
- * relevant in the distributed case to
- * supply a different IndexSet. Otherwise
- * this routine is equivalent to calling
- * clear(). See the constructor for
- * details.
- */
- void reinit (const IndexSet & local_constraints = IndexSet());
-
- /**
- * Determines if we can store a
- * constraint for the given @p
- * line_index. This routine only matters
- * in the distributed case and checks if
- * the IndexSet allows storage of this
- * line. Always returns true if not in
- * the distributed case.
- */
- bool can_store_line (const unsigned int line_index) const;
-
- /**
- * This function copies the content of @p
- * constraints_in with DoFs that are
- * element of the IndexSet @p
- * filter. Elements that are not present
- * in the IndexSet are ignored. All DoFs
- * will be transformed to local index
- * space of the filter, both the
- * constrained DoFs and the other DoFs
- * these entries are constrained to. The
- * local index space of the filter is a
- * contiguous numbering of all (global)
- * DoFs that are elements in the
- * filter.
- *
- * If, for example, the filter represents
- * the range <tt>[10,20)</tt>, and the
- * constraint matrix @p constraints_in
- * includes the global indices
- * <tt>{7,13,14}</tt>, the indices
- * <tt>{3,4}</tt> are added to the
- * calling constraint matrix (since 13
- * and 14 are elements in the filter and
- * element 13 is the fourth element in
- * the index, and 14 is the fifth).
- *
- * This function provides an easy way to
- * create a ConstraintMatrix for certain
- * vector components in a vector-valued
- * problem from a full ConstraintMatrix,
- * i.e. extracting a diagonal subblock
- * from a larger ConstraintMatrix. The
- * block is specified by the IndexSet
- * argument.
- */
- void add_selected_constraints (const ConstraintMatrix &constraints_in,
- const IndexSet &filter);
-
- /**
- * @name Adding constraints
- * @{
- */
-
- /**
- * Add a new line to the matrix. If the
- * line already exists, then the function
- * simply returns without doing anything.
- */
- void add_line (const unsigned int line);
-
- /**
- * Call the first add_line() function for
- * every index <code>i</code> for which
- * <code>lines[i]</code> is true.
- *
- * This function essentially exists to
- * allow adding several constraints of
- * the form <i>x<sub>i</sub></i>=0 all at once, where
- * the set of indices <i>i</i> for which these
- * constraints should be added are given
- * by the argument of this function. On
- * the other hand, just as if the
- * single-argument add_line() function
- * were called repeatedly, the
- * constraints can later be modified to
- * include linear dependencies using the
- * add_entry() function as well as
- * inhomogeneities using
- * set_inhomogeneity().
- */
- void add_lines (const std::vector<bool> &lines);
-
- /**
- * Call the first add_line() function for
- * every index <code>i</code> that
- * appears in the argument.
- *
- * This function essentially exists to
- * allow adding several constraints of
- * the form <i>x<sub>i</sub></i>=0 all at once, where
- * the set of indices <i>i</i> for which these
- * constraints should be added are given
- * by the argument of this function. On
- * the other hand, just as if the
- * single-argument add_line() function
- * were called repeatedly, the
- * constraints can later be modified to
- * include linear dependencies using the
- * add_entry() function as well as
- * inhomogeneities using
- * set_inhomogeneity().
- */
- void add_lines (const std::set<unsigned int> &lines);
-
- /**
- * Call the first add_line() function for
- * every index <code>i</code> that
- * appears in the argument.
- *
- * This function essentially exists to
- * allow adding several constraints of
- * the form <i>x<sub>i</sub></i>=0 all at once, where
- * the set of indices <i>i</i> for which these
- * constraints should be added are given
- * by the argument of this function. On
- * the other hand, just as if the
- * single-argument add_line() function
- * were called repeatedly, the
- * constraints can later be modified to
- * include linear dependencies using the
- * add_entry() function as well as
- * inhomogeneities using
- * set_inhomogeneity().
- */
- void add_lines (const IndexSet &lines);
-
- /**
- * Add an entry to a given
- * line. The list of lines is
- * searched from the back to the
- * front, so clever programming
- * would add a new line (which is
- * pushed to the back) and
- * immediately afterwards fill
- * the entries of that line. This
- * way, no expensive searching is
- * needed.
- *
- * If an entry with the same
- * indices as the one this
- * function call denotes already
- * exists, then this function
- * simply returns provided that
- * the value of the entry is the
- * same. Thus, it does no harm to
- * enter a constraint twice.
- */
- void add_entry (const unsigned int line,
- const unsigned int column,
- const double value);
-
- /**
- * Add a whole series of entries,
- * denoted by pairs of column indices
- * and values, to a line of
- * constraints. This function is
- * equivalent to calling the preceding
- * function several times, but is
- * faster.
- */
- void add_entries (const unsigned int line,
- const std::vector<std::pair<unsigned int,double> > &col_val_pairs);
-
- /**
- * Set an imhomogeneity to the
- * constraint line <i>i</i>, according
- * to the discussion in the general
- * class description.
- *
- * @note the line needs to be added with
- * one of the add_line() calls first.
- */
- void set_inhomogeneity (const unsigned int line,
- const double value);
-
- /**
- * Close the filling of entries. Since
- * the lines of a matrix of this type
- * are usually filled in an arbitrary
- * order and since we do not want to
- * use associative constainers to store
- * the lines, we need to sort the lines
- * and within the lines the columns
- * before usage of the matrix. This is
- * done through this function.
- *
- * Also, zero entries are discarded,
- * since they are not needed.
- *
- * After closing, no more entries are
- * accepted. If the object was already
- * closed, then this function returns
- * immediately.
- *
- * This function also resolves chains
- * of constraints. For example, degree
- * of freedom 13 may be constrained to
- * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>7</sub>/2 while degree of
- * freedom 7 is itself constrained as
- * <i>u</i><sub>7</sub>=<i>u</i><sub>2</sub>/2+<i>u</i><sub>4</sub>/2. Then, the
- * resolution will be that
- * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>2</sub>/4+<i>u</i><sub>4</sub>/4. Note,
- * however, that cycles in this graph
- * of constraints are not allowed,
- * i.e. for example <i>u</i><sub>4</sub> may not be
- * constrained, directly or indirectly,
- * to <i>u</i><sub>13</sub> again.
- */
- void close ();
-
- /**
- * Merge the constraints represented by
- * the object given as argument into
- * the constraints represented by this
- * object. Both objects may or may not
- * be closed (by having their function
- * close() called before). If this
- * object was closed before, then it
- * will be closed afterwards as
- * well. Note, however, that if the
- * other argument is closed, then
- * merging may be significantly faster.
- *
- * Using the default value of the second
- * arguments, the constraints in each of
- * the two objects (the old one
- * represented by this object and the
- * argument) may not refer to the same
- * degree of freedom, i.e. a degree of
- * freedom that is constrained in one
- * object may not be constrained in the
- * second. If this is nevertheless the
- * case, an exception is thrown. However,
- * this behavior can be changed by
- * providing a different value for the
- * second argument.
- */
- void merge (const ConstraintMatrix &other_constraints,
- const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed);
-
- /**
- * Shift all entries of this matrix
- * down @p offset rows and over @p
- * offset columns.
- *
- * This function is useful if you are
- * building block matrices, where all
- * blocks are built by the same
- * DoFHandler object, i.e. the matrix
- * size is larger than the number of
- * degrees of freedom. Since several
- * matrix rows and columns correspond
- * to the same degrees of freedom,
- * you'd generate several constraint
- * objects, then shift them, and
- * finally merge() them together
- * again.
- */
- void shift (const unsigned int offset);
-
- /**
- * Clear all entries of this
- * matrix. Reset the flag determining
- * whether new entries are accepted or
- * not.
- *
- * This function may be called also on
- * objects which are empty or already
- * cleared.
- */
- void clear ();
-
- /**
- * @}
- */
-
-
- /**
- * @name Querying constraints
- * @{
- */
-
- /**
- * Return number of constraints stored in
- * this matrix.
- */
- unsigned int n_constraints () const;
-
- /**
- * Return whether the degree of freedom
- * with number @p index is a
- * constrained one.
- *
- * Note that if close() was called
- * before, then this function is
- * significantly faster, since then the
- * constrained degrees of freedom are
- * sorted and we can do a binary
- * search, while before close() was
- * called, we have to perform a linear
- * search through all entries.
- */
- bool is_constrained (const unsigned int index) const;
-
- /**
- * Return whether the dof is
- * constrained, and whether it is
- * constrained to only one other degree
- * of freedom with weight one. The
- * function therefore returns whether
- * the degree of freedom would simply
- * be eliminated in favor of exactly
- * one other degree of freedom.
- *
- * The function returns @p false if
- * either the degree of freedom is not
- * constrained at all, or if it is
- * constrained to more than one other
- * degree of freedom, or if it is
- * constrained to only one degree of
- * freedom but with a weight different
- * from one.
- */
- bool is_identity_constrained (const unsigned int index) const;
-
- /**
- * Return the maximum number of other
- * dofs that one dof is constrained
- * to. For example, in 2d a hanging
- * node is constrained only to its two
- * neighbors, so the returned value
- * would be 2. However, for higher
- * order elements and/or higher
- * dimensions, or other types of
- * constraints, this number is no more
- * obvious.
- *
- * The name indicates that within the
- * system matrix, references to a
- * constrained node are indirected to
- * the nodes it is constrained to.
- */
- unsigned int max_constraint_indirections () const;
-
- /**
- * Returns <tt>true</tt> in case the
- * dof is constrained and there is a
- * non-trivial inhomogeneous valeus set
- * to the dof.
- */
- bool is_inhomogeneously_constrained (const unsigned int index) const;
-
- /**
- * Returns <tt>false</tt> if all
- * constraints in the ConstraintMatrix
- * are homogeneous ones, and
- * <tt>true</tt> if there is at least
- * one inhomogeneity.
- */
- bool has_inhomogeneities () const;
-
- /**
- * Returns a pointer to the the vector of
- * entries if a line is constrained, and a
- * zero pointer in case the dof is not
- * constrained.
- */
- const std::vector<std::pair<unsigned int,double> >*
- get_constraint_entries (const unsigned int line) const;
-
- /**
- * Returns the value of the inhomogeneity
- * stored in the constrained dof @p
- * line. Unconstrained dofs also return a
- * zero value.
- */
- double get_inhomogeneity (const unsigned int line) const;
-
- /**
- * Print the constraint lines. Mainly
- * for debugging purposes.
- *
- * This function writes out all entries
- * in the constraint matrix lines with
- * their value in the form <tt>row col
- * : value</tt>. Unconstrained lines
- * containing only one identity entry
- * are not stored in this object and
- * are not printed.
- */
- void print (std::ostream &) const;
-
- /**
- * Write the graph of constraints in
- * 'dot' format. 'dot' is a program
- * that can take a list of nodes and
- * produce a graphical representation
- * of the graph of constrained degrees
- * of freedom and the degrees of
- * freedom they are constrained to.
- *
- * The output of this function can be
- * used as input to the 'dot' program
- * that can convert the graph into a
- * graphical representation in
- * postscript, png, xfig, and a number
- * of other formats.
- *
- * This function exists mostly for
- * debugging purposes.
- */
- void write_dot (std::ostream &) const;
-
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object.
- */
+ public:
+ /**
+ * An enum that describes what should
+ * happen if the two ConstraintMatrix
+ * objects involved in a call to the
+ * merge() function happen to have
+ * constraints on the same degrees of
+ * freedom.
+ */
+ enum MergeConflictBehavior
+ {
+ /**
+ * Throw an exception if the two
+ * objects concerned have
+ * conflicting constraints on the
+ * same degree of freedom.
+ */
+ no_conflicts_allowed,
+
+ /**
+ * In an operation
+ * <code>cm1.merge(cm2)</code>, if
+ * <code>cm1</code> and
+ * <code>cm2</code> have
+ * constraints on the same degree
+ * of freedom, take the one from
+ * <code>cm1</code>.
+ */
+ left_object_wins,
+
+ /**
+ * In an operation
+ * <code>cm1.merge(cm2)</code>, if
+ * <code>cm1</code> and
+ * <code>cm2</code> have
+ * constraints on the same degree
+ * of freedom, take the one from
+ * <code>cm2</code>.
+ */
+ right_object_wins
+ };
+
+ /**
+ * Constructor. The supplied IndexSet
+ * defines which indices might be
+ * constrained inside this
+ * ConstraintMatrix. In a calculation
+ * with a
+ * parallel::distributed::DoFHandler one
+ * should use locally_relevant_dofs. The
+ * IndexSet allows the ConstraintMatrix
+ * to save memory. Otherwise internal
+ * data structures for all possible
+ * indices will be created.
+ */
+ ConstraintMatrix (const IndexSet &local_constraints = IndexSet());
+
+ /**
+ * Copy constructor
+ */
+ ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
+
+ /**
+ * Reinit the ConstraintMatrix object and
+ * supply an IndexSet with lines that may
+ * be constrained. This function is only
+ * relevant in the distributed case to
+ * supply a different IndexSet. Otherwise
+ * this routine is equivalent to calling
+ * clear(). See the constructor for
+ * details.
+ */
+ void reinit (const IndexSet &local_constraints = IndexSet());
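+
+ // A rough construction sketch for the distributed case mentioned above.
+ // The <tt>dof_handler</tt> object and the DoFTools call are taken from
+ // the usual deal.II workflow and are assumptions here:
+ //
+ //   IndexSet locally_relevant_dofs;
+ //   DoFTools::extract_locally_relevant_dofs (dof_handler,
+ //                                            locally_relevant_dofs);
+ //   ConstraintMatrix constraints (locally_relevant_dofs);
+ //   // ...or, on an already existing object:
+ //   constraints.reinit (locally_relevant_dofs);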
+
+ /**
+ * Determines if we can store a
+ * constraint for the given @p
+ * line_index. This routine only matters
+ * in the distributed case and checks if
+ * the IndexSet allows storage of this
+ * line. Always returns true if not in
+ * the distributed case.
+ */
+ bool can_store_line (const unsigned int line_index) const;
+
+ /**
+ * This function copies the content of @p
+ * constraints_in with DoFs that are
+ * elements of the IndexSet @p
+ * filter. Elements that are not present
+ * in the IndexSet are ignored. All DoFs
+ * will be transformed to local index
+ * space of the filter, both the
+ * constrained DoFs and the other DoFs
+ * these entries are constrained to. The
+ * local index space of the filter is a
+ * contiguous numbering of all (global)
+ * DoFs that are elements in the
+ * filter.
+ *
+ * If, for example, the filter represents
+ * the range <tt>[10,20)</tt>, and the
+ * constraint matrix @p constraints_in
+ * includes the global indices
+ * <tt>{7,13,14}</tt>, the indices
+ * <tt>{3,4}</tt> are added to the
+ * calling constraint matrix (since 13
+ * and 14 are elements in the filter and
+ * element 13 is the fourth element in
+ * the index, and 14 is the fifth).
+ *
+ * This function provides an easy way to
+ * create a ConstraintMatrix for certain
+ * vector components in a vector-valued
+ * problem from a full ConstraintMatrix,
+ * i.e. extracting a diagonal subblock
+ * from a larger ConstraintMatrix. The
+ * block is specified by the IndexSet
+ * argument.
+ */
+ void add_selected_constraints (const ConstraintMatrix &constraints_in,
+ const IndexSet &filter);
+
+ /**
+ * @name Adding constraints
+ * @{
+ */
+
+ /**
+ * Add a new line to the matrix. If the
+ * line already exists, then the function
+ * simply returns without doing anything.
+ */
+ void add_line (const unsigned int line);
+
+ /**
+ * Call the first add_line() function for
+ * every index <code>i</code> for which
+ * <code>lines[i]</code> is true.
+ *
+ * This function essentially exists to
+ * allow adding several constraints of
+ * the form <i>x<sub>i</sub></i>=0 all at once, where
+ * the set of indices <i>i</i> for which these
+ * constraints should be added are given
+ * by the argument of this function. On
+ * the other hand, just as if the
+ * single-argument add_line() function
+ * were called repeatedly, the
+ * constraints can later be modified to
+ * include linear dependencies using the
+ * add_entry() function as well as
+ * inhomogeneities using
+ * set_inhomogeneity().
+ */
+ void add_lines (const std::vector<bool> &lines);
+
+ /**
+ * Call the first add_line() function for
+ * every index <code>i</code> that
+ * appears in the argument.
+ *
+ * This function essentially exists to
+ * allow adding several constraints of
+ * the form <i>x<sub>i</sub></i>=0 all at once, where
+ * the set of indices <i>i</i> for which these
+ * constraints should be added are given
+ * by the argument of this function. On
+ * the other hand, just as if the
+ * single-argument add_line() function
+ * were called repeatedly, the
+ * constraints can later be modified to
+ * include linear dependencies using the
+ * add_entry() function as well as
+ * inhomogeneities using
+ * set_inhomogeneity().
+ */
+ void add_lines (const std::set<unsigned int> &lines);
+
+ /**
+ * Call the first add_line() function for
+ * every index <code>i</code> that
+ * appears in the argument.
+ *
+ * This function essentially exists to
+ * allow adding several constraints of
+ * the form <i>x<sub>i</sub></i>=0 all at once, where
+ * the set of indices <i>i</i> for which these
+ * constraints should be added are given
+ * by the argument of this function. On
+ * the other hand, just as if the
+ * single-argument add_line() function
+ * were called repeatedly, the
+ * constraints can later be modified to
+ * include linear dependencies using the
+ * add_entry() function as well as
+ * inhomogeneities using
+ * set_inhomogeneity().
+ */
+ void add_lines (const IndexSet &lines);
+
+ /**
+ * Add an entry to a given
+ * line. The list of lines is
+ * searched from the back to the
+ * front, so clever programming
+ * would add a new line (which is
+ * pushed to the back) and
+ * immediately afterwards fill
+ * the entries of that line. This
+ * way, no expensive searching is
+ * needed.
+ *
+ * If an entry with the same
+ * indices as the one this
+ * function call denotes already
+ * exists, then this function
+ * simply returns provided that
+ * the value of the entry is the
+ * same. Thus, it does no harm to
+ * enter a constraint twice.
+ */
+ void add_entry (const unsigned int line,
+ const unsigned int column,
+ const double value);
+
+ /**
+ * Add a whole series of entries,
+ * denoted by pairs of column indices
+ * and values, to a line of
+ * constraints. This function is
+ * equivalent to calling the preceding
+ * function several times, but is
+ * faster.
+ */
+ void add_entries (const unsigned int line,
+ const std::vector<std::pair<unsigned int,double> > &col_val_pairs);
+
+ /**
+ * Set an inhomogeneity for the
+ * constraint line <i>i</i>, according
+ * to the discussion in the general
+ * class description.
+ *
+ * @note the line needs to be added with
+ * one of the add_line() calls first.
+ */
+ void set_inhomogeneity (const unsigned int line,
+ const double value);
+
+ /**
+ * Close the filling of entries. Since
+ * the lines of a matrix of this type
+ * are usually filled in an arbitrary
+ * order and since we do not want to
+ * use associative containers to store
+ * the lines, we need to sort the lines
+ * and within the lines the columns
+ * before usage of the matrix. This is
+ * done through this function.
+ *
+ * Also, zero entries are discarded,
+ * since they are not needed.
+ *
+ * After closing, no more entries are
+ * accepted. If the object was already
+ * closed, then this function returns
+ * immediately.
+ *
+ * This function also resolves chains
+ * of constraints. For example, degree
+ * of freedom 13 may be constrained to
+ * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>7</sub>/2 while degree of
+ * freedom 7 is itself constrained as
+ * <i>u</i><sub>7</sub>=<i>u</i><sub>2</sub>/2+<i>u</i><sub>4</sub>/2. Then, the
+ * resolution will be that
+ * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>2</sub>/4+<i>u</i><sub>4</sub>/4. Note,
+ * however, that cycles in this graph
+ * of constraints are not allowed,
+ * i.e. for example <i>u</i><sub>4</sub> may not be
+ * constrained, directly or indirectly,
+ * to <i>u</i><sub>13</sub> again.
+ */
+ void close ();
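+
+ // Putting the functions of this group together, a hedged sketch of
+ // entering the single constraint x_3 = 0.5 * x_7 + 1.0 by hand; the
+ // indices and weights are made up for illustration:
+ //
+ //   ConstraintMatrix constraints;
+ //   constraints.add_line (3);
+ //   constraints.add_entry (3, 7, 0.5);
+ //   constraints.set_inhomogeneity (3, 1.0);
+ //   constraints.close ();   // sort lines, resolve chains, forbid new entries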
+
+ /**
+ * Merge the constraints represented by
+ * the object given as argument into
+ * the constraints represented by this
+ * object. Both objects may or may not
+ * be closed (by having their function
+ * close() called before). If this
+ * object was closed before, then it
+ * will be closed afterwards as
+ * well. Note, however, that if the
+ * other argument is closed, then
+ * merging may be significantly faster.
+ *
+ * Using the default value of the second
+ * arguments, the constraints in each of
+ * the two objects (the old one
+ * represented by this object and the
+ * argument) may not refer to the same
+ * degree of freedom, i.e. a degree of
+ * freedom that is constrained in one
+ * object may not be constrained in the
+ * second. If this is nevertheless the
+ * case, an exception is thrown. However,
+ * this behavior can be changed by
+ * providing a different value for the
+ * second argument.
+ */
+ void merge (const ConstraintMatrix &other_constraints,
+ const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed);
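+
+ // A short sketch of merging two constraint objects, resolving conflicts
+ // in favor of the calling object; both objects are assumed to have been
+ // filled elsewhere:
+ //
+ //   hanging_node_constraints.merge (boundary_constraints,
+ //                                   ConstraintMatrix::left_object_wins);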
+
+ /**
+ * Shift all entries of this matrix
+ * down @p offset rows and over @p
+ * offset columns.
+ *
+ * This function is useful if you are
+ * building block matrices, where all
+ * blocks are built by the same
+ * DoFHandler object, i.e. the matrix
+ * size is larger than the number of
+ * degrees of freedom. Since several
+ * matrix rows and columns correspond
+ * to the same degrees of freedom,
+ * you'd generate several constraint
+ * objects, then shift them, and
+ * finally merge() them together
+ * again.
+ */
+ void shift (const unsigned int offset);
+
+ /**
+ * Clear all entries of this
+ * matrix. Reset the flag determining
+ * whether new entries are accepted or
+ * not.
+ *
+ * This function may be called also on
+ * objects which are empty or already
+ * cleared.
+ */
+ void clear ();
+
+ /**
+ * @}
+ */
+
+
+ /**
+ * @name Querying constraints
+ * @{
+ */
+
+ /**
+ * Return number of constraints stored in
+ * this matrix.
+ */
+ unsigned int n_constraints () const;
+
+ /**
+ * Return whether the degree of freedom
+ * with number @p index is a
+ * constrained one.
+ *
+ * Note that if close() was called
+ * before, then this function is
+ * significantly faster, since then the
+ * constrained degrees of freedom are
+ * sorted and we can do a binary
+ * search, while before close() was
+ * called, we have to perform a linear
+ * search through all entries.
+ */
+ bool is_constrained (const unsigned int index) const;
+
+ /**
+ * Return whether the dof is
+ * constrained, and whether it is
+ * constrained to only one other degree
+ * of freedom with weight one. The
+ * function therefore returns whether
+ * the degree of freedom would simply
+ * be eliminated in favor of exactly
+ * one other degree of freedom.
+ *
+ * The function returns @p false if
+ * either the degree of freedom is not
+ * constrained at all, or if it is
+ * constrained to more than one other
+ * degree of freedom, or if it is
+ * constrained to only one degree of
+ * freedom but with a weight different
+ * from one.
+ */
+ bool is_identity_constrained (const unsigned int index) const;
+
+ /**
+ * Return the maximum number of other
+ * dofs that one dof is constrained
+ * to. For example, in 2d a hanging
+ * node is constrained only to its two
+ * neighbors, so the returned value
+ * would be 2. However, for higher
+ * order elements and/or higher
+ * dimensions, or other types of
+ * constraints, this number is no
+ * longer obvious.
+ *
+ * The name indicates that within the
+ * system matrix, references to a
+ * constrained node are indirected to
+ * the nodes it is constrained to.
+ */
+ unsigned int max_constraint_indirections () const;
+
+ /**
+ * Returns <tt>true</tt> in case the
+ * dof is constrained and there is a
+ * non-trivial inhomogeneous value set
+ * for the dof.
+ */
+ bool is_inhomogeneously_constrained (const unsigned int index) const;
+
+ /**
+ * Returns <tt>false</tt> if all
+ * constraints in the ConstraintMatrix
+ * are homogeneous ones, and
+ * <tt>true</tt> if there is at least
+ * one inhomogeneity.
+ */
+ bool has_inhomogeneities () const;
+
+ /**
+ * Returns a pointer to the vector of
+ * entries if a line is constrained, and a
+ * zero pointer in case the dof is not
+ * constrained.
+ */
+ const std::vector<std::pair<unsigned int,double> > *
+ get_constraint_entries (const unsigned int line) const;
+
+ /**
+ * Returns the value of the inhomogeneity
+ * stored in the constrained dof @p
+ * line. Unconstrained dofs also return a
+ * zero value.
+ */
+ double get_inhomogeneity (const unsigned int line) const;
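+
+ // A hedged sketch of querying a single degree of freedom with the
+ // functions above; the index <tt>dof</tt> is an arbitrary example:
+ //
+ //   if (constraints.is_constrained (dof))
+ //     {
+ //       const std::vector<std::pair<unsigned int,double> > *entries
+ //         = constraints.get_constraint_entries (dof);
+ //       for (unsigned int i=0; i<entries->size(); ++i)
+ //         std::cout << (*entries)[i].first << " * "
+ //                   << (*entries)[i].second << std::endl;
+ //       std::cout << " + " << constraints.get_inhomogeneity (dof)
+ //                 << std::endl;
+ //     }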
+
+ /**
+ * Print the constraint lines. Mainly
+ * for debugging purposes.
+ *
+ * This function writes out all entries
+ * in the constraint matrix lines with
+ * their value in the form <tt>row col
+ * : value</tt>. Unconstrained lines
+ * containing only one identity entry
+ * are not stored in this object and
+ * are not printed.
+ */
+ void print (std::ostream &) const;
+
+ /**
+ * Write the graph of constraints in
+ * 'dot' format. 'dot' is a program
+ * that can take a list of nodes and
+ * produce a graphical representation
+ * of the graph of constrained degrees
+ * of freedom and the degrees of
+ * freedom they are constrained to.
+ *
+ * The output of this function can be
+ * used as input to the 'dot' program
+ * that can convert the graph into a
+ * graphical representation in
+ * postscript, png, xfig, and a number
+ * of other formats.
+ *
+ * This function exists mostly for
+ * debugging purposes.
+ */
+ void write_dot (std::ostream &) const;
+
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Eliminating constraints from linear systems after their creation
+ * @{
+ */
+
+ /**
+ * Condense a given sparsity
+ * pattern. This function assumes the
+ * uncondensed matrix struct to be
+ * compressed and the one to be filled
+ * to be empty. The condensed structure
+ * is compressed afterwards.
+ *
+ * The constraint matrix object must be
+ * closed to call this function.
+ *
+ * @note The hanging nodes are
+ * completely eliminated from the
+ * linear system referring to
+ * <tt>condensed</tt>. Therefore, the
+ * dimension of <tt>condensed</tt> is
+ * the dimension of
+ * <tt>uncondensed</tt> minus the
+ * number of constrained degrees of
+ * freedom.
+ */
+ void condense (const SparsityPattern &uncondensed,
+ SparsityPattern &condensed) const;
+
+
+ /**
+ * This function does much the same as
+ * the above one, except that it
+ * condenses the matrix struct
+ * 'in-place'. It does not remove
+ * nonzero entries from the matrix but
+ * adds those needed for the process of
+ * distribution of the constrained
+ * degrees of freedom.
+ *
+ * Since this function adds new nonzero
+ * entries to the sparsity pattern, the
+ * argument must not be
+ * compressed. However the constraint
+ * matrix must be closed. The matrix
+ * struct is compressed at the end of
+ * the function.
+ */
+ void condense (SparsityPattern &sparsity) const;
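+
+ // The typical "build unconstrained, then condense" workflow, sketched
+ // under the usual assumptions: <tt>dof_handler</tt> and the DoFTools
+ // call come from the surrounding program, not from this class. Note
+ // that the in-place condense() compresses the pattern itself at the
+ // end, as documented above:
+ //
+ //   SparsityPattern sparsity (dof_handler.n_dofs (),
+ //                             dof_handler.max_couplings_between_dofs ());
+ //   DoFTools::make_sparsity_pattern (dof_handler, sparsity);
+ //   constraints.close ();
+ //   constraints.condense (sparsity);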
+
+ /**
+ * Same function as above, but
+ * condenses square block sparsity
+ * patterns.
+ */
+ void condense (BlockSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ *
+ * Given the data structure used by
+ * CompressedSparsityPattern, this
+ * function becomes quadratic in the
+ * number of degrees of freedom for
+ * large problems and can dominate
+ * setting up linear systems when
+ * several hundred thousand or millions
+ * of unknowns are involved and for
+ * problems with many nonzero elements
+ * per row (for example for
+ * vector-valued problems or hp finite
+ * elements). In this case, it is
+ * advisable to use the
+ * CompressedSetSparsityPattern class
+ * instead, see for example @ref
+ * step_27 "step-27", or to use the
+ * CompressedSimpleSparsityPattern
+ * class, see for example @ref step_31
+ * "step-31".
+ */
+ void condense (CompressedSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses compressed sparsity
+ * patterns, which are based on the
+ * std::set container.
+ */
+ void condense (CompressedSetSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses compressed sparsity
+ * patterns, which are based on the
+ * 'simple' approach.
+ */
+ void condense (CompressedSimpleSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ *
+ * Given the data structure used by
+ * BlockCompressedSparsityPattern, this
+ * function becomes quadratic in the
+ * number of degrees of freedom for
+ * large problems and can dominate
+ * setting up linear systems when
+ * several hundred thousand or millions
+ * of unknowns are involved and for
+ * problems with many nonzero elements
+ * per row (for example for
+ * vector-valued problems or hp finite
+ * elements). In this case, it is
+ * advisable to use the
+ * BlockCompressedSetSparsityPattern
+ * class instead, see for example @ref
+ * step_27 "step-27" and @ref step_31
+ * "step-31".
+ */
+ void condense (BlockCompressedSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ */
+ void condense (BlockCompressedSetSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ */
+ void condense (BlockCompressedSimpleSparsityPattern &sparsity) const;
+
+
+ /**
+ * Condense a given matrix. The
+ * associated matrix struct should be
+ * condensed and compressed. It is the
+ * user's responsibility to guarantee
+ * that all entries in the @p condensed
+ * matrix be zero!
+ *
+ * The constraint matrix object must be
+ * closed to call this function.
+ */
+ template<typename number>
+ void condense (const SparseMatrix<number> &uncondensed,
+ SparseMatrix<number> &condensed) const;
+
+ /**
+ * This function does much the same as
+ * the above one, except that it
+ * condenses the matrix 'in-place'. See
+ * the general documentation of this
+ * class for more detailed information.
+ */
+ template<typename number>
+ void condense (SparseMatrix<number> &matrix) const;
+
+ /**
+ * Same function as above, but
+ * condenses square block sparse
+ * matrices.
+ */
+ template <typename number>
+ void condense (BlockSparseMatrix<number> &matrix) const;
+
+ /**
+ * Condense the given vector @p
+ * uncondensed into @p condensed. It is
+ * the user's responsibility to
+ * guarantee that all entries of @p
+ * condensed be zero. Note that this
+ * function does not take any
+ * inhomogeneity into account and
+ * throws an exception in case there
+ * are any inhomogeneities. Use the
+ * variant that takes both a matrix
+ * and a vector in that case.
+ *
+ * The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface.
+ */
+ template <class VectorType>
+ void condense (const VectorType &uncondensed,
+ VectorType &condensed) const;
+
+ /**
+ * Condense the given vector
+ * in-place. The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface. Note that this function
+ * does not take any inhomogeneity into
+ * account and throws an exception in
+ * case there are any
+ * inhomogeneities. Use the variant
+ * that takes both a matrix and a
+ * vector in that case.
+ */
+ template <class VectorType>
+ void condense (VectorType &vec) const;
+
+ /**
+ * Condense a given matrix and a given
+ * vector. The associated matrix struct
+ * should be condensed and
+ * compressed. It is the user's
+ * responsibility to guarantee that all
+ * entries in the @p condensed matrix
+ * and vector be zero! This function is
+ * the appropriate choice for applying
+ * inhomogeneous constraints.
+ *
+ * The constraint matrix object must be
+ * closed to call this function.
+ */
+ template<typename number, class VectorType>
+ void condense (const SparseMatrix<number> &uncondensed_matrix,
+ const VectorType &uncondensed_vector,
+ SparseMatrix<number> &condensed_matrix,
+ VectorType &condensed_vector) const;
+
+ /**
+ * This function does much the same as
+ * the above one, except that it
+ * condenses matrix and vector
+ * 'in-place'. See the general
+ * documentation of this class for more
+ * detailed information.
+ */
+ template<typename number, class VectorType>
+ void condense (SparseMatrix<number> &matrix,
+ VectorType &vector) const;
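+
+ // A hedged sketch of condensing an assembled system in-place, the
+ // variant that also handles inhomogeneous constraints. The objects
+ // <tt>system_matrix</tt>, <tt>system_rhs</tt> and <tt>solution</tt> are
+ // assumed from the usual deal.II workflow:
+ //
+ //   constraints.condense (system_matrix, system_rhs);
+ //   // ...solve the linear system...
+ //   constraints.distribute (solution);   // usual follow-up step to set
+ //                                        // constrained DoFs afterwards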
+
+ /**
+ * Same function as above, but
+ * condenses square block sparse
+ * matrices and vectors.
+ */
+ template <typename number, class BlockVectorType>
+ void condense (BlockSparseMatrix<number> &matrix,
+ BlockVectorType &vector) const;
+
+ /**
+ * Sets the values of all constrained
+ * DoFs in a vector to zero.
+ * The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a
+ * PETSc or Trilinos vector
+ * wrapper class, or any other
+ * type having the same
+ * interface.
+ */
+ template <class VectorType>
+ void set_zero (VectorType &vec) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Eliminating constraints from linear systems during their creation
+ * @{
+ */
+
+ /**
+ * This function takes a vector of
+ * local contributions (@p
+ * local_vector) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global vector. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as @p
+ * local_vector and @p
+ * local_dof_indices have the same
+ * number of elements, this function is
+ * happy with whatever it is
+ * given.
+ *
+ * In contrast to the similar function
+ * in the DoFAccessor class, this
+ * function also takes care of
+ * constraints, i.e. if one of the
+ * elements of @p local_dof_indices
+ * belongs to a constrained node, then
+ * rather than writing the
+ * corresponding element of @p
+ * local_vector into @p global_vector,
+ * the element is distributed to the
+ * entries in the global vector to
+ * which this particular degree of
+ * freedom is constrained.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to the
+ * global object, one saves the call to
+ * the condense function after the
+ * vectors and matrices are fully
+ * assembled. On the other hand, by
+ * consequence, the function does not
+ * only write into the entries enumerated
+ * by the @p local_dof_indices array, but
+ * also (possibly) others as necessary.
+ *
+ * Note that this function will apply all
+ * constraints as if they were
+ * homogeneous. For correctly setting
+ * inhomogeneous constraints, use the
+ * similar function with a matrix
+ * argument or the function with both
+ * matrix and vector arguments.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to make
+ * sure that only one process at a time
+ * calls this function.
+ */
+ template <class InVector, class OutVector>
+ void
+ distribute_local_to_global (const InVector &local_vector,
+ const std::vector<unsigned int> &local_dof_indices,
+ OutVector &global_vector) const;
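+
+ // As a sketch, the call as it might appear inside an assembly loop; the
+ // cell iterator, <tt>cell_rhs</tt>, <tt>local_dof_indices</tt> and
+ // <tt>system_rhs</tt> are the usual per-cell quantities of a deal.II
+ // program and are assumptions here:
+ //
+ //   cell->get_dof_indices (local_dof_indices);
+ //   constraints.distribute_local_to_global (cell_rhs,
+ //                                           local_dof_indices,
+ //                                           system_rhs);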
+
+ /**
+ * This function takes a vector of
+ * local contributions (@p
+ * local_vector) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global vector. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as @p
+ * local_vector and @p
+ * local_dof_indices have the same
+ * number of elements, this function is
+ * happy with whatever it is
+ * given.
+ *
+ * In contrast to the similar function in
+ * the DoFAccessor class, this function
+ * also takes care of constraints,
+ * i.e. if one of the elements of @p
+ * local_dof_indices belongs to a
+ * constrained node, then rather than
+ * writing the corresponding element of
+ * @p local_vector into @p global_vector,
+ * the element is distributed to the
+ * entries in the global vector to which
+ * this particular degree of freedom is
+ * constrained.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to the
+ * global object, one saves the call to
+ * the condense function after the
+ * vectors and matrices are fully
+ * assembled. On the other hand, by
+ * consequence, the function does not
+ * only write into the entries enumerated
+ * by the @p local_dof_indices array, but
+ * also (possibly) others as
+ * necessary. This includes writing into
+ * diagonal elements of the matrix if the
+ * corresponding degree of freedom is
+ * constrained.
+ *
+ * The fourth argument
+ * <tt>local_matrix</tt> is intended to
+ * be used in case one wants to apply
+ * inhomogeneous constraints on the
+ * vector only. Such a situation could be
+ * where one wants to assemble a right
+ * hand side vector on a problem with
+ * inhomogeneous constraints, but the
+ * global matrix has been assembled
+ * previously. A typical example of this
+ * is a time stepping algorithm where the
+ * stiffness matrix is assembled once,
+ * and the right hand side updated every
+ * time step. Note, however, that the
+ * entries in the columns of the local
+ * matrix have to be exactly the same as
+ * those that have been written into the
+ * global matrix. Otherwise, this
+ * function will not be able to correctly
+ * handle inhomogeneities.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to make
+ * sure that only one process at a time
+ * calls this function.
+ */
+ template <typename VectorType>
+ void
+ distribute_local_to_global (const Vector<double> &local_vector,
+ const std::vector<unsigned int> &local_dof_indices,
+ VectorType &global_vector,
+ const FullMatrix<double> &local_matrix) const;
+
+ /**
+ * Enter a single value into a
+ * result vector, obeying constraints.
+ */
+ template <class VectorType>
+ void
+ distribute_local_to_global (const unsigned int index,
+ const double value,
+ VectorType &global_vector) const;
+
+ /**
+ * This function takes a pointer to a
+ * vector of local contributions (@p
+ * local_vector) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global vector. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as the
+ * entries in @p local_dof_indices
+ * indicate reasonable global vector
+ * entries, this function is happy with
+ * whatever it is given.
+ *
+ * If one of the elements of @p
+ * local_dof_indices belongs to a
+ * constrained node, then rather than
+ * writing the corresponding element of
+ * @p local_vector into @p
+ * global_vector, the element is
+ * distributed to the entries in the
+ * global vector to which this
+ * particular degree of freedom is
+ * constrained.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to
+ * the global object, one saves the
+ * call to the condense function after
+ * the vectors and matrices are fully
+ * assembled. Note that this function
+ * completely ignores inhomogeneous
+ * constraints.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to
+ * make sure that only one process at a
+ * time calls this function.
+ */
+ template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+ class VectorType>
+ void
+ distribute_local_to_global (ForwardIteratorVec local_vector_begin,
+ ForwardIteratorVec local_vector_end,
+ ForwardIteratorInd local_indices_begin,
+ VectorType &global_vector) const;
+
+ /**
+ * This function takes a matrix of
+ * local contributions (@p
+ * local_matrix) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global matrix. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as @p
+ * local_matrix and @p
+ * local_dof_indices have the same
+ * number of elements, this function is
+ * happy with whatever it is given.
+ *
+ * In contrast to the similar function
+ * in the DoFAccessor class, this
+ * function also takes care of
+ * constraints, i.e. if one of the
+ * elements of @p local_dof_indices
+ * belongs to a constrained node, then
+ * rather than writing the
+ * corresponding element of @p
+ * local_matrix into @p global_matrix,
+ * the element is distributed to the
+ * entries in the global matrix to
+ * which this particular degree of
+ * freedom is constrained.
+ *
+ * With this scheme, we never write
+ * into rows or columns of constrained
+ * degrees of freedom. In order to make
+ * sure that the resulting matrix can
+ * still be inverted, we need to do
+ * something with the diagonal elements
+ * corresponding to constrained
+ * nodes. Thus, if a degree of freedom
+ * in @p local_dof_indices is
+ * constrained, we distribute the
+ * corresponding entries in the matrix,
+ * but also add the absolute value of
+ * the diagonal entry of the local
+ * matrix to the corresponding entry in
+ * the global matrix. Since the exact
+ * value of the diagonal element is not
+ * important (the value of the
+ * respective degree of freedom will be
+ * overwritten by the distribute() call
+ * later on anyway), this guarantees
+ * that the diagonal entry is always
+ * non-zero, positive, and of the same
+ * order of magnitude as the other
+ * entries of the matrix.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to
+ * the global object, one saves the
+ * call to the condense function after
+ * the vectors and matrices are fully
+ * assembled.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to
+ * make sure that only one process at a
+ * time calls this function.
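+ *
+ * In a typical assembly loop, use of this function might look like
+ * the following sketch (the names @p cell, @p cell_matrix,
+ * @p system_matrix, @p dofs_per_cell, and @p constraints are
+ * placeholders, not part of this interface):
+ * @code
+ *   FullMatrix<double>        cell_matrix (dofs_per_cell, dofs_per_cell);
+ *   std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ *   // ... integrate local contributions into cell_matrix ...
+ *   cell->get_dof_indices (local_dof_indices);
+ *   constraints.distribute_local_to_global (cell_matrix,
+ *                                           local_dof_indices,
+ *                                           system_matrix);
+ * @endcode
+ * No call to condense() is then necessary for @p system_matrix.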
+ */
+ template <typename MatrixType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const std::vector<unsigned int> &local_dof_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * Does the same as the function
+ * above, but can also handle
+ * non-quadratic (rectangular) matrices.
+ */
+ template <typename MatrixType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * This function simultaneously
+ * writes elements into matrix
+ * and vector, according to the
+ * constraints specified by the
+ * calling ConstraintMatrix. This
+ * function can correctly handle
+ * inhomogeneous constraints as
+ * well. For the parameter
+ * use_inhomogeneities_for_rhs,
+ * see the documentation in the
+ * @ref constraints module.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to
+ * make sure that only one process at a
+ * time calls this function.
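+ *
+ * A sketch of the typical use, assembling matrix and right hand
+ * side in one sweep (all names other than the function itself are
+ * placeholders from the surrounding assembly loop):
+ * @code
+ *   cell->get_dof_indices (local_dof_indices);
+ *   constraints.distribute_local_to_global (cell_matrix, cell_rhs,
+ *                                           local_dof_indices,
+ *                                           system_matrix, system_rhs);
+ * @endcode
+ * In contrast to the vector-only variant, this resolves
+ * inhomogeneous constraints correctly, since the local matrix is
+ * available to compute their contribution to the right hand side.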
+ */
+ template <typename MatrixType, typename VectorType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<unsigned int> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs = false) const;
+
+ /**
+ * Do a similar operation as the
+ * distribute_local_to_global() function
+ * that distributes writing entries into
+ * a matrix for constrained degrees of
+ * freedom, except that here we don't
+ * write into a matrix but only allocate
+ * sparsity pattern entries.
+ *
+ * As explained in the
+ * @ref hp_paper "hp paper"
+ * and in step-27,
+ * first allocating a sparsity pattern
+ * and later coming back and allocating
+ * additional entries for those matrix
+ * entries that will be written to due to
+ * the elimination of constrained degrees
+ * of freedom (using
+ * ConstraintMatrix::condense() ), can be
+ * a very expensive procedure. It is
+ * cheaper to allocate these entries
+ * right away without having to do a
+ * second pass over the sparsity pattern
+ * object. This function does exactly
+ * that.
+ *
+ * Because the function only allocates
+ * entries in a sparsity pattern, all it
+ * needs to know are the degrees of
+ * freedom that couple to each
+ * other. Unlike the previous function,
+ * no actual values are written, so the
+ * second input argument is not necessary
+ * here.
+ *
+ * The third argument to this function,
+ * keep_constrained_entries determines
+ * whether the function shall allocate
+ * entries in the sparsity pattern at
+ * all for entries that will later be
+ * set to zero upon condensation of the
+ * matrix. These entries are necessary
+ * if the matrix is built
+ * unconstrained, and only later
+ * condensed. They are not necessary if
+ * the matrix is built using the
+ * distribute_local_to_global()
+ * function of this class which
+ * distributes entries right away when
+ * copying a local matrix into a global
+ * object. The default of this argument
+ * is true, meaning to allocate the few
+ * entries that may later be set to
+ * zero.
+ *
+ * By default, the function adds
+ * entries for all pairs of indices
+ * given in the first argument to the
+ * sparsity pattern (unless
+ * keep_constrained_entries is
+ * false). However, sometimes one would
+ * like to only add a subset of all of
+ * these pairs. In that case, the last
+ * argument can be used to specify
+ * a boolean mask determining which of
+ * the pairs of indices should be
+ * considered. If the
+ * mask is false for a pair of indices,
+ * then no entry will be added to the
+ * sparsity pattern for this pair,
+ * irrespective of whether one or both
+ * of the indices correspond to
+ * constrained degrees of freedom.
+ *
+ * This function is not typically called
+ * from user code, but is used in the
+ * DoFTools::make_sparsity_pattern()
+ * function when passed a constraint
+ * matrix object.
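+ *
+ * A sketch of calling this function directly when building a
+ * sparsity pattern by hand (normally
+ * DoFTools::make_sparsity_pattern() does this when given a
+ * constraint matrix; the names @p csp, @p dofs_per_cell, and
+ * @p constraints are placeholders):
+ * @code
+ *   CompressedSimpleSparsityPattern csp (dof_handler.n_dofs());
+ *   std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ *   typename DoFHandler<dim>::active_cell_iterator
+ *     cell = dof_handler.begin_active(),
+ *     endc = dof_handler.end();
+ *   for (; cell != endc; ++cell)
+ *     {
+ *       cell->get_dof_indices (local_dof_indices);
+ *       constraints.add_entries_local_to_global (local_dof_indices,
+ *                                                csp,
+ *                                                false);
+ *     }
+ * @endcode
+ * Passing <tt>false</tt> as third argument is appropriate if the
+ * matrix will later be assembled with distribute_local_to_global(),
+ * so that entries needed only for condensation are never allocated.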
+ */
+ template <typename SparsityType>
+ void
+ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries = true,
+ const Table<2,bool> &dof_mask = default_empty_table) const;
+
+ /**
+ * Similar to the other function,
+ * but for non-quadratic sparsity
+ * patterns.
+ */
+ template <typename SparsityType>
+ void
+ add_entries_local_to_global (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries = true,
+ const Table<2,bool> &dof_mask = default_empty_table) const;
+
+ /**
+ * This function imports values from a
+ * global vector (@p global_vector) by
+ * applying the constraints to a vector
+ * of local values, expressed in
+ * iterator format. In most cases, the
+ * local values will be identified by
+ * the local dof values on a
+ * cell. However, as long as the
+ * entries in @p local_dof_indices
+ * indicate reasonable global vector
+ * entries, this function is happy with
+ * whatever it is given.
+ *
+ * If one of the elements of @p
+ * local_dof_indices belongs to a
+ * constrained node, then rather than
+ * writing the corresponding element of
+ * @p global_vector into @p
+ * local_vector, the constraints are
+ * resolved as the respective
+ * distribute function does, i.e., the
+ * local entry is constructed from the
+ * global entries to which this
+ * particular degree of freedom is
+ * constrained.
+ *
+ * In contrast to the similar function
+ * get_dof_values in the DoFAccessor
+ * class, this function does not need
+ * the constrained values to be
+ * correctly set (i.e., distribute to
+ * be called).
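+ *
+ * A usage sketch, reading constraint-consistent local values from
+ * a global solution vector (all names except the function itself
+ * are placeholders):
+ * @code
+ *   std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ *   std::vector<double>       local_values (dofs_per_cell);
+ *   cell->get_dof_indices (local_dof_indices);
+ *   constraints.get_dof_values (solution,
+ *                               local_dof_indices.begin(),
+ *                               local_values.begin(),
+ *                               local_values.end());
+ * @endcode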
+ */
+ template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+ class VectorType>
+ void
- get_dof_values (const VectorType &global_vector,
++ get_dof_values (const VectorType &global_vector,
+ ForwardIteratorInd local_indices_begin,
+ ForwardIteratorVec local_vector_begin,
+ ForwardIteratorVec local_vector_end) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Dealing with constraints after solving a linear system
+ * @{
+ */
+
+ /**
+ * Re-distribute the elements of the
+ * vector @p condensed to @p
+ * uncondensed. It is the user's
+ * responsibility to guarantee that all
+ * entries of @p uncondensed be zero!
+ *
+ * This function undoes, to some extent,
+ * the action of @p condense, but it
+ * should be noted that it is not the
+ * inverse of @p condense.
+ *
+ * The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface.
+ */
+ template <class VectorType>
+ void distribute (const VectorType &condensed,
+ VectorType &uncondensed) const;
+
+ /**
+ * Re-distribute the elements of the
+ * vector in-place. The @p VectorType
+ * may be a Vector<float>,
+ * Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface.
+ *
+ * Note that if called with a
+ * TrilinosWrappers::MPI::Vector it may
+ * not contain ghost elements.
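+ *
+ * The usual place for this call is right after solving the
+ * condensed linear system, for example (names are placeholders):
+ * @code
+ *   solver.solve (system_matrix, solution, system_rhs, preconditioner);
+ *   constraints.distribute (solution);
+ * @endcode
+ * so that the constrained degrees of freedom obtain the values
+ * implied by the constraints on the unconstrained ones.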
+ */
+ template <class VectorType>
+ void distribute (VectorType &vec) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcMatrixIsClosed);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcMatrixNotClosed);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcLineInexistant,
+ unsigned int,
+ << "The specified line " << arg1
+ << " does not exist.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException4 (ExcEntryAlreadyExists,
+ int, int, double, double,
+ << "The entry for the indices " << arg1 << " and "
+ << arg2 << " already exists, but the values "
+ << arg3 << " (old) and " << arg4 << " (new) differ "
+ << "by " << (arg4-arg3) << ".");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException2 (ExcDoFConstrainedToConstrainedDoF,
+ int, int,
+ << "You tried to constrain DoF " << arg1
+ << " to DoF " << arg2
+ << ", but that one is also constrained. This is not allowed!");
+ /**
+ * Exception.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcDoFIsConstrainedFromBothObjects,
+ int,
+ << "Degree of freedom " << arg1
+ << " is constrained from both object in a merge operation.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcDoFIsConstrainedToConstrainedDoF,
+ int,
+ << "In the given argument a degree of freedom is constrained "
+ << "to another DoF with number " << arg1
+ << ", which however is constrained by this object. This is not"
+ << " allowed.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcRowNotStoredHere,
+ int,
+ << "The index set given to this constraint matrix indicates "
+ << "constraints for degree of freedom " << arg1
+ << " should not be stored by this object, but a constraint "
+ << "is being added.");
+
+ private:
+
+ /**
+ * This class represents one line of a
+ * constraint matrix.
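+ *
+ * As an illustration (values purely for the sake of example), a
+ * typical hanging node constraint $x_{42} = \frac 12 x_{10} +
+ * \frac 12 x_{12}$ would be stored as an object with
+ * <tt>line==42</tt>, <tt>entries=={{10,0.5},{12,0.5}}</tt>, and
+ * <tt>inhomogeneity==0</tt>, using the members documented below.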
+ */
+ struct ConstraintLine
+ {
+ /**
+ * A data type in which we store the list
+ * of entries that make up the homogeneous
+ * part of a constraint.
+ */
+ typedef std::vector<std::pair<unsigned int,double> > Entries;
+
+ /**
+ * Number of this line. Since only
+ * very few lines are stored, we
+ * cannot assume a specific order
+ * and have to store the line
+ * number explicitly.
+ */
+ unsigned int line;
+
+ /**
+ * Row numbers and values of the
+ * entries in this line.
+ *
+ * For the reason why we use a
+ * vector instead of a map and the
+ * consequences thereof, the same
+ * applies as what is said for
+ * ConstraintMatrix::lines.
+ */
+ Entries entries;
+
+ /**
+ * Value of the inhomogeneity.
+ */
+ double inhomogeneity;
+
+ /**
+ * This operator is a bit weird and
+ * unintuitive: it compares the
+ * line numbers of two lines. We
+ * need this to sort the lines; in
+ * fact we could do this using a
+ * comparison predicate. However,
+ * this way, it is easier, albeit
+ * unintuitive since two lines
+ * really have no god-given order
+ * relation.
+ */
+ bool operator < (const ConstraintLine &) const;
+
+ /**
+ * This operator is likewise weird:
+ * it checks whether the line
+ * indices of the two operands are
+ * equal, irrespective of the fact
+ * that the contents of the line
+ * may be different.
+ */
+ bool operator == (const ConstraintLine &) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes) of
+ * this object.
+ */
std::size_t memory_consumption () const;
-
- /**
- * @}
- */
-
- /**
- * @name Eliminating constraints from linear systems after their creation
- * @{
- */
-
- /**
- * Condense a given sparsity
- * pattern. This function assumes the
- * uncondensed matrix struct to be
- * compressed and the one to be filled
- * to be empty. The condensed structure
- * is compressed afterwards.
- *
- * The constraint matrix object must be
- * closed to call this function.
- *
- * @note The hanging nodes are
- * completely eliminated from the
- * linear system referring to
- * <tt>condensed</tt>. Therefore, the
- * dimension of <tt>condensed</tt> is
- * the dimension of
- * <tt>uncondensed</tt> minus the
- * number of constrained degrees of
- * freedom.
- */
- void condense (const SparsityPattern &uncondensed,
- SparsityPattern &condensed) const;
-
-
- /**
- * This function does much the same as
- * the above one, except that it
- * condenses the matrix struct
- * 'in-place'. It does not remove
- * nonzero entries from the matrix but
- * adds those needed for the process of
- * distribution of the constrained
- * degrees of freedom.
- *
- * Since this function adds new nonzero
- * entries to the sparsity pattern, the
- * argument must not be
- * compressed. However the constraint
- * matrix must be closed. The matrix
- * struct is compressed at the end of
- * the function.
- */
- void condense (SparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square block sparsity
- * patterns.
- */
- void condense (BlockSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- *
- * Given the data structure used by
- * CompressedSparsityPattern, this
- * function becomes quadratic in the
- * number of degrees of freedom for
- * large problems and can dominate
- * setting up linear systems when
- * several hundred thousand or millions
- * of unknowns are involved and for
- * problems with many nonzero elements
- * per row (for example for
- * vector-valued problems or hp finite
- * elements). In this case, it is
- * advisable to use the
- * CompressedSetSparsityPattern class
- * instead, see for example @ref
- * step_27 "step-27", or to use the
- * CompressedSimpleSparsityPattern
- * class, see for example @ref step_31
- * "step-31".
- */
- void condense (CompressedSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses compressed sparsity
- * patterns, which are based on the
- * std::set container.
- */
- void condense (CompressedSetSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses compressed sparsity
- * patterns, which are based on the
- * ''simple'' aproach.
- */
- void condense (CompressedSimpleSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- *
- * Given the data structure used by
- * BlockCompressedSparsityPattern, this
- * function becomes quadratic in the
- * number of degrees of freedom for
- * large problems and can dominate
- * setting up linear systems when
- * several hundred thousand or millions
- * of unknowns are involved and for
- * problems with many nonzero elements
- * per row (for example for
- * vector-valued problems or hp finite
- * elements). In this case, it is
- * advisable to use the
- * BlockCompressedSetSparsityPattern
- * class instead, see for example @ref
- * step_27 "step-27" and @ref step_31
- * "step-31".
- */
- void condense (BlockCompressedSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- */
- void condense (BlockCompressedSetSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- */
- void condense (BlockCompressedSimpleSparsityPattern &sparsity) const;
-
-
- /**
- * Condense a given matrix. The
- * associated matrix struct should be
- * condensed and compressed. It is the
- * user's responsibility to guarantee
- * that all entries in the @p condensed
- * matrix be zero!
- *
- * The constraint matrix object must be
- * closed to call this function.
- */
- template<typename number>
- void condense (const SparseMatrix<number> &uncondensed,
- SparseMatrix<number> &condensed) const;
-
- /**
- * This function does much the same as
- * the above one, except that it
- * condenses the matrix 'in-place'. See
- * the general documentation of this
- * class for more detailed information.
- */
- template<typename number>
- void condense (SparseMatrix<number> &matrix) const;
-
- /**
- * Same function as above, but
- * condenses square block sparse
- * matrices.
- */
- template <typename number>
- void condense (BlockSparseMatrix<number> &matrix) const;
-
- /**
- * Condense the given vector @p
- * uncondensed into @p condensed. It is
- * the user's responsibility to
- * guarantee that all entries of @p
- * condensed be zero. Note that this
- * function does not take any
- * inhomogeneity into account and
- * throws an exception in case there
- * are any inhomogeneities. Use
- * the function using both a matrix and
- * vector for that case.
- *
- * The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface.
- */
- template <class VectorType>
- void condense (const VectorType &uncondensed,
- VectorType &condensed) const;
-
- /**
- * Condense the given vector
- * in-place. The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface. Note that this function
- * does not take any inhomogeneity into
- * account and throws an exception in
- * case there are any
- * inhomogeneities. Use the function
- * using both a matrix and vector for
- * that case.
- */
- template <class VectorType>
- void condense (VectorType &vec) const;
-
- /**
- * Condense a given matrix and a given
- * vector. The associated matrix struct
- * should be condensed and
- * compressed. It is the user's
- * responsibility to guarantee that all
- * entries in the @p condensed matrix
- * and vector be zero! This function is
- * the appropriate choice for applying
- * inhomogeneous constraints.
- *
- * The constraint matrix object must be
- * closed to call this function.
- */
- template<typename number, class VectorType>
- void condense (const SparseMatrix<number> &uncondensed_matrix,
- const VectorType &uncondensed_vector,
- SparseMatrix<number> &condensed_matrix,
- VectorType &condensed_vector) const;
-
- /**
- * This function does much the same as
- * the above one, except that it
- * condenses matrix and vector
- * 'in-place'. See the general
- * documentation of this class for more
- * detailed information.
- */
- template<typename number, class VectorType>
- void condense (SparseMatrix<number> &matrix,
- VectorType &vector) const;
-
- /**
- * Same function as above, but
- * condenses square block sparse
- * matrices and vectors.
- */
- template <typename number, class BlockVectorType>
- void condense (BlockSparseMatrix<number> &matrix,
- BlockVectorType &vector) const;
-
- /**
- * Sets the values of all constrained
- * DoFs in a vector to zero.
- * The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a
- * PETSc or Trilinos vector
- * wrapper class, or any other
- * type having the same
- * interface.
- */
- template <class VectorType>
- void set_zero (VectorType &vec) const;
-
- /**
- * @}
- */
-
- /**
- * @name Eliminating constraints from linear systems during their creation
- * @{
- */
-
- /**
- * This function takes a vector of
- * local contributions (@p
- * local_vector) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global vector. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as @p
- * local_vector and @p
- * local_dof_indices have the same
- * number of elements, this function is
- * happy with whatever it is
- * given.
- *
- * In contrast to the similar function
- * in the DoFAccessor class, this
- * function also takes care of
- * constraints, i.e. if one of the
- * elements of @p local_dof_indices
- * belongs to a constrained node, then
- * rather than writing the
- * corresponding element of @p
- * local_vector into @p global_vector,
- * the element is distributed to the
- * entries in the global vector to
- * which this particular degree of
- * freedom is constrained.
- *
- * Thus, by using this function to
- * distribute local contributions to the
- * global object, one saves the call to
- * the condense function after the
- * vectors and matrices are fully
- * assembled. On the other hand, by
- * consequence, the function does not
- * only write into the entries enumerated
- * by the @p local_dof_indices array, but
- * also (possibly) others as necessary.
- *
- * Note that this function will apply all
- * constraints as if they were
- * homogeneous. For correctly setting
- * inhomogeneous constraints, use the
- * similar function with a matrix
- * argument or the function with both
- * matrix and vector arguments.
- *
- * @note This function is not
- * thread-safe, so you will need to make
- * sure that only one process at a time
- * calls this function.
- */
- template <class InVector, class OutVector>
- void
- distribute_local_to_global (const InVector &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- OutVector &global_vector) const;
-
- /**
- * This function takes a vector of
- * local contributions (@p
- * local_vector) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global vector. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as @p
- * local_vector and @p
- * local_dof_indices have the same
- * number of elements, this function is
- * happy with whatever it is
- * given.
- *
- * In contrast to the similar function in
- * the DoFAccessor class, this function
- * also takes care of constraints,
- * i.e. if one of the elements of @p
- * local_dof_indices belongs to a
- * constrained node, then rather than
- * writing the corresponding element of
- * @p local_vector into @p global_vector,
- * the element is distributed to the
- * entries in the global vector to which
- * this particular degree of freedom is
- * constrained.
- *
- * Thus, by using this function to
- * distribute local contributions to the
- * global object, one saves the call to
- * the condense function after the
- * vectors and matrices are fully
- * assembled. On the other hand, by
- * consequence, the function does not
- * only write into the entries enumerated
- * by the @p local_dof_indices array, but
- * also (possibly) others as
- * necessary. This includes writing into
- * diagonal elements of the matrix if the
- * corresponding degree of freedom is
- * constrained.
- *
- * The fourth argument
- * <tt>local_matrix</tt> is intended to
- * be used in case one wants to apply
- * inhomogeneous constraints on the
- * vector only. Such a situation could be
- * where one wants to assemble of a right
- * hand side vector on a problem with
- * inhomogeneous constraints, but the
- * global matrix has been assembled
- * previously. A typical example of this
- * is a time stepping algorithm where the
- * stiffness matrix is assembled once,
- * and the right hand side updated every
- * time step. Note that, however, the
- * entries in the columns of the local
- * matrix have to be exactly the same as
- * those that have been written into the
- * global matrix. Otherwise, this
- * function will not be able to correctly
- * handle inhomogeneities.
- *
- * @note This function is not
- * thread-safe, so you will need to make
- * sure that only one process at a time
- * calls this function.
- */
- template <typename VectorType>
- void
- distribute_local_to_global (const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- VectorType &global_vector,
- const FullMatrix<double> &local_matrix) const;
-
- /**
- * Enter a single value into a
- * result vector, obeying constraints.
- */
- template <class VectorType>
- void
- distribute_local_to_global (const unsigned int index,
- const double value,
- VectorType &global_vector) const;
-
- /**
- * This function takes a pointer to a
- * vector of local contributions (@p
- * local_vector) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global vector. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as the
- * entries in @p local_dof_indices
- * indicate reasonable global vector
- * entries, this function is happy with
- * whatever it is given.
- *
- * If one of the elements of @p
- * local_dof_indices belongs to a
- * constrained node, then rather than
- * writing the corresponding element of
- * @p local_vector into @p
- * global_vector, the element is
- * distributed to the entries in the
- * global vector to which this
- * particular degree of freedom is
- * constrained.
- *
- * Thus, by using this function to
- * distribute local contributions to
- * the global object, one saves the
- * call to the condense function after
- * the vectors and matrices are fully
- * assembled. Note that this function
- * completely ignores inhomogeneous
- * constraints.
- *
- * @note This function is not
- * thread-safe, so you will need to
- * make sure that only one process at a
- * time calls this function.
- */
- template <typename ForwardIteratorVec, typename ForwardIteratorInd,
- class VectorType>
- void
- distribute_local_to_global (ForwardIteratorVec local_vector_begin,
- ForwardIteratorVec local_vector_end,
- ForwardIteratorInd local_indices_begin,
- VectorType &global_vector) const;
-
- /**
- * This function takes a matrix of
- * local contributions (@p
- * local_matrix) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global matrix. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as @p
- * local_matrix and @p
- * local_dof_indices have the same
- * number of elements, this function is
- * happy with whatever it is given.
- *
- * In contrast to the similar function
- * in the DoFAccessor class, this
- * function also takes care of
- * constraints, i.e. if one of the
- * elements of @p local_dof_indices
- * belongs to a constrained node, then
- * rather than writing the
- * corresponding element of @p
- * local_matrix into @p global_matrix,
- * the element is distributed to the
- * entries in the global matrix to
- * which this particular degree of
- * freedom is constrained.
- *
- * With this scheme, we never write
- * into rows or columns of constrained
- * degrees of freedom. In order to make
- * sure that the resulting matrix can
- * still be inverted, we need to do
- * something with the diagonal elements
- * corresponding to constrained
- * nodes. Thus, if a degree of freedom
- * in @p local_dof_indices is
- * constrained, we distribute the
- * corresponding entries in the matrix,
- * but also add the absolute value of
- * the diagonal entry of the local
- * matrix to the corresponding entry in
- * the global matrix. Since the exact
- * value of the diagonal element is not
- * important (the value of the
- * respective degree of freedom will be
- * overwritten by the distribute() call
- * later on anyway), this guarantees
- * that the diagonal entry is always
- * non-zero, positive, and of the same
- * order of magnitude as the other
- * entries of the matrix.
- *
- * Thus, by using this function to
- * distribute local contributions to
- * the global object, one saves the
- * call to the condense function after
- * the vectors and matrices are fully
- * assembled.
- *
- * @note This function is not
- * thread-safe, so you will need to
- * make sure that only one process at a
- * time calls this function.
- */
- template <typename MatrixType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<unsigned int> &local_dof_indices,
- MatrixType &global_matrix) const;
-
- /**
- * Does the same as the function
- * above but can treat non
- * quadratic matrices.
- */
- template <typename MatrixType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- MatrixType &global_matrix) const;
-
- /**
- * This function simultaneously
- * writes elements into matrix
- * and vector, according to the
- * constraints specified by the
- * calling ConstraintMatrix. This
- * function can correctly handle
- * inhomogeneous constraints as
- * well. For the parameter
- * use_inhomogeneities_for_rhs
- * see the documentation in @ref
- * constraints module.
- *
- * @note This function is not
- * thread-safe, so you will need to
- * make sure that only one process at a
- * time calls this function.
- */
- template <typename MatrixType, typename VectorType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs = false) const;
-
- /**
- * Do a similar operation as the
- * distribute_local_to_global() function
- * that distributes writing entries into
- * a matrix for constrained degrees of
- * freedom, except that here we don't
- * write into a matrix but only allocate
- * sparsity pattern entries.
- *
- * As explained in the
- * @ref hp_paper "hp paper"
- * and in step-27,
- * first allocating a sparsity pattern
- * and later coming back and allocating
- * additional entries for those matrix
- * entries that will be written to due to
- * the elimination of constrained degrees
- * of freedom (using
- * ConstraintMatrix::condense() ), can be
- * a very expensive procedure. It is
- * cheaper to allocate these entries
- * right away without having to do a
- * second pass over the sparsity pattern
- * object. This function does exactly
- * that.
- *
- * Because the function only allocates
- * entries in a sparsity pattern, all it
- * needs to know are the degrees of
- * freedom that couple to each
- * other. Unlike the previous function,
- * no actual values are written, so the
- * second input argument is not necessary
- * here.
- *
- * The third argument to this function,
- * keep_constrained_entries determines
- * whether the function shall allocate
- * entries in the sparsity pattern at
- * all for entries that will later be
- * set to zero upon condensation of the
- * matrix. These entries are necessary
- * if the matrix is built
- * unconstrained, and only later
- * condensed. They are not necessary if
- * the matrix is built using the
- * distribute_local_to_global()
- * function of this class which
- * distributes entries right away when
- * copying a local matrix into a global
- * object. The default of this argument
- * is true, meaning to allocate the few
- * entries that may later be set to
- * zero.
- *
- * By default, the function adds
- * entries for all pairs of indices
- * given in the first argument to the
- * sparsity pattern (unless
- * keep_constrained_entries is
- * false). However, sometimes one would
- * like to only add a subset of all of
- * these pairs. In that case, the last
- * argument can be used which specifies
- * a boolean mask which of the pairs of
- * indices should be considered. If the
- * mask is false for a pair of indices,
- * then no entry will be added to the
- * sparsity pattern for this pair,
- * irrespective of whether one or both
- * of the indices correspond to
- * constrained degrees of freedom.
- *
- * This function is not typically called
- * from user code, but is used in the
- * DoFTools::make_sparsity_pattern()
- * function when passed a constraint
- * matrix object.
- */
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries = true,
- const Table<2,bool> &dof_mask = default_empty_table) const;
-
- /**
- * Similar to the other function,
- * but for non-quadratic sparsity
- * patterns.
- */
-
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries = true,
- const Table<2,bool> &dof_mask = default_empty_table) const;
-
- /**
- * This function imports values from a
- * global vector (@p global_vector) by
- * applying the constraints to a vector
- * of local values, expressed in
- * iterator format. In most cases, the
- * local values will be identified by
- * the local dof values on a
- * cell. However, as long as the
- * entries in @p local_dof_indices
- * indicate reasonable global vector
- * entries, this function is happy with
- * whatever it is given.
- *
- * If one of the elements of @p
- * local_dof_indices belongs to a
- * constrained node, then rather than
- * writing the corresponding element of
- * @p global_vector into @p
- * local_vector, the constraints are
- * resolved as the respective
- * distribute function does, i.e., the
- * local entry is constructed from the
- * global entries to which this
- * particular degree of freedom is
- * constrained.
- *
- * In contrast to the similar function
- * get_dof_values in the DoFAccessor
- * class, this function does not need
- * the constrained values to be
- * correctly set (i.e., distribute to
- * be called).
- */
- template <typename ForwardIteratorVec, typename ForwardIteratorInd,
- class VectorType>
- void
- get_dof_values (const VectorType &global_vector,
- ForwardIteratorInd local_indices_begin,
- ForwardIteratorVec local_vector_begin,
- ForwardIteratorVec local_vector_end) const;
-
- /**
- * @}
- */
-
- /**
- * @name Dealing with constraints after solving a linear system
- * @{
- */
-
- /**
- * Re-distribute the elements of the
- * vector @p condensed to @p
- * uncondensed. It is the user's
- * responsibility to guarantee that all
- * entries of @p uncondensed be zero!
- *
- * This function undoes the action of
- * @p condense somehow, but it should
- * be noted that it is not the inverse
- * of @p condense.
- *
- * The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface.
- */
- template <class VectorType>
- void distribute (const VectorType &condensed,
- VectorType &uncondensed) const;
-
- /**
- * Re-distribute the elements of the
- * vector in-place. The @p VectorType
- * may be a Vector<float>,
- * Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface.
- *
- * Note that if called with a
- * TrilinosWrappers::MPI::Vector it may
- * not contain ghost elements.
- */
- template <class VectorType>
- void distribute (VectorType &vec) const;
-
- /**
- * @}
- */
-
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcMatrixIsClosed);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcMatrixNotClosed);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcLineInexistant,
- unsigned int,
- << "The specified line " << arg1
- << " does not exist.");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException4 (ExcEntryAlreadyExists,
- int, int, double, double,
- << "The entry for the indices " << arg1 << " and "
- << arg2 << " already exists, but the values "
- << arg3 << " (old) and " << arg4 << " (new) differ "
- << "by " << (arg4-arg3) << ".");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException2 (ExcDoFConstrainedToConstrainedDoF,
- int, int,
- << "You tried to constrain DoF " << arg1
- << " to DoF " << arg2
- << ", but that one is also constrained. This is not allowed!");
- /**
- * Exception.
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcDoFIsConstrainedFromBothObjects,
- int,
- << "Degree of freedom " << arg1
- << " is constrained from both object in a merge operation.");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcDoFIsConstrainedToConstrainedDoF,
- int,
- << "In the given argument a degree of freedom is constrained "
- << "to another DoF with number " << arg1
- << ", which however is constrained by this object. This is not"
- << " allowed.");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcRowNotStoredHere,
- int,
- << "The index set given to this constraint matrix indicates "
- << "constraints for degree of freedom " << arg1
- << " should not be stored by this object, but a constraint "
- << "is being added.");
-
- private:
-
- /**
- * This class represents one line of a
- * constraint matrix.
- */
- struct ConstraintLine
- {
- /**
- * A data type in which we store the list
- * of entries that make up the homogenous
- * part of a constraint.
- */
- typedef std::vector<std::pair<unsigned int,double> > Entries;
-
- /**
- * Number of this line. Since only
- * very few lines are stored, we
- * can not assume a specific order
- * and have to store the line
- * number explicitly.
- */
- unsigned int line;
-
- /**
- * Row numbers and values of the
- * entries in this line.
- *
- * For the reason why we use a
- * vector instead of a map and the
- * consequences thereof, the same
- * applies as what is said for
- * ConstraintMatrix::lines.
- */
- Entries entries;
-
- /**
- * Value of the inhomogeneity.
- */
- double inhomogeneity;
-
- /**
- * This operator is a bit weird and
- * unintuitive: it compares the
- * line numbers of two lines. We
- * need this to sort the lines; in
- * fact we could do this using a
- * comparison predicate. However,
- * this way, it is easier, albeit
- * unintuitive since two lines
- * really have no god-given order
- * relation.
- */
- bool operator < (const ConstraintLine &) const;
-
- /**
- * This operator is likewise weird:
- * it checks whether the line
- * indices of the two operands are
- * equal, irrespective of the fact
- * that the contents of the line
- * may be different.
- */
- bool operator == (const ConstraintLine &) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes) of
- * this object.
- */
- std::size_t memory_consumption () const;
- };
-
- /**
- * Store the lines of the matrix.
- * Entries are usually appended in an
- * arbitrary order and insertion into a
- * vector is done best at the end, so
- * the order is unspecified after all
- * entries are inserted. Sorting of the
- * entries takes place when calling the
- * <tt>close()</tt> function.
- *
- * We could, instead of using a vector,
- * use an associative array, like a map
- * to store the lines. This, however,
- * would mean a much more fractioned
- * heap since it allocates many small
- * objects, and would additionally make
- * usage of this matrix much slower.
- */
- std::vector<ConstraintLine> lines;
-
- /**
- * A list of unsigned integers that
- * contains the position of the
- * ConstraintLine of a constrained degree
- * of freedom, or
- * numbers::invalid_unsigned_int if the
- * degree of freedom is not
- * constrained. The
- * numbers::invalid_unsigned_int
- * return value returns thus whether
- * there is a constraint line for a given
- * degree of freedom index. Note that
- * this class has no notion of how many
- * degrees of freedom there really are,
- * so if we check whether there is a
- * constraint line for a given degree of
- * freedom, then this vector may actually
- * be shorter than the index of the DoF
- * we check for.
- *
- * This field exists since when adding a
- * new constraint line we have to figure
- * out whether it already
- * exists. Previously, we would simply
- * walk the unsorted list of constraint
- * lines until we either hit the end or
- * found it. This algorithm is O(N) if N
- * is the number of constraints, which
- * makes it O(N^2) when inserting all
- * constraints. For large problems with
- * many constraints, this could easily
- * take 5-10 per cent of the total run
- * time. With this field, we can save
- * this time since we find any constraint
- * in O(1) time or get to know that it a
- * certain degree of freedom is not
- * constrained.
- *
- * To make things worse, traversing the
- * list of existing constraints requires
- * reads from many different places in
- * memory. Thus, in large 3d
- * applications, the add_line() function
- * showed up very prominently in the
- * overall compute time, mainly because
- * it generated a lot of cache
- * misses. This should also be fixed by
- * using the O(1) algorithm to access the
- * fields of this array.
- *
- * The field is useful in a number of
- * other contexts as well, e.g. when one
- * needs random access to the constraints
- * as in all the functions that apply
- * constraints on the fly while add cell
- * contributions into vectors and
- * matrices.
- */
- std::vector<unsigned int> lines_cache;
-
- /**
- * This IndexSet is used to limit the
- * lines to save in the ContraintMatrix
- * to a subset. This is necessary,
- * because the lines_cache vector would
- * become too big in a distributed
- * calculation.
- */
- IndexSet local_lines;
-
- /**
- * Store whether the arrays are sorted.
- * If so, no new entries can be added.
- */
- bool sorted;
-
- /**
- * Internal function to calculate the
- * index of line @p line in the vector
- * lines_cache using local_lines.
- */
- unsigned int calculate_line_index (const unsigned int line) const;
-
- /**
- * Return @p true if the weight of an
- * entry (the second element of the
- * pair) equals zero. This function is
- * used to delete entries with zero
- * weight.
- */
- static bool check_zero_weight (const std::pair<unsigned int, double> &p);
-
- /**
- * Dummy table that serves as default
- * argument for function
- * <tt>add_entries_local_to_global()</tt>.
- */
- static const Table<2,bool> default_empty_table;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * standard (non-block) matrices.
- */
- template <typename MatrixType, typename VectorType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
- internal::bool2type<false>) const;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * block matrices.
- */
- template <typename MatrixType, typename VectorType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
- internal::bool2type<true>) const;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * standard (non-block) sparsity types.
- */
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
- internal::bool2type<false>) const;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * block sparsity types.
- */
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
- internal::bool2type<true>) const;
-
- /**
- * Internal helper function for
- * distribute_local_to_global function.
- *
- * Creates a list of affected global rows
- * for distribution, including the local
- * rows where the entries come from. The
- * list is sorted according to the global
- * row indices.
- */
- void
- make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
- internals::GlobalRowsFromLocal &global_rows) const;
-
- /**
- * Internal helper function for
- * add_entries_local_to_global function.
- *
- * Creates a list of affected rows for
- * distribution without any additional
- * information, otherwise similar to the
- * other make_sorted_row_list()
- * function.
- */
- void
- make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
- std::vector<unsigned int> &active_dofs) const;
-
- /**
- * Internal helper function for
- * distribute_local_to_global function.
- */
- double
- resolve_vector_entry (const unsigned int i,
- const internals::GlobalRowsFromLocal &global_rows,
- const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
- const FullMatrix<double> &local_matrix) const;
+ };
+
+ /**
+ * Store the lines of the matrix.
+ * Entries are usually appended in an
+ * arbitrary order and insertion into a
+ * vector is done best at the end, so
+ * the order is unspecified after all
+ * entries are inserted. Sorting of the
+ * entries takes place when calling the
+ * <tt>close()</tt> function.
+ *
+ * We could, instead of using a vector,
+ * use an associative array such as a
+ * map to store the lines. This, however,
+ * would lead to a much more fragmented
+ * heap, since it allocates many small
+ * objects, and would additionally make
+ * using this matrix much slower.
+ */
+ std::vector<ConstraintLine> lines;
+
+ /**
+ * A list of unsigned integers that
+ * contains the position of the
+ * ConstraintLine of a constrained degree
+ * of freedom, or
+ * numbers::invalid_unsigned_int if the
+ * degree of freedom is not
+ * constrained. Comparing an entry of
+ * this vector against
+ * numbers::invalid_unsigned_int thus
+ * tells whether there is a constraint
+ * line for a given degree of freedom
+ * index. Note that
+ * this class has no notion of how many
+ * degrees of freedom there really are,
+ * so if we check whether there is a
+ * constraint line for a given degree of
+ * freedom, then this vector may actually
+ * be shorter than the index of the DoF
+ * we check for.
+ *
+ * This field exists since when adding a
+ * new constraint line we have to figure
+ * out whether it already
+ * exists. Previously, we would simply
+ * walk the unsorted list of constraint
+ * lines until we either hit the end or
+ * found it. This algorithm is O(N) if N
+ * is the number of constraints, which
+ * makes it O(N^2) when inserting all
+ * constraints. For large problems with
+ * many constraints, this could easily
+ * take 5-10 per cent of the total run
+ * time. With this field, we can save
+ * this time since we find any constraint
+ * in O(1) time, or learn that a
+ * certain degree of freedom is not
+ * constrained.
+ *
+ * To make things worse, traversing the
+ * list of existing constraints requires
+ * reads from many different places in
+ * memory. Thus, in large 3d
+ * applications, the add_line() function
+ * showed up very prominently in the
+ * overall compute time, mainly because
+ * it generated a lot of cache
+ * misses. This should also be fixed by
+ * using the O(1) algorithm to access the
+ * fields of this array.
+ *
+ * The field is useful in a number of
+ * other contexts as well, e.g. when one
+ * needs random access to the constraints
+ * as in all the functions that apply
+ * constraints on the fly while adding
+ * cell contributions into vectors and
+ * matrices.
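+ *
+ * A sketch of the O(1) lookup this cache enables (using
+ * calculate_line_index() to account for @p local_lines):
+ * @code
+ *   const unsigned int pos = lines_cache[calculate_line_index(dof)];
+ *   if (pos != numbers::invalid_unsigned_int)
+ *     {
+ *       // dof is constrained; its data is stored in lines[pos]
+ *       // (a real implementation would also check that the index
+ *       // lies within the bounds of lines_cache)
+ *       const ConstraintLine &line = lines[pos];
+ *     }
+ * @endcode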
+ */
+ std::vector<unsigned int> lines_cache;
+
+ /**
+ * This IndexSet is used to restrict the
+ * lines stored in the ConstraintMatrix
+ * to a subset. This is necessary,
+ * because the lines_cache vector would
+ * become too big in a distributed
+ * calculation.
+ */
+ IndexSet local_lines;
+
+ /**
+ * Store whether the arrays are sorted.
+ * If so, no new entries can be added.
+ */
+ bool sorted;
+
+ /**
+ * Internal function to calculate the
+ * index of line @p line in the vector
+ * lines_cache using local_lines.
+ */
+ unsigned int calculate_line_index (const unsigned int line) const;
+
+ /**
+ * Return @p true if the weight of an
+ * entry (the second element of the
+ * pair) equals zero. This function is
+ * used to delete entries with zero
+ * weight.
+ */
+ static bool check_zero_weight (const std::pair<unsigned int, double> &p);
+
+ /**
+ * Dummy table that serves as default
+ * argument for function
+ * <tt>add_entries_local_to_global()</tt>.
+ */
+ static const Table<2,bool> default_empty_table;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * standard (non-block) matrices.
+ */
+ template <typename MatrixType, typename VectorType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<unsigned int> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
+ internal::bool2type<false>) const;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * block matrices.
+ */
+ template <typename MatrixType, typename VectorType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
+ const std::vector<unsigned int> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
+ internal::bool2type<true>) const;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * standard (non-block) sparsity types.
+ */
+ template <typename SparsityType>
+ void
+ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
+ internal::bool2type<false>) const;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * block sparsity types.
+ */
+ template <typename SparsityType>
+ void
+ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
+ internal::bool2type<true>) const;
+
+ /**
+ * Internal helper function for
+ * distribute_local_to_global function.
+ *
+ * Creates a list of affected global rows
+ * for distribution, including the local
+ * rows where the entries come from. The
+ * list is sorted according to the global
+ * row indices.
+ */
+ void
+ make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
- internals::GlobalRowsFromLocal &global_rows) const;
++ internals::GlobalRowsFromLocal &global_rows) const;
+
+ /**
+ * Internal helper function for
+ * add_entries_local_to_global function.
+ *
+ * Creates a list of affected rows for
+ * distribution without any additional
+ * information, otherwise similar to the
+ * other make_sorted_row_list()
+ * function.
+ */
+ void
+ make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
+ std::vector<unsigned int> &active_dofs) const;
+
+ /**
+ * Internal helper function for
+ * distribute_local_to_global function.
+ */
+ double
+ resolve_vector_entry (const unsigned int i,
+ const internals::GlobalRowsFromLocal &global_rows,
+ const Vector<double> &local_vector,
+ const std::vector<unsigned int> &local_dof_indices,
+ const FullMatrix<double> &local_matrix) const;
};
template <typename ForwardIteratorVec, typename ForwardIteratorInd,
- class VectorType>
+ class VectorType>
inline
-void ConstraintMatrix::get_dof_values (const VectorType &global_vector,
+void ConstraintMatrix::get_dof_values (const VectorType &global_vector,
ForwardIteratorInd local_indices_begin,
ForwardIteratorVec local_vector_begin,
ForwardIteratorVec local_vector_end) const
const unsigned int column_start,
const unsigned int column_end,
const FullMatrix<double> &local_matrix,
- unsigned int * &col_ptr,
- number * &val_ptr)
- unsigned int *&col_ptr,
- number *&val_ptr)
++ unsigned int *&col_ptr,
++ number *&val_ptr)
{
if (column_end == column_start)
return;
template <typename Number>
class Vector : public Subscriptor
{
- public:
- /**
- * Declare standard types used in all
- * containers. These types parallel those in
- * the <tt>C++</tt> standard libraries
- * <tt>vector<...></tt> class.
- */
- typedef Number value_type;
- typedef value_type *pointer;
- typedef const value_type *const_pointer;
- typedef value_type *iterator;
- typedef const value_type *const_iterator;
- typedef value_type &reference;
- typedef const value_type &const_reference;
- typedef size_t size_type;
- typedef typename numbers::NumberTraits<Number>::real_type real_type;
-
- /**
- * @name 1: Basic Object-handling
- */
- //@{
- /**
- * Empty constructor.
- */
- Vector ();
-
- /**
- * Copy constructor. Uses the parallel
- * partitioning of @p in_vector.
- */
- Vector (const Vector<Number> &in_vector);
-
- /**
- * Constructs a parallel vector of the given
- * global size without any actual parallel
- * distribution.
- */
- Vector (const unsigned int size);
-
- /**
- * Constructs a parallel vector. The local
- * range is specified by @p locally_owned_set
- * (note that this must be a contiguous
- * interval, multiple intervals are not
- * possible). The IndexSet @p ghost_indices
- * specifies ghost indices, i.e., indices
- * which one might need to read data from or
- * accumulate data from. It is allowed that
- * the set of ghost indices also contains the
- * local range, but it does not need to.
- *
- * This function involves global
- * communication, so it should only be called
- * once for a given layout. Use the
- * constructor with Vector<Number> argument to
- * create additional vectors with the same
- * parallel layout.
- */
- Vector (const IndexSet &local_range,
- const IndexSet &ghost_indices,
- const MPI_Comm communicator);
-
- /**
- * Create the vector based on the parallel
- * partitioning described in @p
- * partitioner. The input argument is a shared
- * pointer, which store the partitioner data
- * only once and share it between several
- * vectors with the same layout.
- */
- Vector (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
-
- /**
- * Destructor.
- */
- ~Vector ();
-
- /**
- * Sets the global size of the vector to @p
- * size without any actual parallel
- * distribution.
- */
- void reinit (const unsigned int size,
- const bool fast = false);
-
- /**
- * Uses the parallel layout of the input
- * vector @p in_vector and allocates memory
- * for this vector. Recommended initialization
- * function when several vectors with the same
- * layout should be created.
- *
- * If the flag @p fast is set to false, the
- * memory will be initialized with zero,
- * otherwise the memory will be untouched (and
- * the user must make sure to fill it with
- * reasonable data before using it).
- */
- template <typename Number2>
- void reinit(const Vector<Number2> &in_vector,
- const bool fast = false);
-
- /**
- * Initialize the vector. The local range is
- * specified by @p locally_owned_set (note
- * that this must be a contiguous interval,
- * multiple intervals are not possible). The
- * IndexSet @p ghost_indices specifies ghost
- * indices, i.e., indices which one might need
- * to read data from or accumulate data
- * from. It is allowed that the set of ghost
- * indices also contains the local range, but
- * it does not need to.
- *
- * This function involves global
- * communication, so it should only be called
- * once for a given layout. Use the @p reinit
- * function with Vector<Number> argument to
- * create additional vectors with the same
- * parallel layout.
- */
- void reinit (const IndexSet &local_range,
- const IndexSet &ghost_indices,
- const MPI_Comm communicator);
-
- /**
- * Initialize the vector given to the parallel
- * partitioning described in @p
- * partitioner. The input argument is a shared
- * pointer, which store the partitioner data
- * only once and share it between several
- * vectors with the same layout.
- */
- void reinit (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * @p v. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * This function is analog to the
- * the @p swap function of all C++
- * standard containers. Also,
- * there is a global function
- * <tt>swap(u,v)</tt> that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- *
- * This function is virtual in
- * order to allow for derived
- * classes to handle memory
- * separately.
- */
- void swap (Vector<Number> &v);
-
- /**
- * Assigns the vector to the parallel
- * partitioning of the input vector @p
- * in_vector, and copies all the data.
- */
- Vector<Number> &
- operator = (const Vector<Number> &in_vector);
-
- /**
- * Assigns the vector to the parallel
- * partitioning of the input vector @p
- * in_vector, and copies all the data.
- */
- template <typename Number2>
- Vector<Number> &
- operator = (const Vector<Number2> &in_vector);
-
- /**
- * This method copies the local range from
- * another vector with the same local range,
- * but possibly different layout of ghost
- * indices.
- */
- void copy_from (const Vector<Number> &in_vector,
- const bool call_update_ghost_values = false);
-
- /**
- * Sets all elements of the vector to the
- * scalar @p s. If the scalar is zero, also
- * ghost elements are set to zero, otherwise
- * they remain unchanged.
- */
- Vector<Number>& operator = (const Number s);
-
- /**
- * This function copies the data that has
- * accumulated in the data buffer for ghost
- * indices to the owning processor.
- *
- * For the meaning of this argument,
- * see the entry on @ref
- * GlossCompress "Compressing
- * distributed vectors and matrices"
- * in the glossary.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
-
- /**
- * Fills the data field for ghost indices with
- * the values stored in the respective
- * positions of the owning processor. This
- * function is needed before reading from
- * ghosts. The function is @p const even
- * though ghost data is changed. This is
- * needed to allow functions with a @p const
- * vector to perform the data exchange without
- * creating temporaries.
- */
- void update_ghost_values () const;
-
- /**
- * Initiates communication for the @p
- * compress() function with non-blocking
- * communication. This function does not wait
- * for the transfer to finish, in order to
- * allow for other computations during the
- * time it takes until all data arrives.
- *
- * Before the data is actually exchanged, the
- * function must be followed by a call to @p
- * compress_finish().
- *
- * In case this function is called for more
- * than one vector before @p
- * compress_finish() is invoked, it is
- * mandatory to specify a unique
- * communication channel to each such call, in
- * order to avoid several messages with the
- * same ID that will corrupt this operation.
- */
- void compress_start (const unsigned int communication_channel = 0);
-
- /**
- * For all requests that have been initiated
- * in compress_start, wait for the
- * communication to finish. Once it is
- * finished, add or set the data (depending on
- * whether @p add_ghost_data is @p true or @p
- * false) to the respective positions in the
- * owning processor, and clear the contents in
- * the ghost data fields. The meaning of
- * this argument is the same as in compress().
- *
- * Must follow a call to the @p compress_start
- * function.
- */
- void compress_finish (const bool add_ghost_data = true);
-
-
- /**
- * Initiates communication for the @p
- * update_ghost_values() function with non-blocking
- * communication. This function does not wait
- * for the transfer to finish, in order to
- * allow for other computations during the
- * time it takes until all data arrives.
- *
- * Before the data is actually exchanged, the
- * function must be followed by a call to @p
- * update_ghost_values_finish().
- *
- * In case this function is called for more
- * than one vector before @p
- * update_ghost_values_finish() is invoked, it is
- * mandatory to specify a unique communication
- * channel to each such call, in order to
- * avoid several messages with the same ID
- * that will corrupt this operation.
- */
- void update_ghost_values_start (const unsigned int communication_channel = 0) const;
-
-
- /**
- * For all requests that have been started in
- * update_ghost_values_start, wait for the communication
- * to finish.
- *
- * Must follow a call to the @p
- * update_ghost_values_start function before reading
- * data from ghost indices.
- */
- void update_ghost_values_finish () const;
-
- /**
- * This method zeros the entries on ghost
- * dofs, but does not touch locally owned
- * DoFs.
- */
- void zero_out_ghosts ();
-
- /**
- * Return whether the vector contains only
- * elements with value zero. This function
- * is mainly for internal consistency
- * checks and should seldom be used when
- * not in debug mode since it uses quite
- * some time.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries are
- * zero or positive. This function is
- * used, for example, to check whether
- * refinement indicators are really all
- * positive (or zero).
- *
- * The function obviously only makes
- * sense if the template argument of this
- * class is a real type. If it is a
- * complex type, then an exception is
- * thrown.
- */
- bool is_non_negative () const;
-
- /**
- * Checks for equality of the two vectors.
- */
- template <typename Number2>
- bool operator == (const Vector<Number2> &v) const;
-
- /**
- * Checks for inequality of the two vectors.
- */
- template <typename Number2>
- bool operator != (const Vector<Number2> &v) const;
-
- /**
- * Perform the inner product of two vectors.
- */
- template <typename Number2>
- Number operator * (const Vector<Number2> &V) const;
-
- /**
- * Computes the square of the l<sub>2</sub>
- * norm of the vector (i.e., the sum of the
- * squares of all entries among all
- * processors).
- */
- real_type norm_sqr () const;
-
- /**
- * Computes the mean value of all the entries
- * in the vector.
- */
- Number mean_value () const;
-
- /**
- * Returns the l<sub>1</sub> norm of the
- * vector (i.e., the sum of the absolute
- * values of all entries among all
- * processors).
- */
- real_type l1_norm () const;
-
- /**
- * Returns the l<sub>2</sub> norm of the
- * vector (i.e., square root of the sum of the
- * square of all entries among all
- * processors).
- */
- real_type l2_norm () const;
-
- /**
- * Returns the l<sub>p</sub> norm with real @p
- * p of the vector (i.e., the pth root of sum
- * of the pth power of all entries among all
- * processors).
- */
- real_type lp_norm (const real_type p) const;
-
- /**
- * Returns the maximum norm of the vector
- * (i.e., maximum absolute value among all
- * entries among all processors).
- */
- real_type linfty_norm () const;
-
- /**
- * Returns the global size of the vector,
- * equal to the sum of the number of locally
- * owned indices among all the processors.
- */
- types::global_dof_index size () const;
-
- /**
- * Returns the local size of the vector, i.e.,
- * the number of indices owned locally.
- */
- unsigned int local_size() const;
-
- /**
- * Returns the half-open interval that
- * specifies the locally owned range of the
- * vector. Note that <code>local_size() ==
- * local_range().second -
- * local_range().first</code>.
- */
- std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
-
- /**
- * Returns true if the given global index is
- * in the local range of this processor.
- */
- bool in_local_range (const types::global_dof_index global_index) const;
-
- /**
- * Returns the number of ghost elements
- * present on the vector.
- */
- unsigned int n_ghost_entries () const;
-
- /**
- * Returns whether the given global index is a
- * ghost index on the present
- * processor. Returns false for indices that
- * are owned locally and for indices not
- * present at all.
- */
- bool is_ghost_entry (const types::global_dof_index global_index) const;
-
- /**
- * Make the @p Vector class a bit like
- * the <tt>vector<></tt> class of the C++
- * standard library by returning
- * iterators to the start and end of the
- * locally owned elements of this vector.
- */
- iterator begin ();
-
- /**
- * Return constant iterator to the start of
- * the vector.
- */
- const_iterator begin () const;
-
- /**
- * Return an iterator pointing to the
- * element past the end of the array of
- * locally owned entries.
- */
- iterator end ();
-
- /**
- * Return a constant iterator pointing to
- * the element past the end of the array
- * of the locally owned entries.
- */
- const_iterator end () const;
- //@}
-
-
- /**
- * @name 2: Data-Access
- */
- //@{
-
- /**
- * Read access to the data in the
- * position corresponding to @p
- * global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- */
- Number operator () (const types::global_dof_index global_index) const;
-
- /**
- * Read and write access to the data
- * in the position corresponding to
- * @p global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- */
- Number& operator () (const types::global_dof_index global_index);
-
- /**
- * Read access to the data in the
- * position corresponding to @p
- * global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- *
- * This function does the same thing
- * as operator().
- */
- Number operator [] (const types::global_dof_index global_index) const;
-
- /**
- * Read and write access to the data
- * in the position corresponding to
- * @p global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- *
- * This function does the same thing
- * as operator().
- */
- Number& operator [] (const types::global_dof_index global_index);
-
- /**
- * Read access to the data field specified by
- * @p local_index. Locally owned indices can
- * be accessed with indices
- * <code>[0,local_size)</code>, and ghost
- * indices with indices
- * <code>[local_size,local_size+
- * n_ghost_entries]</code>.
- */
- Number local_element (const unsigned int local_index) const;
-
- /**
- * Read and write access to the data field
- * specified by @p local_index. Locally owned
- * indices can be accessed with indices
- * <code>[0,local_size)</code>, and ghost
- * indices with indices
- * <code>[local_size,local_size+n_ghosts]</code>.
- */
- Number& local_element (const unsigned int local_index);
- //@}
-
-
- /**
- * @name 3: Modification of vectors
- */
- //@{
-
- /**
- * Add the given vector to the present
- * one.
- */
- Vector<Number> & operator += (const Vector<Number> &V);
-
- /**
- * Subtract the given vector from the
- * present one.
- */
- Vector<Number> & operator -= (const Vector<Number> &V);
-
- /**
- * A collective add operation:
- * This funnction adds a whole
- * set of values stored in @p
- * values to the vector
- * components specified by @p
- * indices.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const std::vector<OtherNumber> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const ::dealii::Vector<OtherNumber> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const OtherNumber *values);
-
- /**
- * Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
- */
- void add (const Number s);
-
- /**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const Vector<Number> &V);
-
- /**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this += a*V</tt>.
- */
- void add (const Number a, const Vector<Number> &V);
-
- /**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this += a*V+b*W</tt>.
- */
- void add (const Number a, const Vector<Number> &V,
- const Number b, const Vector<Number> &W);
-
- /**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this)+V</tt>.
- */
- void sadd (const Number s,
- const Vector<Number> &V);
-
- /**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this)+a*V</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V);
-
- /**
- * Scaling and multiple addition.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W);
-
- /**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W,
- const Number c,
- const Vector<Number> &X);
-
- /**
- * Scale each element of the
- * vector by the given factor.
- *
- * This function is deprecated
- * and will be removed in a
- * future version. Use
- * <tt>operator *=</tt> and
- * <tt>operator /=</tt> instead.
- */
- void scale (const Number factor);
-
-
- /**
- * Scale each element of the
- * vector by a constant
- * value.
- */
- Vector<Number> & operator *= (const Number factor);
-
- /**
- * Scale each element of the
- * vector by the inverse of the
- * given value.
- */
- Vector<Number> & operator /= (const Number factor);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- void scale (const Vector<Number> &scaling_factors);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- template <typename Number2>
- void scale (const Vector<Number2> &scaling_factors);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- void equ (const Number a, const Vector<Number>& u);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- template <typename Number2>
- void equ (const Number a, const Vector<Number2>& u);
-
- /**
- * Assignment <tt>*this = a*u + b*v</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v);
-
- /**
- * Assignment <tt>*this = a*u + b*v + b*w</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v,
- const Number c, const Vector<Number>& w);
-
- /**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
- *
- * This vector is appropriately
- * scaled to hold the result.
- *
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
- */
- void ratio (const Vector<Number> &a,
- const Vector<Number> &b);
- //@}
-
-
- /**
- * @name 4: Mixed stuff
- */
- //@{
- /**
- * Checks whether the given
- * partitioner is compatible with the
- * partitioner used for this
- * vector. Two partitioners are
- * compatible if the have the same
- * local size and the same ghost
- * indices. They do not necessarily
- * need to be the same data
- * field. This is a local operation
- * only, i.e., if only some
- * processors decide that the
- * partitioning is not compatible,
- * only these processors will return
- * @p false, whereas the other
- * processors will return @p true.
- */
- bool
- partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const;
-
-
- /**
- * Prints the vector to the output stream @p
- * out.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Returns the memory consumption of this
- * class in bytes.
- */
- std::size_t memory_consumption () const;
- //@}
-
- private:
- /**
- * Shared pointer to store the parallel
- * partitioning information. This information
- * can be shared between several vectors that
- * have the same partitioning.
- */
- std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
-
- /**
- * The size that is currently allocated in the
- * val array.
- */
- unsigned int allocated_size;
-
- /**
- * Pointer to the array of
- * local elements of this vector.
- */
- Number *val;
-
- /**
- * Temporary storage that holds the data that
- * is sent to this processor in @p compress()
- * or sent from this processor in @p
- * update_ghost_values.
- */
- mutable Number *import_data;
-
- /**
- * Provide this class with all functionality
- * of ::dealii::Vector by creating a
- * VectorView object.
- */
- VectorView<Number> vector_view;
+ public:
+ /**
+ * Declare standard types used in all
+ * containers. These types parallel those in
+ * the <tt>C++</tt> standard libraries
+ * <tt>vector<...></tt> class.
+ */
+ typedef Number value_type;
+ typedef value_type *pointer;
+ typedef const value_type *const_pointer;
+ typedef value_type *iterator;
+ typedef const value_type *const_iterator;
+ typedef value_type &reference;
+ typedef const value_type &const_reference;
+ typedef size_t size_type;
+ typedef typename numbers::NumberTraits<Number>::real_type real_type;
+
+ /**
+ * @name 1: Basic Object-handling
+ */
+ //@{
+ /**
+ * Empty constructor.
+ */
+ Vector ();
+
+ /**
+ * Copy constructor. Uses the parallel
+ * partitioning of @p in_vector.
+ */
+ Vector (const Vector<Number> &in_vector);
+
+ /**
+ * Constructs a parallel vector of the given
+ * global size without any actual parallel
+ * distribution.
+ */
+ Vector (const unsigned int size);
+
+ /**
+ * Constructs a parallel vector. The local
+ * range is specified by @p locally_owned_set
+ * (note that this must be a contiguous
+ * interval, multiple intervals are not
+ * possible). The IndexSet @p ghost_indices
+ * specifies ghost indices, i.e., indices
+ * which one might need to read data from or
+ * accumulate data from. It is allowed that
+ * the set of ghost indices also contains the
+ * local range, but it does not need to.
+ *
+ * This function involves global
+ * communication, so it should only be called
+ * once for a given layout. Use the
+ * constructor with Vector<Number> argument to
+ * create additional vectors with the same
+ * parallel layout.
+ */
+ Vector (const IndexSet &local_range,
+ const IndexSet &ghost_indices,
+ const MPI_Comm communicator);
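A minimal sketch of how this constructor might be used to set up a ghosted vector from two IndexSet objects. The enclosing namespace (written here as dealii::parallel::distributed) and the header name are assumptions, since neither is shown in this excerpt; everything else follows the declarations above.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>   // assumed header for this class
#include <mpi.h>

dealii::parallel::distributed::Vector<double>                 // assumed namespace
make_ghosted_vector (const MPI_Comm communicator)
{
  int n_procs, my_rank;
  MPI_Comm_size (communicator, &n_procs);
  MPI_Comm_rank (communicator, &my_rank);

  const unsigned int n_global = 100 * n_procs;

  // every processor owns a contiguous range of 100 indices
  dealii::IndexSet locally_owned (n_global);
  locally_owned.add_range (100*my_rank, 100*(my_rank+1));

  // additionally request read access to one index owned by the right
  // neighbor; it becomes a ghost entry of this vector
  dealii::IndexSet ghost_indices (n_global);
  if (my_rank+1 < n_procs)
    ghost_indices.add_index (100*(my_rank+1));

  // this constructor communicates globally, so call it only once per
  // layout and create further vectors via reinit(v) or the copy
  // constructor to reuse the partitioner
  return dealii::parallel::distributed::Vector<double> (locally_owned,
                                                        ghost_indices,
                                                        communicator);
}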
+
+ /**
+ * Create the vector based on the parallel
+ * partitioning described in @p
+ * partitioner. The input argument is a shared
+ * pointer, which stores the partitioner data
+ * only once and shares it between several
+ * vectors with the same layout.
+ */
+ Vector (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+ /**
+ * Destructor.
+ */
+ ~Vector ();
+
+ /**
+ * Sets the global size of the vector to @p
+ * size without any actual parallel
+ * distribution.
+ */
+ void reinit (const unsigned int size,
+ const bool fast = false);
+
+ /**
+ * Uses the parallel layout of the input
+ * vector @p in_vector and allocates memory
+ * for this vector. Recommended initialization
+ * function when several vectors with the same
+ * layout should be created.
+ *
+ * If the flag @p fast is set to false, the
+ * memory will be initialized with zero,
+ * otherwise the memory will be untouched (and
+ * the user must make sure to fill it with
+ * reasonable data before using it).
+ */
+ template <typename Number2>
+ void reinit(const Vector<Number2> &in_vector,
+ const bool fast = false);
+
+ /**
+ * Initialize the vector. The local range is
+ * specified by @p locally_owned_set (note
+ * that this must be a contiguous interval;
+ * multiple intervals are not possible). The
+ * IndexSet @p ghost_indices specifies ghost
+ * indices, i.e., indices which one might need
+ * to read data from or accumulate data
+ * from. It is allowed that the set of ghost
+ * indices also contains the local range, but
+ * it does not need to.
+ *
+ * This function involves global
+ * communication, so it should only be called
+ * once for a given layout. Use the @p reinit
+ * function with Vector<Number> argument to
+ * create additional vectors with the same
+ * parallel layout.
+ */
+ void reinit (const IndexSet &local_range,
+ const IndexSet &ghost_indices,
+ const MPI_Comm communicator);
+
+ /**
+ * Initialize the vector according to the parallel
+ * partitioning described in @p
+ * partitioner. The input argument is a shared
+ * pointer, which stores the partitioner data
+ * only once and shares it between several
+ * vectors with the same layout.
+ */
+ void reinit (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * @p v. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * This function is analogous to
+ * the @p swap function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * <tt>swap(u,v)</tt> that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ *
+ * This function is virtual in
+ * order to allow for derived
+ * classes to handle memory
+ * separately.
+ */
+ void swap (Vector<Number> &v);
+
+ /**
+ * Assigns the vector to the parallel
+ * partitioning of the input vector @p
+ * in_vector, and copies all the data.
+ */
+ Vector<Number> &
+ operator = (const Vector<Number> &in_vector);
+
+ /**
+ * Assigns the vector to the parallel
+ * partitioning of the input vector @p
+ * in_vector, and copies all the data.
+ */
+ template <typename Number2>
+ Vector<Number> &
+ operator = (const Vector<Number2> &in_vector);
+
+ /**
+ * This method copies the local range from
+ * another vector with the same local range,
+ * but possibly different layout of ghost
+ * indices.
+ */
+ void copy_from (const Vector<Number> &in_vector,
+ const bool call_update_ghost_values = false);
+
+ /**
+ * Sets all elements of the vector to the
+ * scalar @p s. If the scalar is zero, also
+ * ghost elements are set to zero, otherwise
+ * they remain unchanged.
+ */
+ Vector<Number> &operator = (const Number s);
+
+ /**
+ * This function copies the data that has
+ * accumulated in the data buffer for ghost
+ * indices to the owning processor.
+ *
+ * For the meaning of this argument,
+ * see the entry on @ref
+ * GlossCompress "Compressing
+ * distributed vectors and matrices"
+ * in the glossary.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+
+ /**
+ * Fills the data field for ghost indices with
+ * the values stored in the respective
+ * positions of the owning processor. This
+ * function is needed before reading from
+ * ghosts. The function is @p const even
+ * though ghost data is changed. This is
+ * needed to allow functions with a @p const
+ * vector to perform the data exchange without
+ * creating temporaries.
+ */
+ void update_ghost_values () const;
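The intended interplay of compress() and update_ghost_values() is, roughly, the following sketch; the vector and the two index arguments are placeholders, with the ghost index assumed to have been declared at construction as shown earlier.

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void accumulate_and_exchange (ParVector &v,
                              const unsigned int owned_index,
                              const unsigned int ghost_index)
{
  v = 0.;                   // zeroes owned entries and ghost entries alike
  v(owned_index) += 1.;     // contribution to a locally owned entry
  v(ghost_index) += 2.;     // contribution to an entry owned elsewhere

  // ship the ghost contributions to the owning processors and add them
  // into the respective entries there
  v.compress (dealii::VectorOperation::add);

  // afterwards make the (now complete) owner values readable through
  // the ghost entries on all processors that requested them
  v.update_ghost_values ();
}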
+
+ /**
+ * Initiates communication for the @p
+ * compress() function with non-blocking
+ * communication. This function does not wait
+ * for the transfer to finish, in order to
+ * allow for other computations during the
+ * time it takes until all data arrives.
+ *
+ * Before the data is actually exchanged, the
+ * function must be followed by a call to @p
+ * compress_finish().
+ *
+ * In case this function is called for more
+ * than one vector before @p
+ * compress_finish() is invoked, it is
+ * mandatory to specify a unique
+ * communication channel to each such call, in
+ * order to avoid several messages with the
+ * same ID that will corrupt this operation.
+ */
+ void compress_start (const unsigned int communication_channel = 0);
+
+ /**
+ * For all requests that have been initiated
+ * in compress_start, wait for the
+ * communication to finish. Once it is
+ * finished, add or set the data (depending on
+ * whether @p add_ghost_data is @p true or @p
+ * false) to the respective positions in the
+ * owning processor, and clear the contents in
+ * the ghost data fields. The meaning of
+ * this argument is the same as in compress().
+ *
+ * Must follow a call to the @p compress_start
+ * function.
+ */
+ void compress_finish (const bool add_ghost_data = true);
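When several vectors have to be compressed at the same time, the split into compress_start() and compress_finish() lets the transfers overlap, provided each vector uses its own communication channel; a sketch (the vector names are placeholders):

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void compress_two_vectors (ParVector &u, ParVector &v)
{
  // start both non-blocking exchanges on distinct channels so that the
  // messages belonging to the two vectors cannot get mixed up
  u.compress_start (0);
  v.compress_start (1);

  // ... work that does not touch u or v can be done here ...

  // wait for the data and add it into the owning processors' entries
  u.compress_finish (true);
  v.compress_finish (true);
}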
+
+
+ /**
+ * Initiates communication for the @p
+ * update_ghost_values() function with non-blocking
+ * communication. This function does not wait
+ * for the transfer to finish, in order to
+ * allow for other computations during the
+ * time it takes until all data arrives.
+ *
+ * Before the data is actually exchanged, the
+ * function must be followed by a call to @p
+ * update_ghost_values_finish().
+ *
+ * In case this function is called for more
+ * than one vector before @p
+ * update_ghost_values_finish() is invoked, it is
+ * mandatory to specify a unique communication
+ * channel to each such call, in order to
+ * avoid several messages with the same ID
+ * that will corrupt this operation.
+ */
+ void update_ghost_values_start (const unsigned int communication_channel = 0) const;
+
+
+ /**
+ * For all requests that have been started in
+ * update_ghost_values_start, wait for the communication
+ * to finish.
+ *
+ * Must follow a call to the @p
+ * update_ghost_values_start function before reading
+ * data from ghost indices.
+ */
+ void update_ghost_values_finish () const;
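The same splitting allows the ghost update to overlap with local work; a sketch, where the commented-out call stands for any computation that does not yet read ghost entries:

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void overlap_ghost_update (const ParVector &v)
{
  // begin the non-blocking transfer of owner values to the ghost entries
  v.update_ghost_values_start ();

  // work that only needs locally owned data can proceed in the meantime
  // do_local_work ();   // placeholder

  // before any ghost entry is read, the transfer must be completed
  v.update_ghost_values_finish ();
}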
+
+ /**
+ * This method zeros the entries on ghost
+ * dofs, but does not touch locally owned
+ * DoFs.
+ */
+ void zero_out_ghosts ();
+
+ /**
+ * Return whether the vector contains only
+ * elements with value zero. This function
+ * is mainly for internal consistency
+ * checks and should seldom be used outside
+ * of debug mode, since it is quite
+ * expensive.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries are
+ * zero or positive. This function is
+ * used, for example, to check whether
+ * refinement indicators are really all
+ * positive (or zero).
+ *
+ * The function obviously only makes
+ * sense if the template argument of this
+ * class is a real type. If it is a
+ * complex type, then an exception is
+ * thrown.
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Checks for equality of the two vectors.
+ */
+ template <typename Number2>
+ bool operator == (const Vector<Number2> &v) const;
+
+ /**
+ * Checks for inequality of the two vectors.
+ */
+ template <typename Number2>
+ bool operator != (const Vector<Number2> &v) const;
+
+ /**
+ * Perform the inner product of two vectors.
+ */
+ template <typename Number2>
+ Number operator * (const Vector<Number2> &V) const;
+
+ /**
+ * Computes the square of the l<sub>2</sub>
+ * norm of the vector (i.e., the sum of the
+ * squares of all entries among all
+ * processors).
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Computes the mean value of all the entries
+ * in the vector.
+ */
+ Number mean_value () const;
+
+ /**
+ * Returns the l<sub>1</sub> norm of the
+ * vector (i.e., the sum of the absolute
+ * values of all entries among all
+ * processors).
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Returns the l<sub>2</sub> norm of the
+ * vector (i.e., square root of the sum of the
+ * square of all entries among all
+ * processors).
+ */
+ real_type l2_norm () const;
+
+ /**
+ * Returns the l<sub>p</sub> norm with real @p
+ * p of the vector (i.e., the pth root of sum
+ * of the pth power of all entries among all
+ * processors).
+ */
+ real_type lp_norm (const real_type p) const;
+
+ /**
+ * Returns the maximum norm of the vector
+ * (i.e., maximum absolute value among all
+ * entries among all processors).
+ */
+ real_type linfty_norm () const;
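All reductions above are collective operations that return the same value on every processor; a small sketch:

#include <iostream>

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void print_norms (const ParVector &v)
{
  // each of these involves an MPI reduction over all processors
  const double l1   = v.l1_norm ();
  const double l2   = v.l2_norm ();       // equals std::sqrt (v.norm_sqr ())
  const double linf = v.linfty_norm ();
  const double mean = v.mean_value ();

  std::cout << l1 << ' ' << l2 << ' ' << linf << ' ' << mean << std::endl;
}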
+
+ /**
+ * Returns the global size of the vector,
+ * equal to the sum of the number of locally
+ * owned indices among all the processors.
+ */
+ types::global_dof_index size () const;
+
+ /**
+ * Returns the local size of the vector, i.e.,
+ * the number of indices owned locally.
+ */
+ unsigned int local_size() const;
+
+ /**
+ * Returns the half-open interval that
+ * specifies the locally owned range of the
+ * vector. Note that <code>local_size() ==
+ * local_range().second -
+ * local_range().first</code>.
+ */
+ std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
+
+ /**
+ * Returns true if the given global index is
+ * in the local range of this processor.
+ */
+ bool in_local_range (const types::global_dof_index global_index) const;
+
+ /**
+ * Returns the number of ghost elements
+ * present on the vector.
+ */
+ unsigned int n_ghost_entries () const;
+
+ /**
+ * Returns whether the given global index is a
+ * ghost index on the present
+ * processor. Returns false for indices that
+ * are owned locally and for indices not
+ * present at all.
+ */
+ bool is_ghost_entry (const types::global_dof_index global_index) const;
+
+ /**
+ * Make the @p Vector class a bit like
+ * the <tt>vector<></tt> class of the C++
+ * standard library by returning
+ * iterators to the start and end of the
+ * locally owned elements of this vector.
+ */
+ iterator begin ();
+
+ /**
+ * Return constant iterator to the start of
+ * the vector.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Return an iterator pointing to the
+ * element past the end of the array of
+ * locally owned entries.
+ */
+ iterator end ();
+
+ /**
+ * Return a constant iterator pointing to
+ * the element past the end of the array
+ * of the locally owned entries.
+ */
+ const_iterator end () const;
+ //@}
+
+
+ /**
+ * @name 2: Data-Access
+ */
+ //@{
+
+ /**
+ * Read access to the data in the
+ * position corresponding to @p
+ * global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ */
+ Number operator () (const types::global_dof_index global_index) const;
+
+ /**
+ * Read and write access to the data
+ * in the position corresponding to
+ * @p global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ */
+ Number &operator () (const types::global_dof_index global_index);
+
+ /**
+ * Read access to the data in the
+ * position corresponding to @p
+ * global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ *
+ * This function does the same thing
+ * as operator().
+ */
+ Number operator [] (const types::global_dof_index global_index) const;
+
+ /**
+ * Read and write access to the data
+ * in the position corresponding to
+ * @p global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ *
+ * This function does the same thing
+ * as operator().
+ */
+ Number &operator [] (const types::global_dof_index global_index);
+
+ /**
+ * Read access to the data field specified by
+ * @p local_index. Locally owned indices can
+ * be accessed with indices
+ * <code>[0,local_size)</code>, and ghost
+ * indices with indices
+ * <code>[local_size,local_size+
+ * n_ghost_entries)</code>.
+ */
+ Number local_element (const unsigned int local_index) const;
+
+ /**
+ * Read and write access to the data field
+ * specified by @p local_index. Locally owned
+ * indices can be accessed with indices
+ * <code>[0,local_size)</code>, and ghost
+ * indices with indices
+ * <code>[local_size,local_size+n_ghost_entries)</code>.
+ */
+ Number &local_element (const unsigned int local_index);
+ //@}
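The local storage is thus laid out as the locally owned range followed by the ghost entries; a sketch of a loop over both parts using local_element():

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

double sum_local_storage (const ParVector &v)
{
  double sum = 0;

  // locally owned entries live at local indices [0, local_size())
  for (unsigned int i=0; i<v.local_size(); ++i)
    sum += v.local_element (i);

  // ghost entries follow directly after the locally owned range
  for (unsigned int i=0; i<v.n_ghost_entries(); ++i)
    sum += v.local_element (v.local_size() + i);

  return sum;
}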
+
+
+ /**
+ * @name 3: Modification of vectors
+ */
+ //@{
+
+ /**
+ * Add the given vector to the present
+ * one.
+ */
+ Vector<Number> &operator += (const Vector<Number> &V);
+
+ /**
+ * Subtract the given vector from the
+ * present one.
+ */
+ Vector<Number> &operator -= (const Vector<Number> &V);
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
+ const std::vector<OtherNumber> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<OtherNumber> &values);
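A sketch of the collective add() with an index/value list; the indices must be locally owned or declared as ghosts, just as for element access, and ghost contributions are shipped to their owners by a subsequent compress():

#include <vector>

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void add_contributions (ParVector &v)
{
  std::vector<unsigned int> indices;
  std::vector<double>       values;
  indices.push_back (3);   values.push_back (0.5);
  indices.push_back (7);   values.push_back (1.5);

  // add the values to the entries given by the indices
  v.add (indices, values);

  // if some of the indices were ghost indices, send the contributions
  // to the owning processors afterwards
  v.compress (dealii::VectorOperation::add);
}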
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ template <typename OtherNumber>
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
+ const OtherNumber *values);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is a
+ * scalar and not a vector.
+ */
+ void add (const Number s);
+
+ /**
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const Vector<Number> &V);
+
+ /**
+ * Simple addition of a multiple of a
+ * vector, i.e. <tt>*this += a*V</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V);
+
+ /**
+ * Multiple addition of scaled vectors,
+ * i.e. <tt>*this += a*V+b*W</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V,
+ const Number b, const Vector<Number> &W);
+
+ /**
+ * Scaling and simple vector addition,
+ * i.e.
+ * <tt>*this = s*(*this)+V</tt>.
+ */
+ void sadd (const Number s,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and simple addition, i.e.
+ * <tt>*this = s*(*this)+a*V</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W,
+ const Number c,
+ const Vector<Number> &X);
+
+ /**
+ * Scale each element of the
+ * vector by the given factor.
+ *
+ * This function is deprecated
+ * and will be removed in a
+ * future version. Use
+ * <tt>operator *=</tt> and
+ * <tt>operator /=</tt> instead.
+ */
+ void scale (const Number factor);
+
+
+ /**
+ * Scale each element of the
+ * vector by a constant
+ * value.
+ */
+ Vector<Number> &operator *= (const Number factor);
+
+ /**
+ * Scale each element of the
+ * vector by the inverse of the
+ * given value.
+ */
+ Vector<Number> &operator /= (const Number factor);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const Vector<Number> &scaling_factors);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ template <typename Number2>
+ void scale (const Vector<Number2> &scaling_factors);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ template <typename Number2>
+ void equ (const Number a, const Vector<Number2> &u);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v + c*w</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v,
+ const Number c, const Vector<Number> &w);
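These update functions map onto common vector recurrences; for instance, a damped update x = (1-omega)*x + omega*y and a linear combination z = 2*u + 3*v read as follows (a sketch, all vectors assumed to share the same layout):

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void blas_like_updates (ParVector &x, const ParVector &y,
                        ParVector &z, const ParVector &u, const ParVector &v)
{
  const double omega = 0.7;

  // x = (1-omega)*x + omega*y, i.e. sadd(s,a,V) with s=1-omega, a=omega
  x.sadd (1.-omega, omega, y);

  // z = 2*u + 3*v, i.e. equ(a,u,b,v)
  z.equ (2., u, 3., v);
}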
+
+ /**
+ * Compute the elementwise ratio of the
+ * two given vectors, that is let
+ * <tt>this[i] = a[i]/b[i]</tt>. This is
+ * useful for example if you want to
+ * compute the cellwise ratio of true to
+ * estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const Vector<Number> &a,
+ const Vector<Number> &b);
+ //@}
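A sketch of the typical use of ratio() mentioned above, computing the cellwise ratio of true to estimated error; the vector names are placeholders and all three vectors are assumed to share the same layout:

typedef dealii::parallel::distributed::Vector<double> ParVector;   // assumed namespace

void error_efficiency (ParVector       &efficiency,
                       const ParVector &true_error,
                       const ParVector &estimated_error)
{
  // efficiency[i] = true_error[i] / estimated_error[i]; entries of
  // estimated_error must not be zero, no check is performed
  efficiency.ratio (true_error, estimated_error);
}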
+
+
+ /**
+ * @name 4: Mixed stuff
+ */
+ //@{
+ /**
+ * Checks whether the given
+ * partitioner is compatible with the
+ * partitioner used for this
+ * vector. Two partitioners are
+ * compatible if they have the same
+ * local size and the same ghost
+ * indices. They do not necessarily
+ * need to be the same data
+ * field. This is a local operation
+ * only, i.e., if only some
+ * processors decide that the
+ * partitioning is not compatible,
+ * only these processors will return
+ * @p false, whereas the other
+ * processors will return @p true.
+ */
+ bool
+ partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const;
+
+
+ /**
+ * Prints the vector to the output stream @p
+ * out.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Returns the memory consumption of this
+ * class in bytes.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
+ private:
+ /**
+ * Shared pointer to store the parallel
+ * partitioning information. This information
+ * can be shared between several vectors that
+ * have the same partitioning.
+ */
+ std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+ /**
+ * The size that is currently allocated in the
+ * val array.
+ */
+ unsigned int allocated_size;
+
+ /**
+ * Pointer to the array of
+ * local elements of this vector.
+ */
+ Number *val;
+
+ /**
+ * Temporary storage that holds the data that
+ * is sent to this processor in @p compress()
+ * or sent from this processor in @p
+ * update_ghost_values.
+ */
+ mutable Number *import_data;
+
+ /**
+ * Provide this class with all functionality
+ * of ::dealii::Vector by creating a
+ * VectorView object.
+ */
+ VectorView<Number> vector_view;
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- /**
- * A vector that collects all requests from @p
- * compress() operations. This class uses
- * persistent MPI communicators, i.e., the
- * communication channels are stored during
- * successive calls to a given function. This
- * reduces the overhead involved with setting
- * up the MPI machinery, but it does not
- * remove the need for a receive operation to
- * be posted before the data can actually be
- * sent.
- */
- std::vector<MPI_Request> compress_requests;
-
- /**
- * A vector that collects all requests from @p
- * update_ghost_values() operations. This class uses
- * persistent MPI communicators.
- */
- mutable std::vector<MPI_Request> update_ghost_values_requests;
+ /**
+ * A vector that collects all requests from @p
+ * compress() operations. This class uses
+ * persistent MPI communicators, i.e., the
+ * communication channels are stored during
+ * successive calls to a given function. This
+ * reduces the overhead involved with setting
+ * up the MPI machinery, but it does not
+ * remove the need for a receive operation to
+ * be posted before the data can actually be
+ * sent.
+ */
+ std::vector<MPI_Request> compress_requests;
+
+ /**
+ * A vector that collects all requests from @p
+ * update_ghost_values() operations. This class uses
+ * persistent MPI communicators.
+ */
+ mutable std::vector<MPI_Request> update_ghost_values_requests;
#endif
- /**
- * A lock that makes sure that
- * the @p compress and @p
- * update_ghost_values functions
- * give reasonable results also
- * when used with several
- * threads.
- */
- mutable Threads::ThreadMutex mutex;
-
- /**
- * A helper function that clears the
- * compress_requests and update_ghost_values_requests
- * field. Used in reinit functions.
- */
- void clear_mpi_requests ();
-
- /**
- * A helper function that is used to resize
- * the val array.
- */
- void resize_val (const unsigned int new_allocated_size);
-
- /*
- * Make all other vector types
- * friends.
- */
- template <typename Number2> friend class Vector;
+ /**
+ * A lock that makes sure that
+ * the @p compress and @p
+ * update_ghost_values functions
+ * give reasonable results also
+ * when used with several
+ * threads.
+ */
+ mutable Threads::ThreadMutex mutex;
+
+ /**
+ * A helper function that clears the
+ * compress_requests and update_ghost_values_requests
+ * field. Used in reinit functions.
+ */
+ void clear_mpi_requests ();
+
+ /**
+ * A helper function that is used to resize
+ * the val array.
+ */
+ void resize_val (const unsigned int new_allocated_size);
+
+ /*
+ * Make all other vector types
+ * friends.
+ */
+ template <typename Number2> friend class Vector;
};
- /*@}*/
+ /*@}*/
- /*----------------------- Inline functions ----------------------------------*/
+ /*----------------------- Inline functions ----------------------------------*/
#ifndef DOXYGEN
namespace PETScWrappers
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
- * class implements the functions that are specific to the PETSc SparseMatrix
- * base objects for a blocked sparse matrix, and leaves the actual work
- * relaying most of the calls to the individual blocks to the functions
- * implemented in the base class. See there also for a description of when
- * this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
- * not have external objects for the sparsity patterns. Thus, one does not
- * determine the size of the individual blocks of a block matrix of this type
- * by attaching a block sparsity pattern, but by calling reinit() to set the
- * number of blocks and then by setting the size of each block separately. In
- * order to fix the data structures of the block matrix, it is then necessary
- * to let it know that we have changed the sizes of the underlying
- * matrices. For this, one has to call the collect_sizes() function, for much
- * the same reason as is documented with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
+ * class implements the functions that are specific to the PETSc SparseMatrix
+ * base objects for a blocked sparse matrix, and leaves the actual work
+ * relaying most of the calls to the individual blocks to the functions
+ * implemented in the base class. See there also for a description of when
+ * this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
+ * not have external objects for the sparsity patterns. Thus, one does not
+ * determine the size of the individual blocks of a block matrix of this type
+ * by attaching a block sparsity pattern, but by calling reinit() to set the
+ * number of blocks and then by setting the size of each block separately. In
+ * order to fix the data structures of the block matrix, it is then necessary
+ * to let it know that we have changed the sizes of the underlying
+ * matrices. For this, one has to call the collect_sizes() function, for much
+ * the same reason as is documented with the BlockSparsityPattern class.
+ *
+ * @ingroup Matrix1
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockSparseMatrix : public BlockMatrixBase<PETScWrappers::SparseMatrix>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix ();
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the block
- * matrices need to be the same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * unitialized ones, i.e. ones
- * for which also the sizes are
- * not yet set. You have to do
- * that by calling the @p reinit
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the @p SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- */
- void collect_sizes ();
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void Tvmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * Make the clear() function in the
- * base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
-
- /** @addtogroup Exceptions
- * @{
- */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- ///@}
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix ();
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the block
+ * matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this does usually not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keep the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+ * uninitialized ones, i.e. ones
+ * for which also the sizes are
+ * not yet set. You have to do
+ * that by calling the @p reinit
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the @p SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ */
+ void collect_sizes ();
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ void vmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void vmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void vmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void vmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ void Tvmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void Tvmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void Tvmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void Tvmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * Make the clear() function in the
+ * base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
+
+ /** @addtogroup Exceptions
+ * @{
+ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ ///@}
};
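As the class documentation above describes, such a block matrix is set up by fixing the number of blocks, sizing each block individually, and then calling collect_sizes(). A sketch follows; the header name and the per-block reinit() signature with a maximal number of entries per row are assumptions about PETScWrappers::SparseMatrix:

#include <deal.II/lac/petsc_block_sparse_matrix.h>   // assumed header name

void setup_block_matrix (dealii::PETScWrappers::BlockSparseMatrix &matrix)
{
  // a 2x2 block structure, e.g. for a two-field problem
  matrix.reinit (2, 2);

  // size every block separately; here the first field has 100 unknowns,
  // the second 50, with at most 5 entries per row in each block
  // (assumed reinit(m, n, n_nonzero_per_row) of the PETSc sparse matrix)
  matrix.block(0,0).reinit (100, 100, 5);
  matrix.block(0,1).reinit (100,  50, 5);
  matrix.block(1,0).reinit ( 50, 100, 5);
  matrix.block(1,1).reinit ( 50,  50, 5);

  // tell the block matrix that the sizes of its blocks have changed
  matrix.collect_sizes ();
}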
inline
void
- BlockSparseMatrix::Tvmult (BlockVector &dst,
+ BlockSparseMatrix::Tvmult (BlockVector &dst,
- const Vector &src) const
+ const Vector &src) const
{
BaseClass::Tvmult_block_nonblock (dst, src);
}
namespace PETScWrappers
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * An implementation of block vectors based on the vector class implemented in
- * PETScWrappers. While the base class provides for most of the interface,
- * this class handles the actual allocation of vectors and provides functions
- * that are specific to the underlying vector type.
- *
- * @ingroup Vectors
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * An implementation of block vectors based on the vector class implemented in
+ * PETScWrappers. While the base class provides for most of the interface,
+ * this class handles the actual allocation of vectors and provides functions
+ * that are specific to the underlying vector type.
+ *
+ * @ingroup Vectors
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor. There are three
- * ways to use this
- * constructor. First, without
- * any arguments, it generates
- * an object with no
- * blocks. Given one argument,
- * it initializes <tt>num_blocks</tt>
- * blocks, but these blocks have
- * size zero. The third variant
- * finally initializes all
- * blocks to the same size
- * <tt>block_size</tt>.
- *
- * Confer the other constructor
- * further down if you intend to
- * use blocks of different
- * sizes.
- */
- explicit BlockVector (const unsigned int num_blocks = 0,
- const unsigned int block_size = 0);
-
- /**
- * Copy-Constructor. Dimension set to
- * that of V, all components are copied
- * from V
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Copy-constructor: copy the values
- * from a PETSc wrapper parallel block
- * vector class.
- *
- *
- * Note that due to the communication
- * model of MPI, @em all processes have
- * to actually perform this operation,
- * even if they do not use the
- * result. It is not sufficient if only
- * one processor tries to copy the
- * elements from the other processors
- * over to its own process space.
- */
- explicit BlockVector (const MPI::BlockVector &v);
-
- /**
- * Constructor. Set the number of
- * blocks to <tt>n.size()</tt> and
- * initialize each block with
- * <tt>n[i]</tt> zero elements.
- */
- BlockVector (const std::vector<unsigned int> &n);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>n.size()</tt>. Initialize the
- * vector with the elements
- * pointed to by the range of
- * iterators given as second and
- * third argument. Apart from the
- * first argument, this
- * constructor is in complete
- * analogy to the respective
- * constructor of the
- * <tt>std::vector</tt> class, but the
- * first argument is needed in
- * order to know how to subdivide
- * the block vector into
- * different blocks.
- */
- template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Copy operator: fill all components of
- * the vector with the given scalar
- * value.
- */
- BlockVector & operator = (const value_type s);
-
- /**
- * Copy operator for arguments of the
- * same type.
- */
- BlockVector &
- operator= (const BlockVector &V);
-
- /**
- * Copy all the elements of the
- * parallel block vector @p v into this
- * local vector. Note that due to the
- * communication model of MPI, @em all
- * processes have to actually perform
- * this operation, even if they do not
- * use the result. It is not sufficient
- * if only one processor tries to copy
- * the elements from the other
- * processors over to its own process
- * space.
- */
- BlockVector &
- operator = (const MPI::BlockVector &v);
-
- /**
- * Reinitialize the BlockVector to
- * contain <tt>num_blocks</tt> blocks of
- * size <tt>block_size</tt> each.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const unsigned int num_blocks,
- const unsigned int block_size,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector such
- * that it contains
- * <tt>block_sizes.size()</tt>
- * blocks. Each block is reinitialized
- * to dimension
- * <tt>block_sizes[i]</tt>.
- *
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() of one of the
- * blocks, then subsequent
- * actions of this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast=false);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
- ///@}
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor. There are three
+ * ways to use this
+ * constructor. First, without
+ * any arguments, it generates
+ * an object with no
+ * blocks. Given one argument,
+ * it initializes <tt>num_blocks</tt>
+ * blocks, but these blocks have
+ * size zero. The third variant
+ * finally initializes all
+ * blocks to the same size
+ * <tt>block_size</tt>.
+ *
+ * Confer the other constructor
+ * further down if you intend to
+ * use blocks of different
+ * sizes.
+ */
+ explicit BlockVector (const unsigned int num_blocks = 0,
+ const unsigned int block_size = 0);
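+
+ // Usage sketch (the sizes are illustrative assumptions): create a block
+ // vector with three blocks of ten elements each, then zero all entries
+ // via the scalar copy operator declared further down.
+ //
+ //   PETScWrappers::BlockVector v (3, 10);
+ //   v = 0.0;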
+
+ /**
+ * Copy constructor. The dimension is set
+ * to that of V; all components are
+ * copied from V.
+ */
- BlockVector (const BlockVector &V);
++ BlockVector (const BlockVector &V);
+
+ /**
+ * Copy-constructor: copy the values
+ * from a PETSc wrapper parallel block
+ * vector class.
+ *
+ * Note that due to the communication
+ * model of MPI, @em all processes have
+ * to actually perform this operation,
+ * even if they do not use the
+ * result. It is not sufficient if only
+ * one processor tries to copy the
+ * elements from the other processors
+ * over to its own process space.
+ */
+ explicit BlockVector (const MPI::BlockVector &v);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to <tt>n.size()</tt> and
+ * initialize each block with
+ * <tt>n[i]</tt> zero elements.
+ */
+ BlockVector (const std::vector<unsigned int> &n);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>n.size()</tt>. Initialize the
+ * vector with the elements
+ * pointed to by the range of
+ * iterators given as second and
+ * third argument. Apart from the
+ * first argument, this
+ * constructor is in complete
+ * analogy to the respective
+ * constructor of the
+ * <tt>std::vector</tt> class, but the
+ * first argument is needed in
+ * order to know how to subdivide
+ * the block vector into
+ * different blocks.
+ */
+ template <typename InputIterator>
+ BlockVector (const std::vector<unsigned int> &n,
+ const InputIterator first,
+ const InputIterator end);
+
+ /**
+ * Destructor. Clears memory
+ */
+ ~BlockVector ();
+
+ /**
+ * Copy operator: fill all components of
+ * the vector with the given scalar
+ * value.
+ */
+ BlockVector &operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of the
+ * same type.
+ */
+ BlockVector &
+ operator= (const BlockVector &V);
+
+ /**
+ * Copy all the elements of the
+ * parallel block vector @p v into this
+ * local vector. Note that due to the
+ * communication model of MPI, @em all
+ * processes have to actually perform
+ * this operation, even if they do not
+ * use the result. It is not sufficient
+ * if only one processor tries to copy
+ * the elements from the other
+ * processors over to its own process
+ * space.
+ */
+ BlockVector &
+ operator = (const MPI::BlockVector &v);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain <tt>num_blocks</tt> blocks of
+ * size <tt>block_size</tt> each.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const unsigned int num_blocks,
+ const unsigned int block_size,
+ const bool fast = false);
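+
+ // Usage sketch (illustrative sizes; v is the block vector from the
+ // sketch above): resize it to four blocks of 25 elements each; with the
+ // default <tt>fast==false</tt> the entries are zero-initialized.
+ //
+ //   v.reinit (4, 25);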
+
+ /**
+ * Reinitialize the BlockVector such
+ * that it contains
+ * <tt>block_sizes.size()</tt>
+ * blocks. Each block is reinitialized
+ * to dimension
+ * <tt>block_sizes[i]</tt>.
+ *
+ * If the number of blocks is the
+ * same as before this function
+ * was called, all vectors remain
+ * the same and reinit() is
+ * called for each vector.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const std::vector<unsigned int> &N,
+ const bool fast=false);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast=false);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
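+
+ // Usage sketch (u and v are illustrative block vectors with the same
+ // number of blocks): exchange their contents without copying any data.
+ //
+ //   u.swap (v);      // member function
+ //   swap (u, v);     // equivalent global function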
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+ ///@}
};
- /*@}*/
+ /*@}*/
- /*----------------------- Inline functions ----------------------------------*/
+ /*----------------------- Inline functions ----------------------------------*/
}
- /**
- * Base class for all matrix classes that are implemented on top of the PETSc
- * matrix types. Since in PETSc all matrix types (i.e. sequential and
- * parallel, sparse, blocked, etc.) are built by filling the contents of an
- * abstract object that is only referenced through a pointer of a type that is
- * independent of the actual matrix type, we can implement almost all
- * functionality of matrices in this base class. Derived classes will then only
- * have to provide the functionality to create one or the other kind of
- * matrix.
- *
- * The interface of this class is modeled after the existing
- * SparseMatrix class in deal.II. It has almost the same member
- * functions, and is often exchangable. However, since PETSc only supports a
- * single scalar type (either double, float, or a complex data type), it is
- * not templated, and only works with whatever your PETSc installation has
- * defined the data type PetscScalar to.
- *
- * Note that PETSc only guarantees that operations do what you expect if the
- * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called
- * after matrix assembly. Therefore, you need to call
- * SparseMatrix::compress() before you actually use the matrix. This also
- * calls @p MatCompress that compresses the storage format for sparse
- * matrices by discarding unused elements. PETSc allows to continue with
- * assembling the matrix after calls to these functions, but since there are
- * no more free entries available after that any more, it is better to only
- * call SparseMatrix::compress() once at the end of the assembly stage and
- * before the matrix is actively used.
- *
- * @ingroup PETScWrappers
- * @ingroup Matrix1
- * @author Wolfgang Bangerth, 2004
- */
+ /**
+ * Base class for all matrix classes that are implemented on top of the PETSc
+ * matrix types. Since in PETSc all matrix types (i.e. sequential and
+ * parallel, sparse, blocked, etc.) are built by filling the contents of an
+ * abstract object that is only referenced through a pointer of a type that is
+ * independent of the actual matrix type, we can implement almost all
+ * functionality of matrices in this base class. Derived classes will then only
+ * have to provide the functionality to create one or the other kind of
+ * matrix.
+ *
+ * The interface of this class is modeled after the existing
+ * SparseMatrix class in deal.II. It has almost the same member
+ * functions, and is often exchangeable. However, since PETSc only supports a
+ * single scalar type (either double, float, or a complex data type), it is
+ * not templated, and only works with whatever your PETSc installation has
+ * defined the data type PetscScalar to.
+ *
+ * Note that PETSc only guarantees that operations do what you expect if the
+ * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called
+ * after matrix assembly. Therefore, you need to call
+ * SparseMatrix::compress() before you actually use the matrix. This also
+ * calls @p MatCompress that compresses the storage format for sparse
+ * matrices by discarding unused elements. PETSc allows you to continue
+ * assembling the matrix after calls to these functions, but since no free
+ * entries are available any more after that, it is better to call
+ * SparseMatrix::compress() only once, at the end of the assembly stage and
+ * before the matrix is actively used.
+ *
+ * @ingroup PETScWrappers
+ * @ingroup Matrix1
+ * @author Wolfgang Bangerth, 2004
+ */
class MatrixBase : public Subscriptor
{
- public:
- /**
- * Declare a typedef for the iterator
- * class.
- */
- typedef MatrixIterators::const_iterator const_iterator;
-
- /**
- * Declare a typedef in analogy to all
- * the other container classes.
- */
- typedef PetscScalar value_type;
-
- /**
- * Default constructor.
- */
- MatrixBase ();
-
- /**
- * Destructor. Made virtual so that one
- * can use pointers to this class.
- */
- virtual ~MatrixBase ();
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keeps the sparsity pattern
- * previously used.
- */
- MatrixBase &
- operator = (const value_type d);
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor.
- */
- void clear ();
-
- /**
- * Set the element (<i>i,j</i>) to @p
- * value.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds a new entry to the
- * matrix if it didn't exist before,
- * very much in contrast to the
- * SparseMatrix class which throws an
- * error if the entry does not exist.
- * If <tt>value</tt> is not a finite
- * number an exception is thrown.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const PetscScalar value);
-
- /**
- * Set all elements given in a
- * FullMatrix<double> into the sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const PetscScalar *values,
- const bool elide_zero_values = false);
-
- /**
- * Add @p value to the element
- * (<i>i,j</i>).
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds a new entry to the
- * matrix if it didn't exist before,
- * very much in contrast to the
- * SparseMatrix class which throws an
- * error if the entry does not exist.
- * If <tt>value</tt> is not a finite
- * number an exception is thrown.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const PetscScalar value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const PetscScalar *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Remove all elements from
- * this <tt>row</tt> by setting
- * them to zero. The function
- * does not modify the number
- * of allocated nonzero
- * entries, it only sets some
- * entries to zero. It may drop
- * them from the sparsity
- * pattern, though (but retains
- * the allocated memory in case
- * new entries are again added
- * later).
- *
- * This operation is used in
- * eliminating constraints (e.g. due to
- * hanging nodes) and makes sure that
- * we can write this modification to
- * the matrix without having to read
- * entries (such as the locations of
- * non-zero elements) from it --
- * without this operation, removing
- * constraints on parallel matrices is
- * a rather complicated procedure.
- *
- * The second parameter can be used to
- * set the diagonal entry of this row
- * to a value different from zero. The
- * default is to set it to zero.
- */
- void clear_row (const unsigned int row,
- const PetscScalar new_diag_value = 0);
-
- /**
- * Same as clear_row(), except that it
- * works on a number of rows at once.
- *
- * The second parameter can be used to
- * set the diagonal entries of all
- * cleared rows to something different
- * from zero. Note that all of these
- * diagonal entries get the same value
- * -- if you want different values for
- * the diagonal entries, you have to
- * set them by hand.
- */
- void clear_rows (const std::vector<unsigned int> &rows,
- const PetscScalar new_diag_value = 0);
-
- /**
- * PETSc matrices store their own
- * sparsity patterns. So, in analogy to
- * our own SparsityPattern class,
- * this function compresses the
- * sparsity pattern and allows the
- * resulting matrix to be used in all
- * other operations where before only
- * assembly functions were
- * allowed. This function must
- * therefore be called once you have
- * assembled the matrix.
- *
- * See @ref GlossCompress "Compressing distributed objects"
- * for more information.
- * more information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
- /**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you should
- * always take care where to call this
- * function. In contrast to the
- * respective function in the
- * @p MatrixBase class, we don't
- * throw an exception if the respective
- * entry doesn't exist in the sparsity
- * pattern of this class, since PETSc
- * does not transmit this information.
- *
- * This function is therefore exactly
- * equivalent to the <tt>el()</tt> function.
- */
- PetscScalar operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the value of the matrix entry
- * (<i>i,j</i>). If this entry does not
- * exist in the sparsity pattern, then
- * zero is returned. While this may be
- * convenient in some cases, note that
- * it is simple to write algorithms
- * that are slow compared to an optimal
- * solution, since the sparsity of the
- * matrix is not used.
- */
- PetscScalar el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic.
- *
- * Since we do not have direct access
- * to the underlying data structure,
- * this function is no faster than the
- * elementwise access using the el()
- * function. However, we provide this
- * function for compatibility with the
- * SparseMatrix class.
- */
- PetscScalar diag_element (const unsigned int i) const;
-
- /**
- * Return the number of rows in this
- * matrix.
- */
- unsigned int m () const;
-
- /**
- * Return the number of columns in this
- * matrix.
- */
- unsigned int n () const;
-
- /**
- * Return the local dimension of the
- * matrix, i.e. the number of rows
- * stored on the present MPI
- * process. For sequential matrices,
- * this number is the same as m(),
- * but for parallel matrices it may be
- * smaller.
- *
- * To figure out which elements
- * exactly are stored locally,
- * use local_range().
- */
- unsigned int local_size () const;
-
- /**
- * Return a pair of indices
- * indicating which rows of
- * this matrix are stored
- * locally. The first number is
- * the index of the first
- * row stored, the second
- * the index of the one past
- * the last one that is stored
- * locally. If this is a
- * sequential matrix, then the
- * result will be the pair
- * (0,m()), otherwise it will be
- * a pair (i,i+n), where
- * <tt>n=local_size()</tt>.
- */
- std::pair<unsigned int, unsigned int>
- local_range () const;
-
- /**
- * Return whether @p index is
- * in the local range or not,
- * see also local_range().
- */
- bool in_local_range (const unsigned int index) const;
-
- /**
- * Return a reference to the MPI
- * communicator object in use with this
- * matrix. This function has to be
- * implemented in derived classes.
- */
- virtual const MPI_Comm & get_mpi_communicator () const = 0;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const unsigned int row) const;
-
- /**
- * Return the l1-norm of the matrix, that is
- * $|M|_1=max_{all columns j}\sum_{all
- * rows i} |M_ij|$,
- * (max. sum of columns).
- * This is the
- * natural matrix norm that is compatible
- * to the l1-norm for vectors, i.e.
- * $|Mv|_1\leq |M|_1 |v|_1$.
- * (cf. Haemmerlin-Hoffmann:
- * Numerische Mathematik)
- */
- PetscReal l1_norm () const;
-
- /**
- * Return the linfty-norm of the
- * matrix, that is
- * $|M|_infty=max_{all rows i}\sum_{all
- * columns j} |M_ij|$,
- * (max. sum of rows).
- * This is the
- * natural matrix norm that is compatible
- * to the linfty-norm of vectors, i.e.
- * $|Mv|_infty \leq |M|_infty |v|_infty$.
- * (cf. Haemmerlin-Hoffmann:
- * Numerische Mathematik)
- */
- PetscReal linfty_norm () const;
-
- /**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
- * matrix.
- */
- PetscReal frobenius_norm () const;
-
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
- * i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to
- * be quadratic for this operation.
- *
- * The implementation of this function
- * is not as efficient as the one in
- * the @p MatrixBase class used in
- * deal.II (i.e. the original one, not
- * the PETSc wrapper class) since PETSc
- * doesn't support this operation and
- * needs a temporary vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix (of type
- * PETScWrappers::MPI::SparseMatrix),
- * then the given vector has to be
- * a distributed vector as
- * well. Conversely, if the matrix is
- * not distributed, then neither
- * may the vector be.
- */
- PetscScalar matrix_norm_square (const VectorBase &v) const;
-
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- *
- * The implementation of this function
- * is not as efficient as the one in
- * the @p MatrixBase class used in
- * deal.II (i.e. the original one, not
- * the PETSc wrapper class) since PETSc
- * doesn't support this operation and
- * needs a temporary vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix (of type
- * PETScWrappers::MPI::SparseMatrix),
- * then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- PetscScalar matrix_scalar_product (const VectorBase &u,
- const VectorBase &v) const;
+ public:
+ /**
+ * Declare a typedef for the iterator
+ * class.
+ */
+ typedef MatrixIterators::const_iterator const_iterator;
+
+ /**
+ * Declare a typedef in analogy to all
+ * the other container classes.
+ */
+ typedef PetscScalar value_type;
+
+ /**
+ * Default constructor.
+ */
+ MatrixBase ();
+
+ /**
+ * Destructor. Made virtual so that one
+ * can use pointers to this class.
+ */
+ virtual ~MatrixBase ();
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this does usually not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keeps the sparsity pattern
+ * previously used.
+ */
+ MatrixBase &
+ operator = (const value_type d);
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
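+
+ // Usage sketch (M is an illustrative matrix object of a class derived
+ // from MatrixBase): the two statements below do very different things.
+ //
+ //   M = 0;         // zero all stored entries, keep the sparsity pattern
+ //   M.clear ();    // release all memory, as after default construction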
+
+ /**
+ * Set the element (<i>i,j</i>) to @p
+ * value.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds a new entry to the
+ * matrix if it didn't exist before,
+ * very much in contrast to the
+ * SparseMatrix class which throws an
+ * error if the entry does not exist.
+ * If <tt>value</tt> is not a finite
+ * number an exception is thrown.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const PetscScalar value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix<double> into the sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = false);
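+
+ // Usage sketch (hypothetical finite-element assembly names such as
+ // dofs_per_cell, cell_matrix and local_dof_indices are assumptions):
+ // write a local matrix into the global rows and columns given by the
+ // local-to-global index map.
+ //
+ //   FullMatrix<PetscScalar> cell_matrix (dofs_per_cell, dofs_per_cell);
+ //   std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ //   // ... fill cell_matrix and local_dof_indices on the current cell ...
+ //   M.set (local_dof_indices, cell_matrix);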
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ void set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
++ const std::vector<PetscScalar> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements to values
+ * given by <tt>values</tt> in a
+ * given row in columns given by
+ * col_indices into the sparse
+ * matrix.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
- const PetscScalar *values,
++ const PetscScalar *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add @p value to the element
+ * (<i>i,j</i>).
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds a new entry to the
+ * matrix if it didn't exist before,
+ * very much in contrast to the
+ * SparseMatrix class which throws an
+ * error if the entry does not exist.
+ * If <tt>value</tt> is not a finite
+ * number an exception is thrown.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const PetscScalar value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = true);
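+
+ // Usage sketch (same hypothetical assembly names as in the set() sketch
+ // above): accumulate a local contribution into the global matrix, so
+ // that entries shared between cells are summed up.
+ //
+ //   M.add (local_dof_indices, cell_matrix);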
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ void add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
++ const std::vector<PetscScalar> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
- const PetscScalar *values,
++ const PetscScalar *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Remove all elements from
+ * this <tt>row</tt> by setting
+ * them to zero. The function
+ * does not modify the number
+ * of allocated nonzero
+ * entries, it only sets some
+ * entries to zero. It may drop
+ * them from the sparsity
+ * pattern, though (but retains
+ * the allocated memory in case
+ * new entries are again added
+ * later).
+ *
+ * This operation is used in
+ * eliminating constraints (e.g. due to
+ * hanging nodes) and makes sure that
+ * we can write this modification to
+ * the matrix without having to read
+ * entries (such as the locations of
+ * non-zero elements) from it --
+ * without this operation, removing
+ * constraints on parallel matrices is
+ * a rather complicated procedure.
+ *
+ * The second parameter can be used to
+ * set the diagonal entry of this row
+ * to a value different from zero. The
+ * default is to set it to zero.
+ */
+ void clear_row (const unsigned int row,
+ const PetscScalar new_diag_value = 0);
+
+ /**
+ * Same as clear_row(), except that it
+ * works on a number of rows at once.
+ *
+ * The second parameter can be used to
+ * set the diagonal entries of all
+ * cleared rows to something different
+ * from zero. Note that all of these
+ * diagonal entries get the same value
+ * -- if you want different values for
+ * the diagonal entries, you have to
+ * set them by hand.
+ */
+ void clear_rows (const std::vector<unsigned int> &rows,
+ const PetscScalar new_diag_value = 0);
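+
+ // Usage sketch (constrained_rows is an illustrative name): zero out all
+ // rows belonging to constrained degrees of freedom and put 1 on their
+ // diagonal.
+ //
+ //   std::vector<unsigned int> constrained_rows;
+ //   // ... collect the indices of the constrained rows ...
+ //   M.clear_rows (constrained_rows, 1.0);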
+
+ /**
+ * PETSc matrices store their own
+ * sparsity patterns. So, in analogy to
+ * our own SparsityPattern class,
+ * this function compresses the
+ * sparsity pattern and allows the
+ * resulting matrix to be used in all
+ * other operations where before only
+ * assembly functions were
+ * allowed. This function must
+ * therefore be called once you have
+ * assembled the matrix.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
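+
+ // Usage sketch: once the last set()/add() call of the assembly stage has
+ // been made, flush and exchange all buffered entries before the matrix
+ // is used; the default operation argument is usually sufficient here.
+ //
+ //   M.compress ();
+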
+ /**
+ * Return the value of the entry
+ * (<i>i,j</i>). This may be an
+ * expensive operation and you should
+ * always take care where to call this
+ * function. In contrast to the
+ * respective function in the
+ * @p MatrixBase class, we don't
+ * throw an exception if the respective
+ * entry doesn't exist in the sparsity
+ * pattern of this class, since PETSc
+ * does not transmit this information.
+ *
+ * This function is therefore exactly
+ * equivalent to the <tt>el()</tt> function.
+ */
+ PetscScalar operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the value of the matrix entry
+ * (<i>i,j</i>). If this entry does not
+ * exist in the sparsity pattern, then
+ * zero is returned. While this may be
+ * convenient in some cases, note that
+ * it is simple to write algorithms
+ * that are slow compared to an optimal
+ * solution, since the sparsity of the
+ * matrix is not used.
+ */
+ PetscScalar el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic.
+ *
+ * Since we do not have direct access
+ * to the underlying data structure,
+ * this function is no faster than the
+ * elementwise access using the el()
+ * function. However, we provide this
+ * function for compatibility with the
+ * SparseMatrix class.
+ */
+ PetscScalar diag_element (const unsigned int i) const;
+
+ /**
+ * Return the number of rows in this
+ * matrix.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the number of columns in this
+ * matrix.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the local dimension of the
+ * matrix, i.e. the number of rows
+ * stored on the present MPI
+ * process. For sequential matrices,
+ * this number is the same as m(),
+ * but for parallel matrices it may be
+ * smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which rows of
+ * this matrix are stored
+ * locally. The first number is
+ * the index of the first
+ * row stored, the second
+ * the index of the one past
+ * the last one that is stored
+ * locally. If this is a
+ * sequential matrix, then the
+ * result will be the pair
+ * (0,m()), otherwise it will be
+ * a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
+ */
+ std::pair<unsigned int, unsigned int>
+ local_range () const;
+
+ /**
+ * Return whether @p index is
+ * in the local range or not,
+ * see also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
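+
+ // Usage sketch: visit only the rows stored on the present MPI process.
+ //
+ //   const std::pair<unsigned int, unsigned int> range = M.local_range ();
+ //   for (unsigned int row = range.first; row < range.second; ++row)
+ //     {
+ //       // every such row satisfies M.in_local_range (row)
+ //     }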
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with this
+ * matrix. This function has to be
+ * implemented in derived classes.
+ */
+ virtual const MPI_Comm &get_mpi_communicator () const = 0;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Number of entries in a specific row.
+ */
+ unsigned int row_length (const unsigned int row) const;
+
+ /**
+ * Return the l1-norm of the matrix, that is
+ * $|M|_1 = \max_{\mathrm{columns}\ j} \sum_{\mathrm{rows}\ i} |M_{ij}|$,
+ * (max. sum of columns).
+ * This is the
+ * natural matrix norm that is compatible
+ * to the l1-norm for vectors, i.e.
+ * $|Mv|_1\leq |M|_1 |v|_1$.
+ * (cf. Haemmerlin-Hoffmann:
+ * Numerische Mathematik)
+ */
+ PetscReal l1_norm () const;
+
+ /**
+ * Return the linfty-norm of the
+ * matrix, that is
+ * $|M|_\infty = \max_{\mathrm{rows}\ i} \sum_{\mathrm{columns}\ j} |M_{ij}|$,
+ * (max. sum of rows).
+ * This is the
+ * natural matrix norm that is compatible
+ * to the linfty-norm of vectors, i.e.
+ * $|Mv|_\infty \leq |M|_\infty |v|_\infty$.
+ * (cf. Haemmerlin-Hoffmann:
+ * Numerische Mathematik)
+ */
+ PetscReal linfty_norm () const;
+
+ /**
+ * Return the Frobenius norm of the
+ * matrix, i.e. the square root of the
+ * sum of squares of all entries in the
+ * matrix.
+ */
+ PetscReal frobenius_norm () const;
+
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix,
+ * i.e. $\left(v,Mv\right)$. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to
+ * be quadratic for this operation.
+ *
+ * The implementation of this function
+ * is not as efficient as the one in
+ * the @p MatrixBase class used in
+ * deal.II (i.e. the original one, not
+ * the PETSc wrapper class) since PETSc
+ * doesn't support this operation and
+ * needs a temporary vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix (of type
+ * PETScWrappers::MPI::SparseMatrix),
+ * then the given vector has to be
+ * a distributed vector as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither
+ * may the vector be.
+ */
+ PetscScalar matrix_norm_square (const VectorBase &v) const;
+
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ *
+ * The implementation of this function
+ * is not as efficient as the one in
+ * the @p MatrixBase class used in
+ * deal.II (i.e. the original one, not
+ * the PETSc wrapper class) since PETSc
+ * doesn't support this operation and
+ * needs a temporary vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix (of type
+ * PETScWrappers::MPI::SparseMatrix),
+ * then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ PetscScalar matrix_scalar_product (const VectorBase &u,
+ const VectorBase &v) const;
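+
+ // Usage sketch (u and v are illustrative vectors compatible with the
+ // quadratic matrix M):
+ //
+ //   const PetscScalar vMv = M.matrix_norm_square (v);        // (v, M v)
+ //   const PetscScalar uMv = M.matrix_scalar_product (u, v);  // (u, M v)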
#if DEAL_II_PETSC_VERSION_GTE(3,1,0)
namespace PETScWrappers
{
- /**
- * Implementation of a parallel matrix class based on PETSc <tt>MatShell</tt> matrix-type.
- * This base class implements only the interface to the PETSc matrix object,
- * while all the functionality is contained in the matrix-vector
- * multiplication which must be reimplmented in derived classes.
- *
- * This interface is an addition to the dealii::MatrixFree class to realize
- * user-defined matrix-classes together with PETSc solvers and functionalities.
- * See also the documentation of dealii::MatrixFree class and step-37 and step-48.
- *
- * Similar to other matrix classes in namespaces PETScWrappers and PETScWrappers::MPI,
- * the MatrxiFree class provides the usual matrix-vector multiplication
- * <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
- * which is pure virtual and must be reimplemented in derived classes.
- * Besides the usual interface, this class has a matrix-vector multiplication
- * <tt>vmult(Vec &dst, const Vec &src)</tt>
- * taking PETSc Vec objects, which will be called by
- * <tt>matrix_free_mult(Mat A, Vec src, Vec dst)</tt>
- * registered as matrix-vector multiplication of this PETSc matrix object.
- * The default implementation of the vmult function in the base class translates
- * the given PETSc <tt>Vec*</tt> vectors into a deal.II vector, calls
- * the usual vmult function with the usual interface and converts
- * the result back to PETSc <tt>Vec*</tt>. This could be made much more efficient
- * in derived classes without allocating new memory.
- *
- * @ingroup PETScWrappers
- * @ingroup Matrix1
- * @author Wolfgang Bangerth, Martin Steigemann, 2012
- */
+ /**
+ * Implementation of a parallel matrix class based on PETSc <tt>MatShell</tt> matrix-type.
+ * This base class implements only the interface to the PETSc matrix object,
+ * while all the functionality is contained in the matrix-vector
+ * multiplication, which must be reimplemented in derived classes.
+ *
+ * This interface is an addition to the dealii::MatrixFree class to realize
+ * user-defined matrix-classes together with PETSc solvers and functionalities.
+ * See also the documentation of dealii::MatrixFree class and step-37 and step-48.
+ *
+ * Similar to other matrix classes in namespaces PETScWrappers and PETScWrappers::MPI,
+ * the MatrixFree class provides the usual matrix-vector multiplication
+ * <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
+ * which is pure virtual and must be reimplemented in derived classes.
+ * Besides the usual interface, this class has a matrix-vector multiplication
+ * <tt>vmult(Vec &dst, const Vec &src)</tt>
+ * taking PETSc Vec objects, which will be called by
+ * <tt>matrix_free_mult(Mat A, Vec src, Vec dst)</tt>
+ * registered as matrix-vector multiplication of this PETSc matrix object.
+ * The default implementation of the vmult function in the base class translates
+ * the given PETSc <tt>Vec*</tt> vectors into a deal.II vector, calls
+ * the usual vmult function with the usual interface and converts
+ * the result back to PETSc <tt>Vec*</tt>. This could be made much more efficient
+ * in derived classes without allocating new memory.
+ *
+ * @ingroup PETScWrappers
+ * @ingroup Matrix1
+ * @author Wolfgang Bangerth, Martin Steigemann, 2012
+ */
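+ //
+ // A minimal derived-class sketch (illustrative only; MyOperator and the
+ // bodies of its member functions are assumptions, not part of the
+ // library):
+ //
+ //   class MyOperator : public PETScWrappers::MatrixFree
+ //   {
+ //   public:
+ //     virtual void vmult (VectorBase &dst,
+ //                         const VectorBase &src) const
+ //     {
+ //       // apply the operator to src and write the result into dst,
+ //       // without ever storing the matrix entries explicitly
+ //     }
+ //     // ... likewise Tvmult(), vmult_add() and Tvmult_add() ...
+ //   };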
class MatrixFree : public MatrixBase
{
- public:
-
- /**
- * Default constructor. Create an
- * empty matrix object.
- */
- MatrixFree ();
-
- /**
- * Create a matrix object of
- * dimensions @p m times @p n
- * with communication happening
- * over the provided @p communicator.
- *
- * For the meaning of the @p local_rows
- * and @p local_columns parameters,
- * see the PETScWrappers::MPI::SparseMatrix
- * class documentation.
- *
- * Like other PETSc matrices, the
- * matrix-free object needs a size
- * and, in order to perform matrix-vector
- * multiplications efficiently in
- * parallel, also the @p local_rows
- * and @p local_columns arguments. In
- * contrast to the PETSc::SparseMatrix
- * classes, a PETSc matrix-free object
- * does not need any estimate of the
- * number of nonzero entries and has no
- * <tt>is_symmetric</tt> option.
- */
- MatrixFree (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Create a matrix object of
- * dimensions @p m times @p n
- * with communication happening
- * over the provided @p communicator.
- *
- * Like other PETSc matrices, the
- * matrix-free object needs a size
- * and, in order to perform matrix-vector
- * multiplications efficiently in
- * parallel, also the @p local_rows
- * and @p local_columns arguments. In
- * contrast to the PETSc::SparseMatrix
- * classes, a PETSc matrix-free object
- * does not need any estimate of the
- * number of nonzero entries and has no
- * <tt>is_symmetric</tt> option.
- */
- MatrixFree (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Constructor for the serial case:
- * Same function as
- * <tt>MatrixFree()</tt>, see above,
- * with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- MatrixFree (const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Constructor for the serial case:
- * Same function as
- * <tt>MatrixFree()</tt>, see above,
- * with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- MatrixFree (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Throw away the present matrix and
- * generate one that has the same
- * properties as if it were created by
- * the constructor of this class with
- * the same argument list as the
- * present function.
- */
- void reinit (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Throw away the present matrix and
- * generate one that has the same
- * properties as if it were created by
- * the constructor of this class with
- * the same argument list as the
- * present function.
- */
- void reinit (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Calls the @p reinit() function
- * above with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Calls the @p reinit() function
- * above with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor.
- */
- void clear ();
-
- /**
- * Return a reference to the MPI
- * communicator object in use with
- * this matrix.
- */
- const MPI_Comm & get_mpi_communicator () const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix (of type
- * PETScWrappers::MPI::SparseMatrix),
- * then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void vmult (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * Matrix-vector multiplication: let
- * <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this matrix. This
- * function does the same as @p vmult()
- * but takes the transposed matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void vmult_add (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as @p vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void Tvmult_add (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * The matrix-vector multiplication
- * called by @p matrix_free_mult().
- * This function can be reimplemented
- * in derived classes for efficiency. The default
- * implementation copies the given vectors
- * into PETScWrappers::*::Vector
- * and calls <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
- * which is purely virtual and must be reimplemented
- * in derived classes.
- */
- virtual
- void vmult (Vec &dst, const Vec &src) const;
-
- private:
-
- /**
- * Copy of the communicator object to
- * be used for this parallel matrix-free object.
- */
- MPI_Comm communicator;
-
- /**
- * Callback-function registered
- * as the matrix-vector multiplication
- * of this matrix-free object
- * called by PETSc routines.
- * This function must be static and
- * takes a PETSc matrix @p A,
- * and vectors @p src and @p dst,
- * where <i>dst = A*src</i>
- *
- * Source and destination must
- * not be the same vector.
- *
- * This function calls
- * <tt>vmult(Vec &dst, const Vec &src)</tt>
- * which should be reimplemented in
- * derived classes.
- */
- static int matrix_free_mult (Mat A, Vec src, Vec dst);
-
- /**
- * Do the actual work for the
- * respective @p reinit() function and
- * the matching constructor,
- * i.e. create a matrix object. Getting rid
- * of the previous matrix is left to
- * the caller.
- */
- void do_reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
- };
+ public:
+
+ /**
+ * Default constructor. Create an
+ * empty matrix object.
+ */
+ MatrixFree ();
+
+ /**
+ * Create a matrix object of
+ * dimensions @p m times @p n
+ * with communication happening
+ * over the provided @p communicator.
+ *
+ * For the meaning of the @p local_rows
+ * and @p local_columns parameters,
+ * see the PETScWrappers::MPI::SparseMatrix
+ * class documentation.
+ *
+ * Like other PETSc matrices, the
+ * matrix-free object needs a size and,
+ * to perform matrix-vector
+ * multiplications efficiently in
+ * parallel, also @p local_rows
+ * and @p local_columns. But in contrast
+ * to the PETSc::SparseMatrix classes, a
+ * PETSc matrix-free object does not need
+ * any estimate of the number of nonzero
+ * entries and has no option <tt>is_symmetric</tt>.
+ */
+ MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Create a matrix object of
+ * dimensions @p m times @p n
+ * with communication happening
+ * over the provided @p communicator.
+ *
+ * Like other PETSc matrices, the
+ * matrix-free object needs a size and,
+ * to perform matrix-vector
+ * multiplications efficiently in
+ * parallel, also @p local_rows
+ * and @p local_columns. But in contrast
+ * to the PETSc::SparseMatrix classes, a
+ * PETSc matrix-free object does not need
+ * any estimate of the number of nonzero
+ * entries and has no option <tt>is_symmetric</tt>.
+ */
+ MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Constructor for the serial case:
+ * Same function as
+ * <tt>MatrixFree()</tt>, see above,
+ * with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Constructor for the serial case:
+ * Same function as
+ * <tt>MatrixFree()</tt>, see above,
+ * with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Throw away the present matrix and
+ * generate one that has the same
+ * properties as if it were created by
+ * the constructor of this class with
+ * the same argument list as the
+ * present function.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Throw away the present matrix and
+ * generate one that has the same
+ * properties as if it were created by
+ * the constructor of this class with
+ * the same argument list as the
+ * present function.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Calls the @p reinit() function
+ * above with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Calls the @p reinit() function
+ * above with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this matrix.
+ */
+ const MPI_Comm &get_mpi_communicator () const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix (of type
+ * PETScWrappers::MPI::SparseMatrix),
+ * then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Matrix-vector multiplication: let
+ * <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this matrix. This
+ * function does the same as @p vmult()
+ * but takes the transposed matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void vmult_add (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as @p vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void Tvmult_add (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * The matrix-vector multiplication
+ * called by @p matrix_free_mult().
+ * This function can be reimplemented
+ * in derived classes for efficiency. The default
+ * implementation copies the given vectors
+ * into PETScWrappers::*::Vector
+ * and calls <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
+ * which is pure virtual and must be implemented
+ * in derived classes.
+ */
+ virtual
+ void vmult (Vec &dst, const Vec &src) const;
+
+ private:
+
+ /**
+ * Copy of the communicator object to
+ * be used for this parallel matrix-free object.
+ */
+ MPI_Comm communicator;
+
+ /**
+ * Callback-function registered
+ * as the matrix-vector multiplication
+ * of this matrix-free object
+ * called by PETSc routines.
+ * This function must be static and
+ * takes a PETSc matrix @p A,
+ * and vectors @p src and @p dst,
+ * where <i>dst = A*src</i>.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * This function calls
+ * <tt>vmult(Vec &dst, const Vec &src)</tt>
+ * which should be reimplemented in
+ * derived classes.
+ */
+ static int matrix_free_mult (Mat A, Vec src, Vec dst);
+
+ /**
+ * Do the actual work for the
+ * respective @p reinit() function and
+ * the matching constructor,
+ * i.e. create a matrix object. Getting rid
+ * of the previous matrix is left to
+ * the caller.
+ */
+ void do_reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+ };
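
To illustrate the interface just declared, a minimal matrix-free operator might be sketched as follows: it derives from MatrixFree, forwards its size information to the base class constructor, and implements the four pure virtual multiplication functions. Here the operator is simply the identity; the class name MyIdentityOperator, the chosen sizes, and the include path are assumptions made only for this sketch.

@code
#include <deal.II/lac/petsc_matrix_free.h>   // header path assumed

// A hypothetical matrix-free operator that acts as the identity matrix.
class MyIdentityOperator : public PETScWrappers::MatrixFree
{
  public:
    MyIdentityOperator (const MPI_Comm    &communicator,
                        const unsigned int n,
                        const unsigned int n_local)
      : PETScWrappers::MatrixFree (communicator, n, n, n_local, n_local)
    {}

    virtual void vmult (PETScWrappers::VectorBase       &dst,
                        const PETScWrappers::VectorBase &src) const
    {
      dst.equ (1., src);            // dst = I * src
    }

    virtual void Tvmult (PETScWrappers::VectorBase       &dst,
                         const PETScWrappers::VectorBase &src) const
    {
      vmult (dst, src);             // the identity is symmetric
    }

    virtual void vmult_add (PETScWrappers::VectorBase       &dst,
                            const PETScWrappers::VectorBase &src) const
    {
      dst.add (1., src);            // dst += I * src
    }

    virtual void Tvmult_add (PETScWrappers::VectorBase       &dst,
                             const PETScWrappers::VectorBase &src) const
    {
      vmult_add (dst, src);
    }
};
@endcode

Because the base class registers matrix_free_mult() as the multiplication of the underlying PETSc matrix object, an object of such a class can be handed to the iterative solver wrappers in the same way as an assembled matrix; reimplementing <tt>vmult(Vec &dst, const Vec &src)</tt> in addition is only necessary if the conversion performed by the default implementation turns out to be a bottleneck.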
namespace MPI
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
- * class implements the functions that are specific to the PETSc SparseMatrix
- * base objects for a blocked sparse matrix, and leaves the actual work
- * relaying most of the calls to the individual blocks to the functions
- * implemented in the base class. See there also for a description of when
- * this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
- * not have external objects for the sparsity patterns. Thus, one does not
- * determine the size of the individual blocks of a block matrix of this type
- * by attaching a block sparsity pattern, but by calling reinit() to set the
- * number of blocks and then by setting the size of each block separately. In
- * order to fix the data structures of the block matrix, it is then necessary
- * to let it know that we have changed the sizes of the underlying
- * matrices. For this, one has to call the collect_sizes() function, for much
- * the same reason as is documented with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
+ * class implements the functions that are specific to the PETSc SparseMatrix
+ * base objects for a blocked sparse matrix, and leaves the actual work
+ * relaying most of the calls to the individual blocks to the functions
+ * implemented in the base class. See there also for a description of when
+ * this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
+ * not have external objects for the sparsity patterns. Thus, one does not
+ * determine the size of the individual blocks of a block matrix of this type
+ * by attaching a block sparsity pattern, but by calling reinit() to set the
+ * number of blocks and then by setting the size of each block separately. In
+ * order to fix the data structures of the block matrix, it is then necessary
+ * to let it know that we have changed the sizes of the underlying
+ * matrices. For this, one has to call the collect_sizes() function, for much
+ * the same reason as is documented with the BlockSparsityPattern class.
+ *
+ * @ingroup Matrix1
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix ();
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the
- * block matrices need to be the
- * same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * unitialized ones, i.e. ones
- * for which also the sizes are
- * not yet set. You have to do
- * that by calling the @p reinit
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void Tvmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- */
- void collect_sizes ();
-
- /**
- * Return a reference to the MPI
- * communicator object in use with
- * this matrix.
- */
- const MPI_Comm & get_mpi_communicator () const;
-
- /**
- * Make the clear() function in the
- * base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix ();
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the
+ * block matrices need to be the
+ * same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this does usually
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keep the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+ * uninitialized ones, i.e. ones
+ * for which also the sizes are
+ * not yet set. You have to do
+ * that by calling the @p reinit
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ void vmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void vmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void vmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void vmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ void Tvmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void Tvmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void Tvmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void Tvmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ */
+ void collect_sizes ();
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this matrix.
+ */
+ const MPI_Comm &get_mpi_communicator () const;
+
+ /**
+ * Make the clear() function in the
+ * base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
};
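
The initialization order described in the class documentation above (first fix the block structure, then the sizes of the individual blocks, then call collect_sizes()) might look like the following sketch. The block sizes and the per-block reinit() arguments, which follow the interface referred to above for PETScWrappers::MPI::SparseMatrix (communicator, global sizes, local sizes, and an estimate of the number of nonzeros per row), are made up for illustration and assume a single-process run so that local sizes equal global ones.

@code
PETScWrappers::MPI::BlockSparseMatrix matrix;

// First fix the block structure: a 2x2 block matrix.
matrix.reinit (2, 2);

// Then give each block its own size and parallel layout.
const unsigned int n_u = 1000, n_p = 300;    // made-up block sizes
matrix.block(0,0).reinit (MPI_COMM_WORLD, n_u, n_u, n_u, n_u, 5);
matrix.block(0,1).reinit (MPI_COMM_WORLD, n_u, n_p, n_u, n_p, 5);
matrix.block(1,0).reinit (MPI_COMM_WORLD, n_p, n_u, n_p, n_u, 5);
matrix.block(1,1).reinit (MPI_COMM_WORLD, n_p, n_p, n_p, n_p, 5);

// Only now can the block matrix update its cached index information.
matrix.collect_sizes ();

// The notation matrix = 0 zeroes all entries but keeps the sparsity pattern.
matrix = 0;
@endcode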
namespace MPI
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * An implementation of block vectors based on the parallel vector class
- * implemented in PETScWrappers. While the base class provides for most of the
- * interface, this class handles the actual allocation of vectors and provides
- * functions that are specific to the underlying vector type.
- *
- * The model of distribution of data is such that each of the blocks is
- * distributed across all MPI processes named in the MPI communicator. I.e. we
- * don't just distribute the whole vector, but each component. In the
- * constructors and reinit() functions, one therefore not only has to specify
- * the sizes of the individual blocks, but also the number of elements of each
- * of these blocks to be stored on the local process.
- *
- * @ingroup Vectors
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * An implementation of block vectors based on the parallel vector class
+ * implemented in PETScWrappers. While the base class provides for most of the
+ * interface, this class handles the actual allocation of vectors and provides
+ * functions that are specific to the underlying vector type.
+ *
+ * The model of distribution of data is such that each of the blocks is
+ * distributed across all MPI processes named in the MPI communicator. I.e. we
+ * don't just distribute the whole vector, but each component. In the
+ * constructors and reinit() functions, one therefore not only has to specify
+ * the sizes of the individual blocks, but also the number of elements of each
+ * of these blocks to be stored on the local process.
+ *
+ * @ingroup Vectors
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Default constructor. Generate an
- * empty vector without any blocks.
- */
- BlockVector ();
-
- /**
- * Constructor. Generate a block
- * vector with @p n_blocks blocks,
- * each of which is a parallel
- * vector across @p communicator
- * with @p block_size elements of
- * which @p local_size elements are
- * stored on the present process.
- */
- explicit BlockVector (const unsigned int n_blocks,
- const MPI_Comm &communicator,
- const unsigned int block_size,
- const unsigned int local_size);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the parallel vector
- * to those of the given argument and
- * copy the elements.
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>block_sizes.size()</tt> and
- * initialize each block with
- * <tt>block_sizes[i]</tt> zero
- * elements. The individual blocks
- * are distributed across the given
- * communicator, and each store
- * <tt>local_elements[i]</tt>
- * elements on the present process.
- */
- BlockVector (const std::vector<unsigned int> &block_sizes,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &local_elements);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Copy operator: fill all components
- * of the vector that are locally
- * stored with the given scalar value.
- */
- BlockVector & operator = (const value_type s);
-
- /**
- * Copy operator for arguments of the
- * same type.
- */
- BlockVector &
- operator= (const BlockVector &V);
-
- /**
- * Copy the given sequential
- * (non-distributed) block vector
- * into the present parallel block
- * vector. It is assumed that they
- * have the same size, and this
- * operation does not change the
- * partitioning of the parallel
- * vectors by which its elements are
- * distributed across several MPI
- * processes. What this operation
- * therefore does is to copy that
- * chunk of the given vector @p v
- * that corresponds to elements of
- * the target vector that are stored
- * locally, and copies them, for each
- * of the individual blocks of this
- * object. Elements that are not
- * stored locally are not touched.
- *
- * This being a parallel vector, you
- * must make sure that @em all
- * processes call this function at
- * the same time. It is not possible
- * to change the local part of a
- * parallel vector on only one
- * process, independent of what other
- * processes do, with this function.
- */
- BlockVector &
- operator = (const PETScWrappers::BlockVector &v);
-
- /**
- * Reinitialize the BlockVector to
- * contain @p n_blocks of size @p
- * block_size, each of which stores
- * @p local_size elements
- * locally. The @p communicator
- * argument denotes which MPI channel
- * each of these blocks shall
- * communicate.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const unsigned int n_blocks,
- const MPI_Comm &communicator,
- const unsigned int block_size,
- const unsigned int local_size,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector such
- * that it contains
- * <tt>block_sizes.size()</tt>
- * blocks. Each block is
- * reinitialized to dimension
- * <tt>block_sizes[i]</tt>. Each of
- * them stores
- * <tt>local_sizes[i]</tt> elements
- * on the present process.
- *
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() of one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const std::vector<unsigned int> &block_sizes,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &local_sizes,
- const bool fast=false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast=false);
-
- /**
- * Return a reference to the MPI
- * communicator object in use with
- * this vector.
- */
- const MPI_Comm & get_mpi_communicator () const;
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
- /**
- * Exception
- */
- DeclException0 (ExcNonMatchingBlockVectors);
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Default constructor. Generate an
+ * empty vector without any blocks.
+ */
+ BlockVector ();
+
+ /**
+ * Constructor. Generate a block
+ * vector with @p n_blocks blocks,
+ * each of which is a parallel
+ * vector across @p communicator
+ * with @p block_size elements of
+ * which @p local_size elements are
+ * stored on the present process.
+ */
+ explicit BlockVector (const unsigned int n_blocks,
+ const MPI_Comm &communicator,
+ const unsigned int block_size,
+ const unsigned int local_size);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the parallel vector
+ * to those of the given argument and
+ * copy the elements.
+ */
+ BlockVector (const BlockVector &V);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>block_sizes.size()</tt> and
+ * initialize each block with
+ * <tt>block_sizes[i]</tt> zero
+ * elements. The individual blocks
+ * are distributed across the given
+ * communicator, and each store
+ * <tt>local_elements[i]</tt>
+ * elements on the present process.
+ */
+ BlockVector (const std::vector<unsigned int> &block_sizes,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &local_elements);
+
+ /**
+ * Destructor. Clears memory
+ */
+ ~BlockVector ();
+
+ /**
+ * Copy operator: fill all components
+ * of the vector that are locally
+ * stored with the given scalar value.
+ */
+ BlockVector &operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of the
+ * same type.
+ */
+ BlockVector &
+ operator= (const BlockVector &V);
+
+ /**
+ * Copy the given sequential
+ * (non-distributed) block vector
+ * into the present parallel block
+ * vector. It is assumed that they
+ * have the same size, and this
+ * operation does not change the
+ * partitioning of the parallel
+ * vectors by which its elements are
+ * distributed across several MPI
+ * processes. What this operation
+ * therefore does is, for each of the
+ * individual blocks of this object,
+ * to copy the chunk of the given
+ * vector @p v that corresponds to
+ * elements of the target vector that
+ * are stored locally. Elements that
+ * are not
+ * stored locally are not touched.
+ *
+ * This being a parallel vector, you
+ * must make sure that @em all
+ * processes call this function at
+ * the same time. It is not possible
+ * to change the local part of a
+ * parallel vector on only one
+ * process, independent of what other
+ * processes do, with this function.
+ */
+ BlockVector &
+ operator = (const PETScWrappers::BlockVector &v);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain @p n_blocks blocks of size @p
+ * block_size, each of which stores
+ * @p local_size elements
+ * locally. The @p communicator
+ * argument denotes the MPI communicator
+ * over which each of these blocks
+ * shall communicate.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const unsigned int n_blocks,
+ const MPI_Comm &communicator,
+ const unsigned int block_size,
+ const unsigned int local_size,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector such
+ * that it contains
+ * <tt>block_sizes.size()</tt>
+ * blocks. Each block is
+ * reinitialized to dimension
+ * <tt>block_sizes[i]</tt>. Each of
+ * them stores
+ * <tt>local_sizes[i]</tt> elements
+ * on the present process.
+ *
+ * If the number of blocks is the
+ * same as before this function
+ * was called, all vectors remain
+ * the same and reinit() is
+ * called for each vector.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const std::vector<unsigned int> &block_sizes,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &local_sizes,
+ const bool fast=false);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast=false);
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this vector.
+ */
+ const MPI_Comm &get_mpi_communicator () const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to the
+ * swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNonMatchingBlockVectors);
};
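
As a small illustration of the reinit() interface declared above, the following sketch creates a parallel block vector with two blocks of different sizes. The sizes are made up, and for simplicity the sketch assumes a single-process run so that every block stores all of its elements locally.

@code
std::vector<unsigned int> block_sizes (2);
block_sizes[0] = 1000;
block_sizes[1] = 300;

// One process: every block stores all of its elements locally.
const std::vector<unsigned int> local_sizes = block_sizes;

PETScWrappers::MPI::BlockVector v;
v.reinit (block_sizes, MPI_COMM_WORLD, local_sizes);

// Set all locally stored elements to a scalar value ...
v = 1.;

// ... and cheaply exchange contents with a vector of equal block structure.
PETScWrappers::MPI::BlockVector w (v);
w.swap (v);
@endcode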
- /*@}*/
+ /*@}*/
- /*----------------------- Inline functions ----------------------------------*/
+ /*----------------------- Inline functions ----------------------------------*/
inline
class PreconditionerBase;
- /**
- * Base class for solver classes using the PETSc solvers. Since solvers in
- * PETSc are selected based on flags passed to a generic solver object,
- * basically all the actual solver calls happen in this class, and derived
- * classes simply set the right flags to select one solver or another, or to
- * set certain parameters for individual solvers.
- *
- * Optionally, the user can create a solver derived from the
- * SolverBase class and can set the default arguments necessary to
- * solve the linear system of equations with SolverControl. These
- * default options can be overridden by specifying command line
- * arguments of the form @p -ksp_*. For example,
- * @p -ksp_monitor_true_residual prints out true residual norm
- * (unpreconditioned) at each iteration and @p -ksp_view provides
- * information about the linear solver and the preconditioner used in
- * the current context. The type of the solver can also be changed
- * during runtime by specifying @p -ksp_type {richardson, cg, gmres,
- * fgmres, ..} to dynamically test the optimal solver along with a
- * suitable preconditioner set using @p -pc_type {jacobi, bjacobi,
- * ilu, lu, ..}. There are several other command line options
- * available to modify the behavior of the PETSc linear solver and can
- * be obtained from the <a
- * href="http://www.mcs.anl.gov/petsc">documentation and manual
- * pages</a>.
- *
- * @note Repeated calls to solve() on a solver object with a Preconditioner
- * must be used with care. The preconditioner is initialized in the first call
- * to solve() and subsequent calls reuse the solver and preconditioner
- * object. This is done for performance reasons. The solver and preconditioner
- * can be reset by calling reset().
- *
- * One of the gotchas of PETSc is that -- in particular in MPI mode -- it
- * often does not produce very helpful error messages. In order to save
- * other users some time in searching a hard to track down error, here is
- * one situation and the error message one gets there:
- * when you don't specify an MPI communicator to your solver's constructor. In
- * this case, you will get an error of the following form from each of your
- * parallel processes:
- * @verbatim
- * [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c
- * [1]PETSC ERROR: Arguments must have same communicators!
- * [1]PETSC ERROR: Different communicators in the two objects: Argument # 1 and 2!
- * [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c
- * @endverbatim
- *
- * This error, on which one can spend a very long time figuring out
- * what exactly goes wrong, results from not specifying an MPI
- * communicator. Note that the communicator @em must match that of the
- * matrix and all vectors in the linear system which we want to
- * solve. Aggravating the situation is the fact that the default
- * argument to the solver classes, @p PETSC_COMM_SELF, is the
- * appropriate argument for the sequential case (which is why it is
- * the default argument), so this error only shows up in parallel
- * mode.
- *
- * @ingroup PETScWrappers
- * @author Wolfgang Bangerth, 2004
- */
+ /**
+ * Base class for solver classes using the PETSc solvers. Since solvers in
+ * PETSc are selected based on flags passed to a generic solver object,
+ * basically all the actual solver calls happen in this class, and derived
+ * classes simply set the right flags to select one solver or another, or to
+ * set certain parameters for individual solvers.
+ *
+ * Optionally, the user can create a solver derived from the
+ * SolverBase class and can set the default arguments necessary to
+ * solve the linear system of equations with SolverControl. These
+ * default options can be overridden by specifying command line
+ * arguments of the form @p -ksp_*. For example,
+ * @p -ksp_monitor_true_residual prints out the true (unpreconditioned)
+ * residual norm at each iteration and @p -ksp_view provides
+ * information about the linear solver and the preconditioner used in
+ * the current context. The type of the solver can also be changed
+ * during runtime by specifying @p -ksp_type {richardson, cg, gmres,
+ * fgmres, ..} to dynamically test the optimal solver along with a
+ * suitable preconditioner set using @p -pc_type {jacobi, bjacobi,
+ * ilu, lu, ..}. There are several other command line options
+ * available to modify the behavior of the PETSc linear solver and can
+ * be obtained from the <a
+ * href="http://www.mcs.anl.gov/petsc">documentation and manual
+ * pages</a>.
+ *
+ * @note Repeated calls to solve() on a solver object with a Preconditioner
+ * must be used with care. The preconditioner is initialized in the first call
+ * to solve() and subsequent calls reuse the solver and preconditioner
+ * object. This is done for performance reasons. The solver and preconditioner
+ * can be reset by calling reset().
+ *
+ * One of the gotchas of PETSc is that -- in particular in MPI mode -- it
+ * often does not produce very helpful error messages. In order to save
+ * other users some time in searching a hard to track down error, here is
+ * one situation and the error message one gets there:
+ * when you don't specify an MPI communicator to your solver's constructor. In
+ * this case, you will get an error of the following form from each of your
+ * parallel processes:
+ * @verbatim
+ * [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c
+ * [1]PETSC ERROR: Arguments must have same communicators!
+ * [1]PETSC ERROR: Different communicators in the two objects: Argument # 1 and 2!
+ * [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c
+ * @endverbatim
+ *
+ * This error, on which one can spend a very long time figuring out
+ * what exactly goes wrong, results from not specifying an MPI
+ * communicator. Note that the communicator @em must match that of the
+ * matrix and all vectors in the linear system which we want to
+ * solve. Aggravating the situation is the fact that the default
+ * argument to the solver classes, @p PETSC_COMM_SELF, is the
+ * appropriate argument for the sequential case (which is why it is
+ * the default argument), so this error only shows up in parallel
+ * mode.
+ *
+ * @ingroup PETScWrappers
+ * @author Wolfgang Bangerth, 2004
+ */
class SolverBase
{
- public:
- /**
- * Constructor. Takes the solver
- * control object and the MPI
- * communicator over which parallel
- * computations are to happen.
- *
- * Note that the communicator used here
- * must match the communicator used in
- * the system matrix, solution, and
- * right hand side object of the solve
- * to be done with this
- * solver. Otherwise, PETSc will
- * generate hard to track down errors,
- * see the documentation of the
- * SolverBase class.
- */
- SolverBase (SolverControl &cn,
- const MPI_Comm &mpi_communicator);
-
- /**
- * Destructor.
- */
- virtual ~SolverBase ();
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on the
- * information provided by derived
- * classes and the object passed as a
- * preconditioner, one of the linear
- * solvers and preconditioners of PETSc
- * is chosen. Repeated calls to
- * solve() do not reconstruct the
- * preconditioner for performance
- * reasons. See class Documentation.
- */
- void
- solve (const MatrixBase &A,
- VectorBase &x,
- const VectorBase &b,
- const PreconditionerBase &preconditioner);
-
-
- /**
- * Resets the contained preconditioner
- * and solver object. See class
- * description for more details.
- */
- virtual void reset();
-
-
- /**
- * Sets a prefix name for the solver
- * object. Useful when customizing the
- * PETSc KSP object with command-line
- * options.
- */
- void set_prefix(const std::string &prefix);
-
-
- /**
- * Access to object that controls
- * convergence.
- */
- SolverControl & control() const;
-
- /**
- * Exception
- */
- DeclException1 (ExcPETScError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a PETSc function");
-
- protected:
-
- /**
- * Reference to the object that
- * controls convergence of the
- * iterative solver. In fact, for these
- * PETSc wrappers, PETSc does so
- * itself, but we copy the data from
- * this object before starting the
- * solution process, and copy the data
- * back into it afterwards.
- */
- SolverControl &solver_control;
-
- /**
- * Copy of the MPI communicator object
- * to be used for the solver.
- */
- const MPI_Comm mpi_communicator;
-
- /**
- * Function that takes a Krylov
- * Subspace Solver context object, and
- * sets the type of solver that is
- * requested by the derived class.
- */
- virtual void set_solver_type (KSP &ksp) const = 0;
-
- /**
- * Solver prefix name to qualify options
- * specific to the PETSc KSP object in the
- * current context.
- * Note: A hyphen (-) must NOT be given
- * at the beginning of the prefix name.
- * The first character of all runtime
- * options is AUTOMATICALLY the hyphen.
- */
- std::string prefix_name;
-
- private:
- /**
- * A function that is used in PETSc as
- * a callback to check on
- * convergence. It takes the
- * information provided from PETSc and
- * checks it against deal.II's own
- * SolverControl objects to see if
- * convergence has been reached.
- */
- static
+ public:
+ /**
+ * Constructor. Takes the solver
+ * control object and the MPI
+ * communicator over which parallel
+ * computations are to happen.
+ *
+ * Note that the communicator used here
+ * must match the communicator used in
+ * the system matrix, solution, and
+ * right hand side object of the solve
+ * to be done with this
+ * solver. Otherwise, PETSc will
+ * generate hard to track down errors,
+ * see the documentation of the
+ * SolverBase class.
+ */
+ SolverBase (SolverControl &cn,
+ const MPI_Comm &mpi_communicator);
+
+ /**
+ * Destructor.
+ */
+ virtual ~SolverBase ();
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Depending on the
+ * information provided by derived
+ * classes and the object passed as a
+ * preconditioner, one of the linear
+ * solvers and preconditioners of PETSc
+ * is chosen. Repeated calls to
+ * solve() do not reconstruct the
+ * preconditioner for performance
+ * reasons. See class Documentation.
+ */
+ void
+ solve (const MatrixBase &A,
+ VectorBase &x,
+ const VectorBase &b,
+ const PreconditionerBase &preconditioner);
+
+
+ /**
+ * Resets the contained preconditioner
+ * and solver object. See class
+ * description for more details.
+ */
+ virtual void reset();
+
+
+ /**
+ * Sets a prefix name for the solver
+ * object. Useful when customizing the
+ * PETSc KSP object with command-line
+ * options.
+ */
+ void set_prefix(const std::string &prefix);
+
+
+ /**
+ * Access to object that controls
+ * convergence.
+ */
+ SolverControl &control() const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcPETScError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a PETSc function");
+
+ protected:
+
+ /**
+ * Reference to the object that
+ * controls convergence of the
+ * iterative solver. In fact, for these
+ * PETSc wrappers, PETSc does so
+ * itself, but we copy the data from
+ * this object before starting the
+ * solution process, and copy the data
+ * back into it afterwards.
+ */
+ SolverControl &solver_control;
+
+ /**
+ * Copy of the MPI communicator object
+ * to be used for the solver.
+ */
+ const MPI_Comm mpi_communicator;
+
+ /**
+ * Function that takes a Krylov
+ * Subspace Solver context object, and
+ * sets the type of solver that is
+ * requested by the derived class.
+ */
+ virtual void set_solver_type (KSP &ksp) const = 0;
+
+ /**
+ * Solver prefix name to qualify options
+ * specific to the PETSc KSP object in the
+ * current context.
+ * Note: A hyphen (-) must NOT be given
+ * at the beginning of the prefix name.
+ * The first character of all runtime
+ * options is AUTOMATICALLY the hyphen.
+ */
+ std::string prefix_name;
+
+ private:
+ /**
+ * A function that is used in PETSc as
+ * a callback to check on
+ * convergence. It takes the
+ * information provided from PETSc and
+ * checks it against deal.II's own
+ * SolverControl objects to see if
+ * convergence has been reached.
+ */
+ static
#ifdef PETSC_USE_64BIT_INDICES
- PetscErrorCode
+ PetscErrorCode
#else
- int
+ int
#endif
- convergence_test (KSP ksp,
+ convergence_test (KSP ksp,
#ifdef PETSC_USE_64BIT_INDICES
- const PetscInt iteration,
+ const PetscInt iteration,
#else
- const int iteration,
+ const int iteration,
#endif
- const PetscReal residual_norm,
- KSPConvergedReason *reason,
- void *solver_control);
-
- /**
- * A structure that contains the PETSc
- * solver and preconditioner
- * objects. This object is preserved
- * between subsequent calls to the
- * solver if the same preconditioner is
- * used as in the previous solver
- * step. This may save some computation
- * time, if setting up a preconditioner
- * is expensive, such as in the case of
- * an ILU for example.
- *
- * The actual declaration of this class
- * is complicated by the fact that
- * PETSc changed its solver interface
- * completely and incompatibly between
- * versions 2.1.6 and 2.2.0 :-(
- *
- * Objects of this type are explicitly
- * created, but are destroyed when the
- * surrounding solver object goes out
- * of scope, or when we assign a new
- * value to the pointer to this
- * object. The respective *Destroy
- * functions are therefore written into
- * the destructor of this object, even
- * though the object does not have a
- * constructor.
- */
- struct SolverData
- {
- /**
- * Destructor
- */
- ~SolverData ();
-
- /**
- * Objects for Krylov subspace
- * solvers and preconditioners.
- */
- KSP ksp;
- PC pc;
- };
-
- /**
- * Pointer to an object that stores the
- * solver context. This is recreated in
- * the main solver routine if
- * necessary.
- */
- std_cxx1x::shared_ptr<SolverData> solver_data;
+ const PetscReal residual_norm,
+ KSPConvergedReason *reason,
+ void *solver_control);
+
+ /**
+ * A structure that contains the PETSc
+ * solver and preconditioner
+ * objects. This object is preserved
+ * between subsequent calls to the
+ * solver if the same preconditioner is
+ * used as in the previous solver
+ * step. This may save some computation
+ * time, if setting up a preconditioner
+ * is expensive, such as in the case of
+ * an ILU for example.
+ *
+ * The actual declaration of this class
+ * is complicated by the fact that
+ * PETSc changed its solver interface
+ * completely and incompatibly between
+ * versions 2.1.6 and 2.2.0 :-(
+ *
+ * Objects of this type are explicitly
+ * created, but are destroyed when the
+ * surrounding solver object goes out
+ * of scope, or when we assign a new
+ * value to the pointer to this
+ * object. The respective *Destroy
+ * functions are therefore written into
+ * the destructor of this object, even
+ * though the object does not have a
+ * constructor.
+ */
+ struct SolverData
+ {
+ /**
+ * Destructor
+ */
+ ~SolverData ();
+
+ /**
+ * Objects for Krylov subspace
+ * solvers and preconditioners.
+ */
+ KSP ksp;
+ PC pc;
+ };
+
+ /**
+ * Pointer to an object that stores the
+ * solver context. This is recreated in
+ * the main solver routine if
+ * necessary.
+ */
+ std_cxx1x::shared_ptr<SolverData> solver_data;
};
*/
namespace PETScWrappers
{
- // forward declaration
+ // forward declaration
class VectorBase;
- /**
- * @cond internal
- */
+ /**
+ * @cond internal
+ */
- /**
- * A namespace for internal implementation details of the PETScWrapper
- * members.
- * @ingroup PETScWrappers
- */
+ /**
+ * A namespace for internal implementation details of the PETScWrapper
+ * members.
+ * @ingroup PETScWrappers
+ */
namespace internal
{
- /**
- * Since access to PETSc vectors only
- * goes through functions, rather than by
- * obtaining a reference to a vector
- * element, we need a wrapper class that
- * acts as if it was a reference, and
- * basically redirects all accesses (read
- * and write) to member functions of this
- * class.
- *
- * This class implements such a wrapper:
- * it is initialized with a vector and an
- * element within it, and has a
- * conversion operator to extract the
- * scalar value of this element. It also
- * has a variety of assignment operator
- * for writing to this one element.
- * @ingroup PETScWrappers
- */
+ /**
+ * Since access to PETSc vectors only
+ * goes through functions, rather than by
+ * obtaining a reference to a vector
+ * element, we need a wrapper class that
+ * acts as if it were a reference, and
+ * basically redirects all accesses (read
+ * and write) to member functions of this
+ * class.
+ *
+ * This class implements such a wrapper:
+ * it is initialized with a vector and an
+ * element within it, and has a
+ * conversion operator to extract the
+ * scalar value of this element. It also
+ * has a variety of assignment operators
+ * for writing to this one element.
+ * @ingroup PETScWrappers
+ */
class VectorReference
{
- private:
- /**
- * Constructor. It is made private so
- * as to only allow the actual vector
- * class to create it.
- */
- VectorReference (const VectorBase &vector,
- const unsigned int index);
-
- public:
- /**
- * This looks like a copy operator,
- * but does something different than
- * usual. In particular, it does not
- * copy the member variables of this
- * reference. Rather, it handles the
- * situation where we have two
- * vectors @p v and @p w, and assign
- * elements like in
- * <tt>v(i)=w(i)</tt>. Here, both
- * left and right hand side of the
- * assignment have data type
- * VectorReference, but what we
- * really mean is to assign the
- * vector elements represented by the
- * two references. This operator
- * implements this operation. Note
- * also that this allows us to make
- * the assignment operator const.
- */
- const VectorReference & operator = (const VectorReference &r) const;
-
- /**
- * The same function as above, but
- * for non-const reference
- * objects. The function is needed
- * since the compiler might otherwise
- * automatically generate a copy
- * operator for non-const objects.
- */
- VectorReference & operator = (const VectorReference &r);
-
- /**
- * Set the referenced element of the
- * vector to <tt>s</tt>.
- */
- const VectorReference & operator = (const PetscScalar &s) const;
-
- /**
- * Add <tt>s</tt> to the referenced
- * element of the vector.
- */
- const VectorReference & operator += (const PetscScalar &s) const;
-
- /**
- * Subtract <tt>s</tt> from the
- * referenced element of the vector.
- */
- const VectorReference & operator -= (const PetscScalar &s) const;
-
- /**
- * Multiply the referenced element of
- * the vector by <tt>s</tt>.
- */
- const VectorReference & operator *= (const PetscScalar &s) const;
-
- /**
- * Divide the referenced element of
- * the vector by <tt>s</tt>.
- */
- const VectorReference & operator /= (const PetscScalar &s) const;
-
- /**
- * Convert the reference to an actual
- * value, i.e. return the value of
- * the referenced element of the
- * vector.
- */
- operator PetscScalar () const;
-
- /**
- * Exception
- */
- DeclException1 (ExcPETScError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a PETSc function");
- /**
- * Exception
- */
- DeclException3 (ExcAccessToNonlocalElement,
- int, int, int,
- << "You tried to access element " << arg1
- << " of a distributed vector, but only elements "
- << arg2 << " through " << arg3
- << " are stored locally and can be accessed.");
- /**
- * Exception.
- */
- DeclException2 (ExcWrongMode,
- int, int,
- << "You tried to do a "
- << (arg1 == 1 ?
- "'set'" :
- (arg1 == 2 ?
- "'add'" : "???"))
- << " operation but the vector is currently in "
- << (arg2 == 1 ?
- "'set'" :
- (arg2 == 2 ?
- "'add'" : "???"))
- << " mode. You first have to call 'compress()'.");
-
- private:
- /**
- * Point to the vector we are
- * referencing.
- */
- const VectorBase &vector;
-
- /**
- * Index of the referenced element of
- * the vector.
- */
- const unsigned int index;
-
- /**
- * Make the vector class a friend, so
- * that it can create objects of the
- * present type.
- */
- friend class ::dealii::PETScWrappers::VectorBase;
+ private:
+ /**
+ * Constructor. It is made private so
+ * as to only allow the actual vector
+ * class to create it.
+ */
- VectorReference (const VectorBase &vector,
++ VectorReference (const VectorBase &vector,
+ const unsigned int index);
+
+ public:
+ /**
+ * This looks like a copy operator,
+ * but does something different than
+ * usual. In particular, it does not
+ * copy the member variables of this
+ * reference. Rather, it handles the
+ * situation where we have two
+ * vectors @p v and @p w, and assign
+ * elements like in
+ * <tt>v(i)=w(i)</tt>. Here, both
+ * left and right hand side of the
+ * assignment have data type
+ * VectorReference, but what we
+ * really mean is to assign the
+ * vector elements represented by the
+ * two references. This operator
+ * implements this operation. Note
+ * also that this allows us to make
+ * the assignment operator const.
+ */
+ const VectorReference &operator = (const VectorReference &r) const;
+
+ /**
+ * The same function as above, but
+ * for non-const reference
+ * objects. The function is needed
+ * since the compiler might otherwise
+ * automatically generate a copy
+ * operator for non-const objects.
+ */
+ VectorReference &operator = (const VectorReference &r);
+
+ /**
+ * Set the referenced element of the
+ * vector to <tt>s</tt>.
+ */
+ const VectorReference &operator = (const PetscScalar &s) const;
+
+ /**
+ * Add <tt>s</tt> to the referenced
+ * element of the vector.
+ */
+ const VectorReference &operator += (const PetscScalar &s) const;
+
+ /**
+ * Subtract <tt>s</tt> from the
+ * referenced element of the vector.
+ */
+ const VectorReference &operator -= (const PetscScalar &s) const;
+
+ /**
+ * Multiply the referenced element of
+ * the vector by <tt>s</tt>.
+ */
+ const VectorReference &operator *= (const PetscScalar &s) const;
+
+ /**
+ * Divide the referenced element of
+ * the vector by <tt>s</tt>.
+ */
+ const VectorReference &operator /= (const PetscScalar &s) const;
+
+ /**
+ * Convert the reference to an actual
+ * value, i.e. return the value of
+ * the referenced element of the
+ * vector.
+ */
+ operator PetscScalar () const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcPETScError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a PETSc function");
+ /**
+ * Exception
+ */
+ DeclException3 (ExcAccessToNonlocalElement,
+ int, int, int,
+ << "You tried to access element " << arg1
+ << " of a distributed vector, but only elements "
+ << arg2 << " through " << arg3
+ << " are stored locally and can be accessed.");
+ /**
+ * Exception.
+ */
+ DeclException2 (ExcWrongMode,
+ int, int,
+ << "You tried to do a "
+ << (arg1 == 1 ?
+ "'set'" :
+ (arg1 == 2 ?
+ "'add'" : "???"))
+ << " operation but the vector is currently in "
+ << (arg2 == 1 ?
+ "'set'" :
+ (arg2 == 2 ?
+ "'add'" : "???"))
+ << " mode. You first have to call 'compress()'.");
+
+ private:
+ /**
+ * Point to the vector we are
+ * referencing.
+ */
+ const VectorBase &vector;
+
+ /**
+ * Index of the referenced element of
+ * the vector.
+ */
+ const unsigned int index;
+
+ /**
+ * Make the vector class a friend, so
+ * that it can create objects of the
+ * present type.
+ */
+ friend class ::dealii::PETScWrappers::VectorBase;
};
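// Illustrative sketch (not part of the patch): what the reference class
// above enables in user code. Assume v and w are vectors of one of the
// derived PETScWrappers vector classes, both already sized:
//
//   v(3) = 2.5;                    // operator = (const PetscScalar &)
//   v(4) = w(4);                   // element-wise assignment via two
//                                  // VectorReference objects
//   v.compress (VectorOperation::insert);
//
//   v(3) += 1.0;                   // switching from 'set' to 'add' mode
//   v.compress (VectorOperation::add);
//
//   const PetscScalar x = v(3);    // read access via the conversion operator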
}
- /**
- * @endcond
- */
-
-
- /**
- * Base class for all vector classes that are implemented on top of the PETSc
- * vector types. Since in PETSc all vector types (i.e. sequential and parallel
- * ones) are built by filling the contents of an abstract object that is only
- * referenced through a pointer of a type that is independent of the actual
- * vector type, we can implement almost all functionality of vectors in this
- * base class. Derived classes will then only have to provide the
- * functionality to create one or the other kind of vector.
- *
- * The interface of this class is modeled after the existing Vector
- * class in deal.II. It has almost the same member functions, and is often
- * exchangable. However, since PETSc only supports a single scalar type
- * (either double, float, or a complex data type), it is not templated, and
- * only works with whatever your PETSc installation has defined the data type
- * @p PetscScalar to.
- *
- * Note that PETSc only guarantees that operations do what you expect if the
- * functions @p VecAssemblyBegin and @p VecAssemblyEnd have been called
- * after vector assembly. Therefore, you need to call Vector::compress()
- * before you actually use the vector.
- *
- * @ingroup PETScWrappers
- * @author Wolfgang Bangerth, 2004
- */
+ /**
+ * @endcond
+ */
+
+
+ /**
+ * Base class for all vector classes that are implemented on top of the PETSc
+ * vector types. Since in PETSc all vector types (i.e. sequential and parallel
+ * ones) are built by filling the contents of an abstract object that is only
+ * referenced through a pointer of a type that is independent of the actual
+ * vector type, we can implement almost all functionality of vectors in this
+ * base class. Derived classes will then only have to provide the
+ * functionality to create one or the other kind of vector.
+ *
+ * The interface of this class is modeled after the existing Vector
+ * class in deal.II. It has almost the same member functions, and is often
+ * exchangeable. However, since PETSc only supports a single scalar type
+ * (either double, float, or a complex data type), it is not templated, and
+ * only works with whatever your PETSc installation has defined the data type
+ * @p PetscScalar to.
+ *
+ * Note that PETSc only guarantees that operations do what you expect if the
+ * functions @p VecAssemblyBegin and @p VecAssemblyEnd have been called
+ * after vector assembly. Therefore, you need to call Vector::compress()
+ * before you actually use the vector.
+ *
+ * @ingroup PETScWrappers
+ * @author Wolfgang Bangerth, 2004
+ */
class VectorBase : public Subscriptor
{
- public:
- /**
- * Declare some of the standard types
- * used in all containers. These types
- * parallel those in the <tt>C++</tt>
- * standard libraries <tt>vector<...></tt>
- * class.
- */
- typedef PetscScalar value_type;
- typedef PetscReal real_type;
- typedef std::size_t size_type;
- typedef internal::VectorReference reference;
- typedef const internal::VectorReference const_reference;
-
- /**
- * Default constructor. It doesn't do
- * anything, derived classes will have
- * to initialize the data.
- */
- VectorBase ();
-
- /**
- * Copy constructor. Sets the dimension
- * to that of the given vector, and
- * copies all elements.
- */
- VectorBase (const VectorBase &v);
-
- /**
- * Initialize a Vector from a PETSc Vec
- * object. Note that we do not copy the
- * vector and we do not attain
- * ownership, so we do not destroy the
- * PETSc object in the destructor.
- */
- explicit VectorBase (const Vec & v);
-
- /**
- * Destructor
- */
- virtual ~VectorBase ();
-
- /**
- * Compress the underlying
- * representation of the PETSc object,
- * i.e. flush the buffers of the vector
- * object if it has any. This function
- * is necessary after writing into a
- * vector element-by-element and before
- * anything else can be done on it.
- *
- * See @ref GlossCompress "Compressing distributed objects"
- * for more information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Set all components of the vector to
- * the given number @p s. Simply pass
- * this down to the individual block
- * objects, but we still need to declare
- * this function to make the example
- * given in the discussion about making
- * the constructor explicit work.
- *
- *
- * Since the semantics of assigning a
- * scalar to a vector are not
- * immediately clear, this operator
- * should really only be used if you
- * want to set the entire vector to
- * zero. This allows the intuitive
- * notation <tt>v=0</tt>. Assigning
- * other values is deprecated and may
- * be disallowed in the future.
- */
- VectorBase & operator = (const PetscScalar s);
-
- /**
- * Test for equality. This function
- * assumes that the present vector and
- * the one to compare with have the same
- * size already, since comparing vectors
- * of different sizes makes not much
- * sense anyway.
- */
- bool operator == (const VectorBase &v) const;
-
- /**
- * Test for inequality. This function
- * assumes that the present vector and
- * the one to compare with have the same
- * size already, since comparing vectors
- * of different sizes makes not much
- * sense anyway.
- */
- bool operator != (const VectorBase &v) const;
-
- /**
- * Return the global dimension of the
- * vector.
- */
- unsigned int size () const;
-
- /**
- * Return the local dimension of the
- * vector, i.e. the number of elements
- * stored on the present MPI
- * process. For sequential vectors,
- * this number is the same as size(),
- * but for parallel vectors it may be
- * smaller.
- *
- * To figure out which elements
- * exactly are stored locally,
- * use local_range().
- */
- unsigned int local_size () const;
-
- /**
- * Return a pair of indices
- * indicating which elements of
- * this vector are stored
- * locally. The first number is
- * the index of the first
- * element stored, the second
- * the index of the one past
- * the last one that is stored
- * locally. If this is a
- * sequential vector, then the
- * result will be the pair
- * (0,N), otherwise it will be
- * a pair (i,i+n), where
- * <tt>n=local_size()</tt>.
- */
- std::pair<unsigned int, unsigned int>
- local_range () const;
-
- /**
- * Return whether @p index is
- * in the local range or not,
- * see also local_range().
- */
- bool in_local_range (const unsigned int index) const;
-
- /**
- * Return if the vector contains ghost
- * elements.
- */
- bool has_ghost_elements() const;
-
- /**
- * Provide access to a given element,
- * both read and write.
- */
- reference
- operator () (const unsigned int index);
-
- /**
- * Provide read-only access to an
- * element.
- */
- PetscScalar
- operator () (const unsigned int index) const;
-
- /**
- * Provide access to a given
- * element, both read and write.
- *
- * Exactly the same as operator().
- */
- reference
- operator [] (const unsigned int index);
-
- /**
- * Provide read-only access to an
- * element. This is equivalent to
- * the <code>el()</code> command.
- *
- * Exactly the same as operator().
- */
- PetscScalar
- operator [] (const unsigned int index) const;
-
- /**
- * A collective set operation: instead
- * of setting individual elements of a
- * vector, this function allows to set
- * a whole set of elements at once. The
- * indices of the elements to be set
- * are stated in the first argument,
- * the corresponding values in the
- * second.
- */
- void set (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
-
- /**
- * A collective add operation: This
- * function adds a whole set of values
- * stored in @p values to the vector
- * components specified by @p indices.
- */
- void add (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- void add (const std::vector<unsigned int> &indices,
- const ::dealii::Vector<PetscScalar> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const PetscScalar *values);
-
- /**
- * Return the scalar product of two
- * vectors. The vectors must have the
- * same size.
- */
- PetscScalar operator * (const VectorBase &vec) const;
-
- /**
- * Return square of the $l_2$-norm.
- */
- real_type norm_sqr () const;
-
- /**
- * Mean value of the elements of
- * this vector.
- */
- PetscScalar mean_value () const;
-
- /**
- * $l_1$-norm of the vector.
- * The sum of the absolute values.
- */
- real_type l1_norm () const;
-
- /**
- * $l_2$-norm of the vector. The
- * square root of the sum of the
- * squares of the elements.
- */
- real_type l2_norm () const;
-
- /**
- * $l_p$-norm of the vector. The
- * pth root of the sum of the pth
- * powers of the absolute values
- * of the elements.
- */
- real_type lp_norm (const real_type p) const;
-
- /**
- * Maximum absolute value of the
- * elements.
- */
- real_type linfty_norm () const;
-
- /**
- * Normalize vector by dividing
- * by the $l_2$-norm of the
- * vector. Return vector norm
- * before normalization.
- */
- real_type normalize () const;
-
- /**
- * Return vector component with
- * the minimal magnitude.
- */
- real_type min () const;
-
- /**
- * Return vector component with
- * the maximal magnitude.
- */
- real_type max () const;
-
-
- /**
- * Replace every element in a
- * vector with its absolute
- * value.
- */
- VectorBase & abs ();
-
- /**
- * Conjugate a vector.
- */
- VectorBase & conjugate ();
-
- /**
- * A collective piecewise
- * multiply operation on
- * <code>this</code> vector
- * with itself. TODO: The model
- * for this function should be
- * similer to add ().
- */
- VectorBase & mult ();
-
- /**
- * Same as above, but a
- * collective piecewise
- * multiply operation of
- * <code>this</code> vector
- * with <b>v</b>.
- */
- VectorBase & mult (const VectorBase &v);
-
- /**
- * Same as above, but a
- * collective piecewise
- * multiply operation of
- * <b>u</b> with <b>v</b>.
- */
- VectorBase & mult (const VectorBase &u,
- const VectorBase &v);
-
- /**
- * Return whether the vector contains
- * only elements with value zero. This
- * function is mainly for internal
- * consistency checks and should
- * seldom be used when not in debug
- * mode since it uses quite some time.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries
- * are zero or positive. This function
- * is used, for example, to check
- * whether refinement indicators are
- * really all positive (or zero).
- */
- bool is_non_negative () const;
-
- /**
- * Multiply the entire vector by a
- * fixed factor.
- */
- VectorBase & operator *= (const PetscScalar factor);
-
- /**
- * Divide the entire vector by a
- * fixed factor.
- */
- VectorBase & operator /= (const PetscScalar factor);
-
- /**
- * Add the given vector to the present
- * one.
- */
- VectorBase & operator += (const VectorBase &V);
-
- /**
- * Subtract the given vector from the
- * present one.
- */
- VectorBase & operator -= (const VectorBase &V);
-
- /**
- * Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
- */
- void add (const PetscScalar s);
-
- /**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const VectorBase &V);
-
- /**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this += a*V</tt>.
- */
- void add (const PetscScalar a, const VectorBase &V);
-
- /**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this += a*V+b*W</tt>.
- */
- void add (const PetscScalar a, const VectorBase &V,
- const PetscScalar b, const VectorBase &W);
-
- /**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this)+V</tt>.
- */
- void sadd (const PetscScalar s,
- const VectorBase &V);
-
- /**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this)+a*V</tt>.
- */
- void sadd (const PetscScalar s,
- const PetscScalar a,
- const VectorBase &V);
-
- /**
- * Scaling and multiple addition.
- */
- void sadd (const PetscScalar s,
- const PetscScalar a,
- const VectorBase &V,
- const PetscScalar b,
- const VectorBase &W);
-
- /**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
- */
- void sadd (const PetscScalar s,
- const PetscScalar a,
- const VectorBase &V,
- const PetscScalar b,
- const VectorBase &W,
- const PetscScalar c,
- const VectorBase &X);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- void scale (const VectorBase &scaling_factors);
-
- /**
- * Assignment <tt>*this = a*V</tt>.
- */
- void equ (const PetscScalar a, const VectorBase &V);
-
- /**
- * Assignment <tt>*this = a*V + b*W</tt>.
- */
- void equ (const PetscScalar a, const VectorBase &V,
- const PetscScalar b, const VectorBase &W);
-
- /**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
- *
- * This vector is appropriately
- * scaled to hold the result.
- *
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
- */
- void ratio (const VectorBase &a,
- const VectorBase &b);
-
- /**
- * Updates the ghost values of this
- * vector. This is necessary after any
- * modification before reading ghost
- * values.
- */
- void update_ghost_values() const;
-
- /**
- * Print to a
- * stream. @p precision denotes
- * the desired precision with
- * which values shall be printed,
- * @p scientific whether
- * scientific notation shall be
- * used. If @p across is
- * @p true then the vector is
- * printed in a line, while if
- * @p false then the elements
- * are printed on a separate line
- * each.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * @p v. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * This function is analog to the
- * the @p swap function of all C++
- * standard containers. Also,
- * there is a global function
- * <tt>swap(u,v)</tt> that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (VectorBase &v);
-
- /**
- * Conversion operator to gain access
- * to the underlying PETSc type. If you
- * do this, you cut this class off some
- * information it may need, so this
- * conversion operator should only be
- * used if you know what you do. In
- * particular, it should only be used
- * for read-only operations into the
- * vector.
- */
- operator const Vec & () const;
-
- /**
- * Estimate for the memory
- * consumption (not implemented
- * for this class).
- */
- std::size_t memory_consumption () const;
-
- protected:
- /**
- * A generic vector object in
- * PETSc. The actual type, a sequential
- * vector, is set in the constructor.
- */
- Vec vector;
-
- /**
- * Denotes if this vector has ghost
- * indices associated with it. This
- * means that at least one of the
- * processes in a parallel programm has
- * at least one ghost index.
- */
- bool ghosted;
-
- /**
- * This vector contains the global
- * indices of the ghost values. The
- * location in this vector denotes the
- * local numbering, which is used in
- * PETSc.
- */
- IndexSet ghost_indices;
-
- /**
- * Store whether the last action was a
- * write or add operation. This
- * variable is @p mutable so that the
- * accessor classes can write to it,
- * even though the vector object they
- * refer to is constant.
- */
- mutable ::dealii::VectorOperation::values last_action;
-
- /**
- * Make the reference class a friend.
- */
- friend class internal::VectorReference;
-
- /**
- * Specifies if the vector is the owner
- * of the PETSc Vec. This is true if it
- * got created by this class and
- * determines if it gets destructed in
- * the destructor.
- */
- bool attained_ownership;
-
- /**
- * Collective set or add
- * operation: This function is
- * invoked by the collective @p
- * set and @p add with the
- * @p add_values flag set to the
- * corresponding value.
- */
- void do_set_add_operation (const unsigned int n_elements,
- const unsigned int *indices,
- const PetscScalar *values,
- const bool add_values);
+ public:
+ /**
+ * Declare some of the standard types
+ * used in all containers. These types
+ * parallel those in the <tt>C++</tt>
+ * standard library's <tt>vector<...></tt>
+ * class.
+ */
+ typedef PetscScalar value_type;
+ typedef PetscReal real_type;
+ typedef std::size_t size_type;
+ typedef internal::VectorReference reference;
+ typedef const internal::VectorReference const_reference;
+
+ /**
+ * Default constructor. It doesn't do
+ * anything; derived classes will have
+ * to initialize the data.
+ */
+ VectorBase ();
+
+ /**
+ * Copy constructor. Sets the dimension
+ * to that of the given vector, and
+ * copies all elements.
+ */
+ VectorBase (const VectorBase &v);
+
+ /**
+ * Initialize a Vector from a PETSc Vec
+ * object. Note that we do not copy the
+ * vector and we do not attain
+ * ownership, so we do not destroy the
+ * PETSc object in the destructor.
+ */
+ explicit VectorBase (const Vec &v);
+
+ /**
+ * Destructor
+ */
+ virtual ~VectorBase ();
+
+ /**
+ * Compress the underlying
+ * representation of the PETSc object,
+ * i.e. flush the buffers of the vector
+ * object if it has any. This function
+ * is necessary after writing into a
+ * vector element-by-element and before
+ * anything else can be done on it.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
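// Illustrative sketch (not part of the patch): assembly-style accumulation
// into a vector, followed by the required compress() call. The loop bounds,
// local_index and local_value are hypothetical:
//
//   for (unsigned int q = 0; q < n_local_entries; ++q)
//     v(local_index[q]) += local_value[q];    // element-wise accumulation
//   v.compress (VectorOperation::add);        // VecAssemblyBegin/End under
//                                             // the hood, before using v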
+
+ /**
+ * Set all components of the vector to
+ * the given number @p s. Simply pass
+ * this down to the individual block
+ * objects, but we still need to declare
+ * this function to make the example
+ * given in the discussion about making
+ * the constructor explicit work.
+ *
+ *
+ * Since the semantics of assigning a
+ * scalar to a vector are not
+ * immediately clear, this operator
+ * should really only be used if you
+ * want to set the entire vector to
+ * zero. This allows the intuitive
+ * notation <tt>v=0</tt>. Assigning
+ * other values is deprecated and may
+ * be disallowed in the future.
+ */
+ VectorBase &operator = (const PetscScalar s);
+
+ /**
+ * Test for equality. This function
+ * assumes that the present vector and
+ * the one to compare with have the same
+ * size already, since comparing vectors
+ * of different sizes does not make much
+ * sense anyway.
+ */
+ bool operator == (const VectorBase &v) const;
+
+ /**
+ * Test for inequality. This function
+ * assumes that the present vector and
+ * the one to compare with have the same
+ * size already, since comparing vectors
+ * of different sizes does not make much
+ * sense anyway.
+ */
+ bool operator != (const VectorBase &v) const;
+
+ /**
+ * Return the global dimension of the
+ * vector.
+ */
+ unsigned int size () const;
+
+ /**
+ * Return the local dimension of the
+ * vector, i.e. the number of elements
+ * stored on the present MPI
+ * process. For sequential vectors,
+ * this number is the same as size(),
+ * but for parallel vectors it may be
+ * smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which elements of
+ * this vector are stored
+ * locally. The first number is
+ * the index of the first
+ * element stored, the second
+ * the index of the one past
+ * the last one that is stored
+ * locally. If this is a
+ * sequential vector, then the
+ * result will be the pair
+ * (0,N), otherwise it will be
+ * a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
+ */
+ std::pair<unsigned int, unsigned int>
+ local_range () const;
+
+ /**
+ * Return whether @p index is
+ * in the local range or not,
+ * see also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
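// Illustrative sketch (not part of the patch): on a parallel vector only
// the locally stored elements may be accessed directly, so loops are
// usually restricted to the half-open range returned by local_range().
// process_entry() is a hypothetical helper:
//
//   const std::pair<unsigned int, unsigned int> range = v.local_range();
//   for (unsigned int i = range.first; i < range.second; ++i)
//     process_entry (i, v(i));       // v.in_local_range(i) is true here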
+
+ /**
+ * Return if the vector contains ghost
+ * elements.
+ */
+ bool has_ghost_elements() const;
+
+ /**
+ * Provide access to a given element,
+ * both read and write.
+ */
+ reference
+ operator () (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element.
+ */
+ PetscScalar
+ operator () (const unsigned int index) const;
+
+ /**
+ * Provide access to a given
+ * element, both read and write.
+ *
+ * Exactly the same as operator().
+ */
+ reference
+ operator [] (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element. This is equivalent to
+ * the <code>el()</code> command.
+ *
+ * Exactly the same as operator().
+ */
+ PetscScalar
+ operator [] (const unsigned int index) const;
+
+ /**
+ * A collective set operation: instead
+ * of setting individual elements of a
+ * vector, this function allows setting
+ * a whole set of elements at once. The
+ * indices of the elements to be set
+ * are stated in the first argument,
+ * the corresponding values in the
+ * second.
+ */
+ void set (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
++ const std::vector<PetscScalar> &values);
+
+ /**
+ * A collective add operation: This
+ * function adds a whole set of values
+ * stored in @p values to the vector
+ * components specified by @p indices.
+ */
+ void add (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
++ const std::vector<PetscScalar> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<PetscScalar> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
- const PetscScalar *values);
++ const PetscScalar *values);
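// Illustrative sketch (not part of the patch): the collective set()/add()
// calls replace many individual element accesses, e.g. when copying a local
// contribution into the global vector. The two std::vectors are assumed to
// have been filled elsewhere and to have equal length:
//
//   std::vector<unsigned int> indices;   // global indices to write to
//   std::vector<PetscScalar>  values;    // one value per index
//   v.add (indices, values);                   // accumulate all at once
//   v.compress (VectorOperation::add);         // before reading from v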
+
+ /**
+ * Return the scalar product of two
+ * vectors. The vectors must have the
+ * same size.
+ */
+ PetscScalar operator * (const VectorBase &vec) const;
+
+ /**
+ * Return square of the $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Mean value of the elements of
+ * this vector.
+ */
+ PetscScalar mean_value () const;
+
+ /**
+ * $l_1$-norm of the vector.
+ * The sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * $l_2$-norm of the vector. The
+ * square root of the sum of the
+ * squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * $l_p$-norm of the vector. The
+ * pth root of the sum of the pth
+ * powers of the absolute values
+ * of the elements.
+ */
+ real_type lp_norm (const real_type p) const;
+
+ /**
+ * Maximum absolute value of the
+ * elements.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Normalize vector by dividing
+ * by the $l_2$-norm of the
+ * vector. Return vector norm
+ * before normalization.
+ */
+ real_type normalize () const;
+
+ /**
+ * Return vector component with
+ * the minimal magnitude.
+ */
+ real_type min () const;
+
+ /**
+ * Return vector component with
+ * the maximal magnitude.
+ */
+ real_type max () const;
+
+
+ /**
+ * Replace every element in a
+ * vector with its absolute
+ * value.
+ */
+ VectorBase &abs ();
+
+ /**
+ * Conjugate a vector.
+ */
+ VectorBase &conjugate ();
+
+ /**
+ * A collective piecewise
+ * multiply operation on
+ * <code>this</code> vector
+ * with itself. TODO: The model
+ * for this function should be
+ * similar to add().
+ */
+ VectorBase &mult ();
+
+ /**
+ * Same as above, but a
+ * collective piecewise
+ * multiply operation of
+ * <code>this</code> vector
+ * with <b>v</b>.
+ */
+ VectorBase &mult (const VectorBase &v);
+
+ /**
+ * Same as above, but a
+ * collective piecewise
+ * multiply operation of
+ * <b>u</b> with <b>v</b>.
+ */
+ VectorBase &mult (const VectorBase &u,
+ const VectorBase &v);
+
+ /**
+ * Return whether the vector contains
+ * only elements with value zero. This
+ * function is mainly for internal
+ * consistency checks and should
+ * seldom be used when not in debug
+ * mode since it takes quite some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries
+ * are zero or positive. This function
+ * is used, for example, to check
+ * whether refinement indicators are
+ * really all positive (or zero).
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Multiply the entire vector by a
+ * fixed factor.
+ */
+ VectorBase &operator *= (const PetscScalar factor);
+
+ /**
+ * Divide the entire vector by a
+ * fixed factor.
+ */
+ VectorBase &operator /= (const PetscScalar factor);
+
+ /**
+ * Add the given vector to the present
+ * one.
+ */
+ VectorBase &operator += (const VectorBase &V);
+
+ /**
+ * Subtract the given vector from the
+ * present one.
+ */
+ VectorBase &operator -= (const VectorBase &V);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is a
+ * scalar and not a vector.
+ */
+ void add (const PetscScalar s);
+
+ /**
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const VectorBase &V);
+
+ /**
+ * Simple addition of a multiple of a
+ * vector, i.e. <tt>*this += a*V</tt>.
+ */
+ void add (const PetscScalar a, const VectorBase &V);
+
+ /**
+ * Multiple addition of scaled vectors,
+ * i.e. <tt>*this += a*V+b*W</tt>.
+ */
+ void add (const PetscScalar a, const VectorBase &V,
+ const PetscScalar b, const VectorBase &W);
+
+ /**
+ * Scaling and simple vector addition,
+ * i.e.
+ * <tt>*this = s*(*this)+V</tt>.
+ */
+ void sadd (const PetscScalar s,
+ const VectorBase &V);
+
+ /**
+ * Scaling and simple addition, i.e.
+ * <tt>*this = s*(*this)+a*V</tt>.
+ */
+ void sadd (const PetscScalar s,
+ const PetscScalar a,
+ const VectorBase &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const PetscScalar s,
+ const PetscScalar a,
+ const VectorBase &V,
+ const PetscScalar b,
+ const VectorBase &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+ */
+ void sadd (const PetscScalar s,
+ const PetscScalar a,
+ const VectorBase &V,
+ const PetscScalar b,
+ const VectorBase &W,
+ const PetscScalar c,
+ const VectorBase &X);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const VectorBase &scaling_factors);
+
+ /**
+ * Assignment <tt>*this = a*V</tt>.
+ */
+ void equ (const PetscScalar a, const VectorBase &V);
+
+ /**
+ * Assignment <tt>*this = a*V + b*W</tt>.
+ */
+ void equ (const PetscScalar a, const VectorBase &V,
+ const PetscScalar b, const VectorBase &W);
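// Illustrative sketch (not part of the patch): the scaled-addition family
// in use; u, v and w are assumed to be vectors of equal size:
//
//   u.add  (2.0, v);              // u += 2*v
//   u.sadd (0.5, 3.0, v);         // u  = 0.5*u + 3*v
//   u.equ  (1.0, v, -1.0, w);     // u  = v - w
//   u.scale (w);                  // u_i *= w_i  (diagonal scaling)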
+
+ /**
+ * Compute the elementwise ratio of the
+ * two given vectors, that is let
+ * <tt>this[i] = a[i]/b[i]</tt>. This is
+ * useful for example if you want to
+ * compute the cellwise ratio of true to
+ * estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const VectorBase &a,
+ const VectorBase &b);
+
+ /**
+ * Updates the ghost values of this
+ * vector. This is necessary after any
+ * modification before reading ghost
+ * values.
+ */
+ void update_ghost_values() const;
+
+ /**
+ * Print to a
+ * stream. @p precision denotes
+ * the desired precision with
+ * which values shall be printed,
+ * @p scientific whether
+ * scientific notation shall be
+ * used. If @p across is
+ * @p true then the vector is
+ * printed in a line, while if
+ * @p false then the elements
+ * are printed on a separate line
+ * each.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * @p v. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * This function is analogous to the
+ * @p swap function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * <tt>swap(u,v)</tt> that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (VectorBase &v);
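// Illustrative sketch (not part of the patch): swapping only exchanges the
// underlying PETSc Vec handles, so it is cheap regardless of vector size:
//
//   u.swap (v);        // member function
//   swap (u, v);       // global function, simply forwards to u.swap(v)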
+
+ /**
+ * Conversion operator to gain access
+ * to the underlying PETSc type. If you
+ * do this, you cut this class off from some
+ * information it may need, so this
+ * conversion operator should only be
+ * used if you know what you are doing. In
+ * particular, it should only be used
+ * for read-only operations into the
+ * vector.
+ */
+ operator const Vec &() const;
+
+ /**
+ * Estimate for the memory
+ * consumption (not implemented
+ * for this class).
+ */
+ std::size_t memory_consumption () const;
+
+ protected:
+ /**
+ * A generic vector object in
+ * PETSc. The actual type, a sequential
+ * vector, is set in the constructor.
+ */
+ Vec vector;
+
+ /**
+ * Denotes if this vector has ghost
+ * indices associated with it. This
+ * means that at least one of the
+ * processes in a parallel program has
+ * at least one ghost index.
+ */
+ bool ghosted;
+
+ /**
+ * This vector contains the global
+ * indices of the ghost values. The
+ * location in this vector denotes the
+ * local numbering, which is used in
+ * PETSc.
+ */
+ IndexSet ghost_indices;
+
+ /**
+ * Store whether the last action was a
+ * write or add operation. This
+ * variable is @p mutable so that the
+ * accessor classes can write to it,
+ * even though the vector object they
+ * refer to is constant.
+ */
+ mutable ::dealii::VectorOperation::values last_action;
+
+ /**
+ * Make the reference class a friend.
+ */
+ friend class internal::VectorReference;
+
+ /**
+ * Specifies if the vector is the owner
+ * of the PETSc Vec. This is true if it
+ * got created by this class and
+ * determines if it gets destructed in
+ * the destructor.
+ */
+ bool attained_ownership;
+
+ /**
+ * Collective set or add
+ * operation: This function is
+ * invoked by the collective @p
+ * set and @p add with the
+ * @p add_values flag set to the
+ * corresponding value.
+ */
+ void do_set_add_operation (const unsigned int n_elements,
+ const unsigned int *indices,
- const PetscScalar *values,
++ const PetscScalar *values,
+ const bool add_values);
};
namespace internal
{
inline
- VectorReference::VectorReference (const VectorBase &vector,
+ VectorReference::VectorReference (const VectorBase &vector,
const unsigned int index)
- :
- vector (vector),
- index (index)
+ :
+ vector (vector),
+ index (index)
{}
template <typename number>
class SparseMatrix : public virtual Subscriptor
{
- public:
- /**
- * Type of matrix entries. In analogy to
- * the STL container classes.
- */
- typedef number value_type;
-
- /**
- * Declare a type that has holds
- * real-valued numbers with the
- * same precision as the template
- * argument to this class. If the
- * template argument of this
- * class is a real data type,
- * then real_type equals the
- * template argument. If the
- * template argument is a
- * std::complex type then
- * real_type equals the type
- * underlying the complex
- * numbers.
- *
- * This typedef is used to
- * represent the return type of
- * norms.
- */
- typedef typename numbers::NumberTraits<number>::real_type real_type;
-
- /**
- * Typedef of an STL conforming iterator
- * class walking over all the nonzero
- * entries of this matrix. This iterator
- * cannot change the values of the
- * matrix.
- */
- typedef
- SparseMatrixIterators::Iterator<number,true>
- const_iterator;
-
- /**
- * Typedef of an STL conforming iterator
- * class walking over all the nonzero
- * entries of this matrix. This iterator
- * @em can change the values of the
- * matrix, but of course can't change the
- * sparsity pattern as this is fixed once
- * a sparse matrix is attached to it.
- */
- typedef
- SparseMatrixIterators::Iterator<number,false>
- iterator;
-
- /**
- * A structure that describes some of the
- * traits of this class in terms of its
- * run-time behavior. Some other classes
- * (such as the block matrix classes)
- * that take one or other of the matrix
- * classes as its template parameters can
- * tune their behavior based on the
- * variables in this class.
- */
- struct Traits
- {
- /**
- * It is safe to elide additions of
- * zeros to individual elements of
- * this matrix.
- */
- static const bool zero_addition_can_be_elided = true;
- };
+ public:
+ /**
+ * Type of matrix entries. In analogy to
+ * the STL container classes.
+ */
+ typedef number value_type;
+
+ /**
+ * Declare a type that holds
+ * real-valued numbers with the
+ * same precision as the template
+ * argument to this class. If the
+ * template argument of this
+ * class is a real data type,
+ * then real_type equals the
+ * template argument. If the
+ * template argument is a
+ * std::complex type then
+ * real_type equals the type
+ * underlying the complex
+ * numbers.
+ *
+ * This typedef is used to
+ * represent the return type of
+ * norms.
+ */
+ typedef typename numbers::NumberTraits<number>::real_type real_type;
+
+ /**
+ * Typedef of an STL conforming iterator
+ * class walking over all the nonzero
+ * entries of this matrix. This iterator
+ * cannot change the values of the
+ * matrix.
+ */
+ typedef
+ SparseMatrixIterators::Iterator<number,true>
+ const_iterator;
+
+ /**
+ * Typedef of an STL conforming iterator
+ * class walking over all the nonzero
+ * entries of this matrix. This iterator
+ * @em can change the values of the
+ * matrix, but of course can't change the
+ * sparsity pattern as this is fixed once
+ * a sparse matrix is attached to it.
+ */
+ typedef
+ SparseMatrixIterators::Iterator<number,false>
+ iterator;
+
+ /**
+ * A structure that describes some of the
+ * traits of this class in terms of its
+ * run-time behavior. Some other classes
+ * (such as the block matrix classes)
+ * that take one or other of the matrix
+ * classes as its template parameters can
+ * tune their behavior based on the
+ * variables in this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is safe to elide additions of
+ * zeros to individual elements of
+ * this matrix.
+ */
+ static const bool zero_addition_can_be_elided = true;
+ };
- /**
- * @name Constructors and initalization
- */
+ /**
+ * @name Constructors and initialization
+ */
//@{
- /**
- * Constructor; initializes the matrix to
- * be empty, without any structure, i.e.
- * the matrix is not usable at all. This
- * constructor is therefore only useful
- * for matrices which are members of a
- * class. All other matrices should be
- * created at a point in the data flow
- * where all necessary information is
- * available.
- *
- * You have to initialize
- * the matrix before usage with
- * reinit(const SparsityPattern&).
- */
- SparseMatrix ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the matrix
- * to be copied is empty. This is for the
- * same reason as for the
- * SparsityPattern, see there for the
- * details.
- *
- * If you really want to copy a whole
- * matrix, you can do so by using the
- * copy_from() function.
- */
- SparseMatrix (const SparseMatrix &);
-
- /**
- * Constructor. Takes the given
- * matrix sparsity structure to
- * represent the sparsity pattern
- * of this matrix. You can change
- * the sparsity pattern later on
- * by calling the reinit(const
- * SparsityPattern&) function.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * SparsityPattern&) is not
- * called with a new sparsity
- * pattern.
- *
- * The constructor is marked
- * explicit so as to disallow
- * that someone passes a sparsity
- * pattern in place of a sparse
- * matrix to some function, where
- * an empty matrix would be
- * generated then.
- */
- explicit SparseMatrix (const SparsityPattern &sparsity);
-
- /**
- * Copy constructor: initialize
- * the matrix with the identity
- * matrix. This constructor will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- SparseMatrix (const SparsityPattern &sparsity,
- const IdentityMatrix &id);
-
- /**
- * Destructor. Free all memory, but do not
- * release the memory of the sparsity
- * structure.
- */
- virtual ~SparseMatrix ();
-
- /**
- * Copy operator. Since copying
- * entire sparse matrices is a
- * very expensive operation, we
- * disallow doing so except for
- * the special case of empty
- * matrices of size zero. This
- * doesn't seem particularly
- * useful, but is exactly what
- * one needs if one wanted to
- * have a
- * <code>std::vector@<SparseMatrix@<double@>
- * @></code>: in that case, one
- * can create a vector (which
- * needs the ability to copy
- * objects) of empty matrices
- * that are then later filled
- * with something useful.
- */
- SparseMatrix<number>& operator = (const SparseMatrix<number> &);
-
- /**
- * Copy operator: initialize
- * the matrix with the identity
- * matrix. This operator will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- SparseMatrix<number> &
- operator= (const IdentityMatrix &id);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- SparseMatrix & operator = (const double d);
-
- /**
- * Reinitialize the sparse matrix
- * with the given sparsity
- * pattern. The latter tells the
- * matrix how many nonzero
- * elements there need to be
- * reserved.
- *
- * Regarding memory allocation,
- * the same applies as said
- * above.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * SparsityPattern &) is not
- * called with a new sparsity
- * structure.
- *
- * The elements of the matrix are
- * set to zero by this function.
- */
- virtual void reinit (const SparsityPattern &sparsity);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- */
- virtual void clear ();
+ /**
+ * Constructor; initializes the matrix to
+ * be empty, without any structure, i.e.
+ * the matrix is not usable at all. This
+ * constructor is therefore only useful
+ * for matrices which are members of a
+ * class. All other matrices should be
+ * created at a point in the data flow
+ * where all necessary information is
+ * available.
+ *
+ * You have to initialize
+ * the matrix before usage with
+ * reinit(const SparsityPattern&).
+ */
+ SparseMatrix ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the matrix
+ * to be copied is empty. This is for the
+ * same reason as for the
+ * SparsityPattern, see there for the
+ * details.
+ *
+ * If you really want to copy a whole
+ * matrix, you can do so by using the
+ * copy_from() function.
+ */
+ SparseMatrix (const SparseMatrix &);
+
+ /**
+ * Constructor. Takes the given
+ * matrix sparsity structure to
+ * represent the sparsity pattern
+ * of this matrix. You can change
+ * the sparsity pattern later on
+ * by calling the reinit(const
+ * SparsityPattern&) function.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * SparsityPattern&) is not
+ * called with a new sparsity
+ * pattern.
+ *
+ * The constructor is marked
+ * explicit so as to disallow
+ * that someone passes a sparsity
+ * pattern in place of a sparse
+ * matrix to some function, where
+ * an empty matrix would be
+ * generated then.
+ */
+ explicit SparseMatrix (const SparsityPattern &sparsity);
+
+ /**
+ * Copy constructor: initialize
+ * the matrix with the identity
+ * matrix. This constructor will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ SparseMatrix (const SparsityPattern &sparsity,
- const IdentityMatrix &id);
++ const IdentityMatrix &id);
+
+ /**
+ * Destructor. Free all memory, but do not
+ * release the memory of the sparsity
+ * structure.
+ */
+ virtual ~SparseMatrix ();
+
+ /**
+ * Copy operator. Since copying
+ * entire sparse matrices is a
+ * very expensive operation, we
+ * disallow doing so except for
+ * the special case of empty
+ * matrices of size zero. This
+ * doesn't seem particularly
+ * useful, but is exactly what
+ * one needs if one wanted to
+ * have a
+ * <code>std::vector@<SparseMatrix@<double@>
+ * @></code>: in that case, one
+ * can create a vector (which
+ * needs the ability to copy
+ * objects) of empty matrices
+ * that are then later filled
+ * with something useful.
+ */
+ SparseMatrix<number> &operator = (const SparseMatrix<number> &);
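// Illustrative sketch (not part of the patch): the use case mentioned above,
// a container of matrices. Copying works because only empty matrices are
// copied; n_blocks and sparsity_patterns are hypothetical:
//
//   std::vector<SparseMatrix<double> > matrices (n_blocks);
//   for (unsigned int b = 0; b < n_blocks; ++b)
//     matrices[b].reinit (sparsity_patterns[b]);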
+
+ /**
+ * Copy operator: initialize
+ * the matrix with the identity
+ * matrix. This operator will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ SparseMatrix<number> &
- operator= (const IdentityMatrix &id);
++ operator= (const IdentityMatrix &id);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this does usually
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ SparseMatrix &operator = (const double d);
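// Illustrative sketch (not part of the patch): the only intended use of this
// operator is zeroing all stored entries while keeping the sparsity pattern,
// e.g. before re-assembling; system_matrix and assemble_system() are
// hypothetical:
//
//   system_matrix = 0;
//   assemble_system ();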
+
+ /**
+ * Reinitialize the sparse matrix
+ * with the given sparsity
+ * pattern. The latter tells the
+ * matrix how many nonzero
+ * elements there need to be
+ * reserved.
+ *
+ * Regarding memory allocation,
+ * the same applies as said
+ * above.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * SparsityPattern &) is not
+ * called with a new sparsity
+ * structure.
+ *
+ * The elements of the matrix are
+ * set to zero by this function.
+ */
+ virtual void reinit (const SparsityPattern &sparsity);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ */
+ virtual void clear ();
//@}
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return whether the object is
- * empty. It is empty if either
- * both dimensions are zero or no
- * SparsityPattern is
- * associated.
- */
- bool empty () const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int n () const;
-
- /**
- * Return the number of entries
- * in a specific row.
- */
- unsigned int get_row_length (const unsigned int row) const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the number of actually
- * nonzero elements of this matrix. It
- * is possible to specify the parameter
- * <tt>threshold</tt> in order to count
- * only the elements that have absolute
- * value greater than the threshold.
- *
- * Note, that this function does (in
- * contrary to n_nonzero_elements())
- * not count all entries of the
- * sparsity pattern but only the ones
- * that are nonzero (or whose absolute
- * value is greater than threshold).
- */
- unsigned int n_actually_nonzero_elements (const double threshold = 0.) const;
-
- /**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
- *
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
- */
- const SparsityPattern & get_sparsity_pattern () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return whether the object is
+ * empty. It is empty if either
+ * both dimensions are zero or no
+ * SparsityPattern is
+ * associated.
+ */
+ bool empty () const;
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the number of entries
+ * in a specific row.
+ */
+ unsigned int get_row_length (const unsigned int row) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the number of actually
+ * nonzero elements of this matrix. It
+ * is possible to specify the parameter
+ * <tt>threshold</tt> in order to count
+ * only the elements that have absolute
+ * value greater than the threshold.
+ *
+ * Note that, in contrast to
+ * n_nonzero_elements(), this function
+ * does not count all entries of the
+ * sparsity pattern but only those
+ * that are nonzero (or whose absolute
+ * value is greater than the threshold).
+ */
+ unsigned int n_actually_nonzero_elements (const double threshold = 0.) const;
+
+ /**
+ * Return a (constant) reference
+ * to the underlying sparsity
+ * pattern of this matrix.
+ *
+ * Though the return value is
+ * declared <tt>const</tt>, you
+ * should be aware that it may
+ * change if you call any
+ * nonconstant function of
+ * objects which operate on it.
+ */
+ const SparsityPattern &get_sparsity_pattern () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
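+
+ // A minimal sketch of the query functions above (<tt>matrix</tt> is a
+ // hypothetical, already initialized SparseMatrix<double>):
+ //
+ //   const unsigned int n_rows    = matrix.m ();
+ //   const unsigned int n_cols    = matrix.n ();
+ //   const unsigned int n_stored  = matrix.n_nonzero_elements ();
+ //   const unsigned int n_nonzero = matrix.n_actually_nonzero_elements (1e-12);
+ //   // n_stored counts all entries of the sparsity pattern, n_nonzero only
+ //   // those whose absolute value exceeds the given threshold.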
//@}
- /**
- * @name Modifying entries
- */
+ /**
+ * @name Modifying entries
+ */
//@{
- /**
- * Set the element (<i>i,j</i>)
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const number value);
-
- /**
- * Set all elements given in a
- * FullMatrix into the sparse matrix
- * locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number2>
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number2>
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number2>
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number2> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- template <typename number2>
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool elide_zero_values = false);
-
- /**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const number value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number2>
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number2>
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number2>
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number2> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number2>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Multiply the entire matrix by a
- * fixed factor.
- */
- SparseMatrix & operator *= (const number factor);
-
- /**
- * Divide the entire matrix by a
- * fixed factor.
- */
- SparseMatrix & operator /= (const number factor);
-
- /**
- * Symmetrize the matrix by
- * forming the mean value between
- * the existing matrix and its
- * transpose, $A = \frac 12(A+A^T)$.
- *
- * This operation assumes that
- * the underlying sparsity
- * pattern represents a symmetric
- * object. If this is not the
- * case, then the result of this
- * operation will not be a
- * symmetric matrix, since it
- * only explicitly symmetrizes
- * by looping over the lower left
- * triangular part for efficiency
- * reasons; if there are entries
- * in the upper right triangle,
- * then these elements are missed
- * in the
- * symmetrization. Symmetrization
- * of the sparsity pattern can be
- * obtain by
- * SparsityPattern::symmetrize().
- */
- void symmetrize ();
-
- /**
- * Copy the given matrix to this
- * one. The operation throws an
- * error if the sparsity patterns
- * of the two involved matrices
- * do not point to the same
- * object, since in this case the
- * copy operation is
- * cheaper. Since this operation
- * is notheless not for free, we
- * do not make it available
- * through <tt>operator =</tt>,
- * since this may lead to
- * unwanted usage, e.g. in copy
- * arguments to functions, which
- * should really be arguments by
- * reference.
- *
- * The source matrix may be a matrix
- * of arbitrary type, as long as its
- * data type is convertible to the
- * data type of this matrix.
- *
- * The function returns a reference to
- * <tt>*this</tt>.
- */
- template <typename somenumber>
- SparseMatrix<number> &
- copy_from (const SparseMatrix<somenumber> &source);
-
- /**
- * This function is complete
- * analogous to the
- * SparsityPattern::copy_from()
- * function in that it allows to
- * initialize a whole matrix in
- * one step. See there for more
- * information on argument types
- * and their meaning. You can
- * also find a small example on
- * how to use this function
- * there.
- *
- * The only difference to the
- * cited function is that the
- * objects which the inner
- * iterator points to need to be
- * of type <tt>std::pair<unsigned
- * int, value</tt>, where
- * <tt>value</tt> needs to be
- * convertible to the element
- * type of this class, as
- * specified by the
- * <tt>number</tt> template
- * argument.
- *
- * Previous content of the matrix
- * is overwritten. Note that the
- * entries specified by the input
- * parameters need not
- * necessarily cover all elements
- * of the matrix. Elements not
- * covered remain untouched.
- */
- template <typename ForwardIterator>
- void copy_from (const ForwardIterator begin,
- const ForwardIterator end);
-
- /**
- * Copy the nonzero entries of a
- * full matrix into this
- * object. Previous content is
- * deleted. Note that the
- * underlying sparsity pattern
- * must be appropriate to hold
- * the nonzero entries of the
- * full matrix.
- */
- template <typename somenumber>
- void copy_from (const FullMatrix<somenumber> &matrix);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>. This
- * function throws an error if the
- * sparsity patterns of the two involved
- * matrices do not point to the same
- * object, since in this case the
- * operation is cheaper.
- *
- * The source matrix may be a sparse
- * matrix over an arbitrary underlying
- * scalar type, as long as its data type
- * is convertible to the data type of
- * this matrix.
- */
- template <typename somenumber>
- void add (const number factor,
- const SparseMatrix<somenumber> &matrix);
+ /**
+ * Set the element (<i>i,j</i>)
+ * to <tt>value</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const number value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix into the sparse matrix
+ * locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number2>
+ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number2>
+ void set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number2>
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number2> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in a given
+ * row of the sparse matrix, at the
+ * columns given by col_indices, to
+ * the values given by
+ * <tt>values</tt>.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ template <typename number2>
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number2 *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add <tt>value</tt> to the
+ * element (<i>i,j</i>). Throws
+ * an error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const number value);
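+
+ // A minimal sketch of element-wise writing (<tt>matrix</tt> is a
+ // hypothetical SparseMatrix<double> whose sparsity pattern contains the
+ // entry (0,0)):
+ //
+ //   matrix.set (0, 0, 2.0);   // entry (0,0) is now 2.0
+ //   matrix.add (0, 0, 1.0);   // entry (0,0) is now 3.0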
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number2>
+ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = true);
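+
+ // A sketch of the usual finite element assembly step using this function
+ // (the names <tt>fe</tt>, <tt>cell</tt>, <tt>cell_matrix</tt>,
+ // <tt>local_dof_indices</tt> and <tt>system_matrix</tt> are hypothetical):
+ //
+ //   FullMatrix<double>        cell_matrix (fe.dofs_per_cell, fe.dofs_per_cell);
+ //   std::vector<unsigned int> local_dof_indices (fe.dofs_per_cell);
+ //   // ...fill cell_matrix on the current cell...
+ //   cell->get_dof_indices (local_dof_indices);
+ //   system_matrix.add (local_dof_indices, cell_matrix);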
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number2>
+ void add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add several values, given by
+ * <tt>values</tt>, to the entries of
+ * the specified row of the matrix at
+ * the column indices given by
+ * <tt>col_indices</tt>.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number2>
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number2> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number2>
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number2 *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Multiply the entire matrix by a
+ * fixed factor.
+ */
+ SparseMatrix &operator *= (const number factor);
+
+ /**
+ * Divide the entire matrix by a
+ * fixed factor.
+ */
+ SparseMatrix &operator /= (const number factor);
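+
+ // A minimal sketch of scaling a matrix (<tt>matrix</tt> is hypothetical):
+ //
+ //   matrix *= 2.0;   // double every stored entry
+ //   matrix /= 4.0;   // then divide every stored entry by four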
+
+ /**
+ * Symmetrize the matrix by
+ * forming the mean value between
+ * the existing matrix and its
+ * transpose, $A = \frac 12(A+A^T)$.
+ *
+ * This operation assumes that
+ * the underlying sparsity
+ * pattern represents a symmetric
+ * object. If this is not the
+ * case, then the result of this
+ * operation will not be a
+ * symmetric matrix, since it
+ * only explicitly symmetrizes
+ * by looping over the lower left
+ * triangular part for efficiency
+ * reasons; if there are entries
+ * in the upper right triangle,
+ * then these elements are missed
+ * in the
+ * symmetrization. Symmetrization
+ * of the sparsity pattern can be
+ * obtained by
+ * SparsityPattern::symmetrize().
+ */
+ void symmetrize ();
+
+ /**
+ * Copy the given matrix to this
+ * one. The operation throws an
+ * error if the sparsity patterns
+ * of the two involved matrices
+ * do not point to the same
+ * object, since in this case the
+ * copy operation is
+ * cheaper. Since this operation
+ * is nonetheless not for free, we
+ * do not make it available
+ * through <tt>operator =</tt>,
+ * since this may lead to
+ * unwanted usage, e.g. by copying
+ * arguments to functions that
+ * should really be passed by
+ * reference.
+ *
+ * The source matrix may be a matrix
+ * of arbitrary type, as long as its
+ * data type is convertible to the
+ * data type of this matrix.
+ *
+ * The function returns a reference to
+ * <tt>*this</tt>.
+ */
+ template <typename somenumber>
+ SparseMatrix<number> &
+ copy_from (const SparseMatrix<somenumber> &source);
+
+ /**
+ * This function is completely
+ * analogous to the
+ * SparsityPattern::copy_from()
+ * function in that it allows one to
+ * initialize a whole matrix in
+ * one step. See there for more
+ * information on argument types
+ * and their meaning. You can
+ * also find a small example on
+ * how to use this function
+ * there.
+ *
+ * The only difference to the
+ * cited function is that the
+ * objects which the inner
+ * iterator points to need to be
+ * of type <tt>std::pair<unsigned
+ * int, value></tt>, where
+ * <tt>value</tt> needs to be
+ * convertible to the element
+ * type of this class, as
+ * specified by the
+ * <tt>number</tt> template
+ * argument.
+ *
+ * Previous content of the matrix
+ * is overwritten. Note that the
+ * entries specified by the input
+ * parameters need not
+ * necessarily cover all elements
+ * of the matrix. Elements not
+ * covered remain untouched.
+ */
+ template <typename ForwardIterator>
+ void copy_from (const ForwardIterator begin,
+ const ForwardIterator end);
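+
+ // A sketch of filling a matrix with this function, assuming the sparsity
+ // pattern contains the entries inserted below (the container
+ // <tt>entries</tt> is hypothetical):
+ //
+ //   std::vector<std::map<unsigned int, double> > entries (matrix.m ());
+ //   entries[0][0] =  2.0;
+ //   entries[0][1] = -1.0;
+ //   entries[1][1] =  2.0;
+ //   matrix.copy_from (entries.begin (), entries.end ());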
+
+ /**
+ * Copy the nonzero entries of a
+ * full matrix into this
+ * object. Previous content is
+ * deleted. Note that the
+ * underlying sparsity pattern
+ * must be appropriate to hold
+ * the nonzero entries of the
+ * full matrix.
+ */
+ template <typename somenumber>
+ void copy_from (const FullMatrix<somenumber> &matrix);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix <tt>factor*matrix</tt>
+ * is added to <tt>this</tt>. This
+ * function throws an error if the
+ * sparsity patterns of the two involved
+ * matrices do not point to the same
+ * object, since in this case the
+ * operation is cheaper.
+ *
+ * The source matrix may be a sparse
+ * matrix over an arbitrary underlying
+ * scalar type, as long as its data type
+ * is convertible to the data type of
+ * this matrix.
+ */
+ template <typename somenumber>
+ void add (const number factor,
+ const SparseMatrix<somenumber> &matrix);
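+
+ // A minimal sketch of a scaled matrix addition (<tt>A</tt> and <tt>B</tt>
+ // are hypothetical matrices built on the very same SparsityPattern
+ // object):
+ //
+ //   A.add (2.0, B);   // A += 2*B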
//@}
- /**
- * @name Entry Access
- */
+ /**
+ * @name Entry Access
+ */
//@{
- /**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the required element does
- * not exist in the matrix.
- *
- * In case you want a function
- * that returns zero instead (for
- * entries that are not in the
- * sparsity pattern of the
- * matrix), use the el()
- * function.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry (<i>i,j</i>). The
- * only difference is that if
- * this entry does not exist in
- * the sparsity pattern, then
- * instead of raising an
- * exception, zero is
- * returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic (see
- * SparsityPattern::optimize_diagonal()).
- *
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
- */
- number diag_element (const unsigned int i) const;
-
- /**
- * Same as above, but return a
- * writeable reference. You're
- * sure you know what you do?
- */
- number & diag_element (const unsigned int i);
-
- /**
- * Access to values in internal
- * mode. Returns the value of
- * the <tt>index</tt>th entry in
- * <tt>row</tt>. Here,
- * <tt>index</tt> refers to the
- * internal representation of the
- * matrix, not the column. Be
- * sure to understand what you
- * are doing here.
- *
- * @deprecated Use iterator or
- * const_iterator instead!
- */
- number raw_entry (const unsigned int row,
- const unsigned int index) const;
-
- /**
- * @internal @deprecated Use iterator or
- * const_iterator instead!
- *
- * This is for hackers. Get
- * access to the <i>i</i>th element of
- * this matrix. The elements are
- * stored in a consecutive way,
- * refer to the SparsityPattern
- * class for more details.
- *
- * You should use this interface
- * very carefully and only if you
- * are absolutely sure to know
- * what you do. You should also
- * note that the structure of
- * these arrays may change over
- * time. If you change the
- * layout yourself, you should
- * also rename this function to
- * avoid programs relying on
- * outdated information!
- */
- number global_entry (const unsigned int i) const;
-
- /**
- * @internal @deprecated Use iterator or
- * const_iterator instead!
- *
- * Same as above, but with write
- * access. You certainly know
- * what you do?
- */
- number & global_entry (const unsigned int i);
+ /**
+ * Return the value of the entry
+ * (<i>i,j</i>). This may be an
+ * expensive operation and you
+ * should always take care where
+ * to call this function. In
+ * order to avoid abuse, this
+ * function throws an exception
+ * if the required element does
+ * not exist in the matrix.
+ *
+ * In case you want a function
+ * that returns zero instead (for
+ * entries that are not in the
+ * sparsity pattern of the
+ * matrix), use the el()
+ * function.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
+ number operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This function is mostly like
+ * operator()() in that it
+ * returns the value of the
+ * matrix entry (<i>i,j</i>). The
+ * only difference is that if
+ * this entry does not exist in
+ * the sparsity pattern, then
+ * instead of raising an
+ * exception, zero is
+ * returned. While this may be
+ * convenient in some cases, note
+ * that it is simple to write
+ * algorithms that are slow
+ * compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
+ number el (const unsigned int i,
+ const unsigned int j) const;
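+
+ // A minimal sketch contrasting the two access functions (<tt>matrix</tt>
+ // is hypothetical):
+ //
+ //   const double a = matrix (0, 1);     // throws if (0,1) is not in the
+ //                                       // sparsity pattern
+ //   const double b = matrix.el (0, 1);  // returns zero in that case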
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic (see
+ * SparsityPattern::optimize_diagonal()).
+ *
+ * This function is considerably
+ * faster than the operator()(),
+ * since for quadratic matrices, the
+ * diagonal entry may be the
+ * first to be stored in each row
+ * and access therefore does not
+ * involve searching for the
+ * right column number.
+ */
+ number diag_element (const unsigned int i) const;
+
+ /**
+ * Same as above, but return a
+ * writable reference. Are you
+ * sure you know what you are doing?
+ */
+ number &diag_element (const unsigned int i);
+
+ /**
+ * Access to values in internal
+ * mode. Returns the value of
+ * the <tt>index</tt>th entry in
+ * <tt>row</tt>. Here,
+ * <tt>index</tt> refers to the
+ * internal representation of the
+ * matrix, not the column. Be
+ * sure to understand what you
+ * are doing here.
+ *
+ * @deprecated Use iterator or
+ * const_iterator instead!
+ */
+ number raw_entry (const unsigned int row,
+ const unsigned int index) const;
+
+ /**
+ * @internal @deprecated Use iterator or
+ * const_iterator instead!
+ *
+ * This is for hackers. Get
+ * access to the <i>i</i>th element of
+ * this matrix. The elements are
+ * stored in a consecutive way,
+ * refer to the SparsityPattern
+ * class for more details.
+ *
+ * You should use this interface
+ * very carefully and only if you
+ * are absolutely sure to know
+ * what you do. You should also
+ * note that the structure of
+ * these arrays may change over
+ * time. If you change the
+ * layout yourself, you should
+ * also rename this function to
+ * avoid programs relying on
+ * outdated information!
+ */
+ number global_entry (const unsigned int i) const;
+
+ /**
+ * @internal @deprecated Use iterator or
+ * const_iterator instead!
+ *
+ * Same as above, but with write
+ * access. Are you certain you
+ * know what you are doing?
+ */
+ number &global_entry (const unsigned int i);
//@}
- /**
- * @name Multiplications
- */
+ /**
+ * @name Multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this
- * matrix. This function does the
- * same as vmult() but takes
- * the transposed matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
- * i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to be
- * quadratic for this operation, and for
- * the result to actually be a norm it
- * also needs to be either real symmetric
- * or complex hermitian.
- *
- * The underlying template types of both
- * this matrix and the given vector
- * should either both be real or
- * complex-valued, but not mixed, for
- * this function to make sense.
- */
- template <typename somenumber>
- somenumber matrix_norm_square (const Vector<somenumber> &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- */
- template <typename somenumber>
- somenumber matrix_scalar_product (const Vector<somenumber> &u,
- const Vector<somenumber> &v) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to be
- * <i>r=b-Mx</i>. Write the
- * residual into
- * <tt>dst</tt>. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and destination
- * <i>dst</i> must not be the same
- * vector.
- */
- template <typename somenumber>
- somenumber residual (Vector<somenumber> &dst,
- const Vector<somenumber> &x,
- const Vector<somenumber> &b) const;
-
- /**
- * Perform the matrix-matrix
- * multiplication <tt>C = A * B</tt>,
- * or, if an optional vector argument
- * is given, <tt>C = A * diag(V) *
- * B</tt>, where <tt>diag(V)</tt>
- * defines a diagonal matrix with the
- * vector entries.
- *
- * This function assumes that the
- * calling matrix <tt>A</tt> and
- * <tt>B</tt> have compatible
- * sizes. The size of <tt>C</tt> will
- * be set within this function.
- *
- * The content as well as the sparsity
- * pattern of the matrix C will be
- * changed by this function, so make
- * sure that the sparsity pattern is
- * not used somewhere else in your
- * program. This is an expensive
- * operation, so think twice before you
- * use this function.
- *
- * There is an optional flag
- * <tt>rebuild_sparsity_pattern</tt>
- * that can be used to bypass the
- * creation of a new sparsity pattern
- * and instead uses the sparsity
- * pattern stored in <tt>C</tt>. In
- * that case, make sure that it really
- * fits. The default is to rebuild the
- * sparsity pattern.
- *
- * @note Rebuilding the sparsity pattern
- * requires changing it. This means that
- * all other matrices that are associated
- * with this sparsity pattern will
- * then have invalid entries.
- */
- template <typename numberB, typename numberC>
- void mmult (SparseMatrix<numberC> &C,
- const SparseMatrix<numberB> &B,
- const Vector<number> &V = Vector<number>(),
- const bool rebuild_sparsity_pattern = true) const;
-
- /**
- * Perform the matrix-matrix
- * multiplication with the transpose of
- * <tt>this</tt>, i.e., <tt>C =
- * A<sup>T</sup> * B</tt>, or, if an
- * optional vector argument is given,
- * <tt>C = A<sup>T</sup> * diag(V) *
- * B</tt>, where <tt>diag(V)</tt>
- * defines a diagonal matrix with the
- * vector entries.
- *
- * This function assumes that the
- * calling matrix <tt>A</tt> and
- * <tt>B</tt> have compatible
- * sizes. The size of <tt>C</tt> will
- * be set within this function.
- *
- * The content as well as the sparsity
- * pattern of the matrix C will be
- * changed by this function, so make
- * sure that the sparsity pattern is
- * not used somewhere else in your
- * program. This is an expensive
- * operation, so think twice before you
- * use this function.
- *
- * There is an optional flag
- * <tt>rebuild_sparsity_pattern</tt>
- * that can be used to bypass the
- * creation of a new sparsity pattern
- * and instead uses the sparsity
- * pattern stored in <tt>C</tt>. In
- * that case, make sure that it really
- * fits. The default is to rebuild the
- * sparsity pattern.
- *
- * @note Rebuilding the sparsity pattern
- * requires changing it. This means that
- * all other matrices that are associated
- * with this sparsity pattern will
- * then have invalid entries.
- */
- template <typename numberB, typename numberC>
- void Tmmult (SparseMatrix<numberC> &C,
- const SparseMatrix<numberB> &B,
- const Vector<number> &V = Vector<number>(),
- const bool rebuild_sparsity_pattern = true) const;
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult (OutVector &dst,
+ const InVector &src) const;
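+
+ // A minimal sketch of a matrix-vector product (<tt>matrix</tt>,
+ // <tt>x</tt> and <tt>y</tt> are hypothetical):
+ //
+ //   Vector<double> x (matrix.n ());
+ //   Vector<double> y (matrix.m ());
+ //   // ...fill x...
+ //   matrix.vmult (y, x);       // y  = M*x
+ //   matrix.vmult_add (y, x);   // y += M*x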
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does the
+ * same as vmult() but takes
+ * the transposed matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> to <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix,
+ * i.e. $\left(v,Mv\right)$. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to be
+ * quadratic for this operation, and for
+ * the result to actually be a norm it
+ * also needs to be either real symmetric
+ * or complex Hermitian.
+ *
+ * The underlying template types of both
+ * this matrix and the given vector
+ * should either both be real or
+ * complex-valued, but not mixed, for
+ * this function to make sense.
+ */
+ template <typename somenumber>
+ somenumber matrix_norm_square (const Vector<somenumber> &v) const;
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ */
+ template <typename somenumber>
+ somenumber matrix_scalar_product (const Vector<somenumber> &u,
+ const Vector<somenumber> &v) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to be
+ * <i>r=b-Mx</i>. Write the
+ * residual into
+ * <tt>dst</tt>. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and destination
+ * <i>dst</i> must not be the same
+ * vector.
+ */
+ template <typename somenumber>
+ somenumber residual (Vector<somenumber> &dst,
+ const Vector<somenumber> &x,
+ const Vector<somenumber> &b) const;
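+
+ // A minimal sketch of a residual computation (<tt>matrix</tt>, <tt>x</tt>
+ // and <tt>b</tt> are hypothetical objects of matching sizes):
+ //
+ //   Vector<double> r (matrix.m ());
+ //   const double res_l2 = matrix.residual (r, x, b);   // r = b - M*x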
+
+ /**
+ * Perform the matrix-matrix
+ * multiplication <tt>C = A * B</tt>,
+ * or, if an optional vector argument
+ * is given, <tt>C = A * diag(V) *
+ * B</tt>, where <tt>diag(V)</tt>
+ * defines a diagonal matrix with the
+ * vector entries.
+ *
+ * This function assumes that the
+ * calling matrix <tt>A</tt> and
+ * <tt>B</tt> have compatible
+ * sizes. The size of <tt>C</tt> will
+ * be set within this function.
+ *
+ * The content as well as the sparsity
+ * pattern of the matrix C will be
+ * changed by this function, so make
+ * sure that the sparsity pattern is
+ * not used somewhere else in your
+ * program. This is an expensive
+ * operation, so think twice before you
+ * use this function.
+ *
+ * There is an optional flag
+ * <tt>rebuild_sparsity_pattern</tt>
+ * that can be used to bypass the
+ * creation of a new sparsity pattern
+ * and instead use the sparsity
+ * pattern stored in <tt>C</tt>. In
+ * that case, make sure that it really
+ * fits. The default is to rebuild the
+ * sparsity pattern.
+ *
+ * @note Rebuilding the sparsity pattern
+ * requires changing it. This means that
+ * all other matrices that are associated
+ * with this sparsity pattern will
+ * then have invalid entries.
+ */
+ template <typename numberB, typename numberC>
+ void mmult (SparseMatrix<numberC> &C,
+ const SparseMatrix<numberB> &B,
+ const Vector<number> &V = Vector<number>(),
+ const bool rebuild_sparsity_pattern = true) const;
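+
+ // A sketch of a matrix-matrix product (the matrices <tt>A</tt> and
+ // <tt>B</tt> are hypothetical and of compatible sizes; <tt>C</tt> is tied
+ // to a sparsity pattern object that this function is allowed to rebuild):
+ //
+ //   SparsityPattern      pattern_C;
+ //   SparseMatrix<double> C (pattern_C);
+ //   A.mmult (C, B);   // C = A * B
+ //   // with the optional vector argument V: C = A * diag(V) * B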
+
+ /**
+ * Perform the matrix-matrix
+ * multiplication with the transpose of
+ * <tt>this</tt>, i.e., <tt>C =
+ * A<sup>T</sup> * B</tt>, or, if an
+ * optional vector argument is given,
+ * <tt>C = A<sup>T</sup> * diag(V) *
+ * B</tt>, where <tt>diag(V)</tt>
+ * defines a diagonal matrix with the
+ * vector entries.
+ *
+ * This function assumes that the
+ * calling matrix <tt>A</tt> and
+ * <tt>B</tt> have compatible
+ * sizes. The size of <tt>C</tt> will
+ * be set within this function.
+ *
+ * The content as well as the sparsity
+ * pattern of the matrix C will be
+ * changed by this function, so make
+ * sure that the sparsity pattern is
+ * not used somewhere else in your
+ * program. This is an expensive
+ * operation, so think twice before you
+ * use this function.
+ *
+ * There is an optional flag
+ * <tt>rebuild_sparsity_pattern</tt>
+ * that can be used to bypass the
+ * creation of a new sparsity pattern
+ * and instead use the sparsity
+ * pattern stored in <tt>C</tt>. In
+ * that case, make sure that it really
+ * fits. The default is to rebuild the
+ * sparsity pattern.
+ *
+ * @note Rebuilding the sparsity pattern
+ * requires changing it. This means that
+ * all other matrices that are associated
+ * with this sparsity pattern will
+ * then have invalid entries.
+ */
+ template <typename numberB, typename numberC>
+ void Tmmult (SparseMatrix<numberC> &C,
+ const SparseMatrix<numberB> &B,
+ const Vector<number> &V = Vector<number>(),
+ const bool rebuild_sparsity_pattern = true) const;
//@}
- /**
- * @name Matrix norms
- */
+ /**
+ * @name Matrix norms
+ */
//@{
- /**
- * Return the $l_1$-norm of the matrix,
- * that is $|M|_1=\max_{\mathrm{all\
- * columns\ }j}\sum_{\mathrm{all\ rows\
- * } i} |M_{ij}|$, (max. sum of
- * columns). This is the natural
- * matrix norm that is compatible to
- * the $l_1$-norm for vectors, i.e.
- * $|Mv|_1\leq |M|_1 |v|_1$.
- * (cf. Haemmerlin-Hoffmann :
- * Numerische Mathematik)
- */
- real_type l1_norm () const;
-
- /**
- * Return the $l_\infty$-norm of the
- * matrix, that is
- * $|M|_\infty=\max_{\mathrm{all\ rows\
- * }i}\sum_{\mathrm{all\ columns\ }j}
- * |M_{ij}|$, (max. sum of rows). This
- * is the natural matrix norm that is
- * compatible to the $l_\infty$-norm of
- * vectors, i.e. $|Mv|_\infty \leq
- * |M|_\infty |v|_\infty$.
- * (cf. Haemmerlin-Hoffmann :
- * Numerische Mathematik)
- */
- real_type linfty_norm () const;
-
- /**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
- * matrix.
- */
- real_type frobenius_norm () const;
+ /**
+ * Return the $l_1$-norm of the matrix,
+ * that is $|M|_1=\max_{\mathrm{all\
+ * columns\ }j}\sum_{\mathrm{all\ rows\
+ * } i} |M_{ij}|$, (max. sum of
+ * columns). This is the natural
+ * matrix norm that is compatible with
+ * the $l_1$-norm for vectors, i.e.
+ * $|Mv|_1\leq |M|_1 |v|_1$.
+ * (cf. Haemmerlin-Hoffmann :
+ * Numerische Mathematik)
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Return the $l_\infty$-norm of the
+ * matrix, that is
+ * $|M|_\infty=\max_{\mathrm{all\ rows\
+ * }i}\sum_{\mathrm{all\ columns\ }j}
+ * |M_{ij}|$, (max. sum of rows). This
+ * is the natural matrix norm that is
+ * compatible with the $l_\infty$-norm of
+ * vectors, i.e. $|Mv|_\infty \leq
+ * |M|_\infty |v|_\infty$.
+ * (cf. Haemmerlin-Hoffmann :
+ * Numerische Mathematik)
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return the Frobenius norm of the
+ * matrix, i.e. the square root of the
+ * sum of squares of all entries in the
+ * matrix.
+ */
+ real_type frobenius_norm () const;
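+
+ // A minimal sketch of the three norms (<tt>matrix</tt> is hypothetical):
+ //
+ //   const double n1   = matrix.l1_norm ();        // max. column sum
+ //   const double ninf = matrix.linfty_norm ();    // max. row sum
+ //   const double nfro = matrix.frobenius_norm (); // sqrt of sum of squares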
//@}
- /**
- * @name Preconditioning methods
- */
+ /**
+ * @name Preconditioning methods
+ */
//@{
- /**
- * Apply the Jacobi
- * preconditioner, which
- * multiplies every element of
- * the <tt>src</tt> vector by the
- * inverse of the respective
- * diagonal element and
- * multiplies the result with the
- * relaxation factor <tt>omega</tt>.
- */
- template <typename somenumber>
- void precondition_Jacobi (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number omega = 1.) const;
-
- /**
- * Apply SSOR preconditioning to
- * <tt>src</tt> with damping
- * <tt>omega</tt>. The optional
- * argument
- * <tt>pos_right_of_diagonal</tt> is
- * supposed to provide an array where
- * each entry specifies the position
- * just right of the diagonal in the
- * global array of nonzeros.
- */
- template <typename somenumber>
- void precondition_SSOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number omega = 1.,
- const std::vector<unsigned int>&pos_right_of_diagonal=std::vector<unsigned int>()) const;
-
- /**
- * Apply SOR preconditioning
- * matrix to <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_SOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Apply transpose SOR
- * preconditioning matrix to
- * <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_TSOR (Vector<somenumber> &dst,
+ /**
+ * Apply the Jacobi
+ * preconditioner, which
+ * multiplies every element of
+ * the <tt>src</tt> vector by the
+ * inverse of the respective
+ * diagonal element and
+ * multiplies the result with the
+ * relaxation factor <tt>omega</tt>.
+ */
+ template <typename somenumber>
+ void precondition_Jacobi (Vector<somenumber> &dst,
const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Perform SSOR preconditioning
- * in-place. Apply the
- * preconditioner matrix without
- * copying to a second vector.
- * <tt>omega</tt> is the relaxation
- * parameter.
- */
- template <typename somenumber>
- void SSOR (Vector<somenumber> &v,
- const number omega = 1.) const;
-
- /**
- * Perform an SOR preconditioning
- * in-place. <tt>omega</tt> is
- * the relaxation parameter.
- */
- template <typename somenumber>
- void SOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Apply SSOR preconditioning to
+ * <tt>src</tt> with damping
+ * <tt>omega</tt>. The optional
+ * argument
+ * <tt>pos_right_of_diagonal</tt> is
+ * supposed to provide an array where
+ * each entry specifies the position
+ * just right of the diagonal in the
+ * global array of nonzeros.
+ */
+ template <typename somenumber>
+ void precondition_SSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number omega = 1.,
+ const std::vector<unsigned int> &pos_right_of_diagonal=std::vector<unsigned int>()) const;
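+
+ // A minimal sketch of applying the preconditioners directly
+ // (<tt>matrix</tt>, <tt>dst</tt> and <tt>src</tt> are hypothetical; in
+ // practice these functions are usually invoked through the
+ // PreconditionJacobi and PreconditionSSOR wrapper classes):
+ //
+ //   matrix.precondition_Jacobi (dst, src, 1.0);
+ //   matrix.precondition_SSOR   (dst, src, 1.2);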
+
+ /**
+ * Apply SOR preconditioning
+ * matrix to <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_SOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Apply transpose SOR
+ * preconditioning matrix to
+ * <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_TSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Perform SSOR preconditioning
+ * in-place. Apply the
+ * preconditioner matrix without
+ * copying to a second vector.
+ * <tt>omega</tt> is the relaxation
+ * parameter.
+ */
+ template <typename somenumber>
+ void SSOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Perform an SOR preconditioning
+ * in-place. <tt>omega</tt> is
+ * the relaxation parameter.
+ */
+ template <typename somenumber>
+ void SOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a transpose SOR
+ * preconditioning in-place.
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TSOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a permuted SOR
+ * preconditioning in-place.
+ *
+ * The standard SOR method is
+ * applied in the order
+ * prescribed by <tt>permutation</tt>,
+ * that is, first the row
+ * <tt>permutation[0]</tt>, then
+ * <tt>permutation[1]</tt> and so
+ * on. For efficiency reasons,
+ * the permutation as well as its
+ * inverse are required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void PSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
+ const number om = 1.) const;
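+
+ // A minimal sketch of a permuted SOR sweep (<tt>matrix</tt> and
+ // <tt>v</tt> are hypothetical; <tt>perm</tt> must hold a permutation of
+ // the row indices and <tt>inv_perm</tt> its inverse):
+ //
+ //   std::vector<unsigned int> perm (matrix.m ()), inv_perm (matrix.m ());
+ //   // ...fill perm and inv_perm...
+ //   matrix.PSOR (v, perm, inv_perm, 1.0);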
+
+ /**
+ * Perform a transposed permuted SOR
+ * preconditioning in-place.
+ *
+ * The transposed SOR method is
+ * applied in the order
+ * prescribed by
+ * <tt>permutation</tt>, that is,
+ * first the row
+ * <tt>permutation[m()-1]</tt>,
+ * then
+ * <tt>permutation[m()-2]</tt>
+ * and so on. For efficiency
+ * reasons, the permutation as
+ * well as its inverse are
+ * required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TPSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
const number om = 1.) const;
- /**
- * Perform a transpose SOR
- * preconditioning in-place.
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TSOR (Vector<somenumber> &v,
- const number om = 1.) const;
-
- /**
- * Perform a permuted SOR
- * preconditioning in-place.
- *
- * The standard SOR method is
- * applied in the order
- * prescribed by <tt>permutation</tt>,
- * that is, first the row
- * <tt>permutation[0]</tt>, then
- * <tt>permutation[1]</tt> and so
- * on. For efficiency reasons,
- * the permutation as well as its
- * inverse are required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void PSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Perform a transposed permuted SOR
- * preconditioning in-place.
- *
- * The transposed SOR method is
- * applied in the order
- * prescribed by
- * <tt>permutation</tt>, that is,
- * first the row
- * <tt>permutation[m()-1]</tt>,
- * then
- * <tt>permutation[m()-2]</tt>
- * and so on. For efficiency
- * reasons, the permutation as
- * well as its inverse are
- * required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TPSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Do one Jacobi step on
- * <tt>v</tt>. Performs a direct
- * Jacobi step with right hand
- * side <tt>b</tt>. This function
- * will need an auxiliary vector,
- * which is acquired from
- * GrowingVectorMemory.
- */
- template <typename somenumber>
- void Jacobi_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one SOR step on <tt>v</tt>.
- * Performs a direct SOR step
- * with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void SOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one adjoint SOR step on
- * <tt>v</tt>. Performs a direct
- * TSOR step with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void TSOR_step (Vector<somenumber> &v,
+ /**
+ * Do one Jacobi step on
+ * <tt>v</tt>. Performs a direct
+ * Jacobi step with right hand
+ * side <tt>b</tt>. This function
+ * will need an auxiliary vector,
+ * which is acquired from
+ * GrowingVectorMemory.
+ */
+ template <typename somenumber>
+ void Jacobi_step (Vector<somenumber> &v,
const Vector<somenumber> &b,
const number om = 1.) const;
template <typename number>
SparseMatrix<number>::SparseMatrix (const SparsityPattern &c,
- const IdentityMatrix &id)
+ const IdentityMatrix &id)
- :
- cols(0, "SparseMatrix"),
- val(0),
- max_len(0)
+ :
+ cols(0, "SparseMatrix"),
+ val(0),
+ max_len(0)
{
Assert (c.n_rows() == id.m(), ExcDimensionMismatch (c.n_rows(), id.m()));
Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n()));
const unsigned int n = src.size();
somenumber *dst_ptr = dst.begin();
const somenumber *src_ptr = src.begin();
- const std::size_t *rowstart_ptr = &cols->rowstart[0];
+ const std::size_t *rowstart_ptr = &cols->rowstart[0];
- // optimize the following loop for
- // the case that the relaxation
- // factor is one. In that case, we
- // can save one FP multiplication
- // per row
- //
- // note that for square matrices,
- // the diagonal entry is the first
- // in each row, i.e. at index
- // rowstart[i]. and we do have a
- // square matrix by above assertion
+ // optimize the following loop for
+ // the case that the relaxation
+ // factor is one. In that case, we
+ // can save one FP multiplication
+ // per row
+ //
+ // note that for square matrices,
+ // the diagonal entry is the first
+ // in each row, i.e. at index
+ // rowstart[i]. and we do have a
+ // square matrix by above assertion
if (om != 1.)
for (unsigned int i=0; i<n; ++i, ++dst_ptr, ++src_ptr, ++rowstart_ptr)
*dst_ptr = om * *src_ptr / val[*rowstart_ptr];
Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
const unsigned int n = src.size();
- const std::size_t *rowstart_ptr = &cols->rowstart[0];
+ const std::size_t *rowstart_ptr = &cols->rowstart[0];
somenumber *dst_ptr = &dst(0);
- // case when we have stored the position
- // just right of the diagonal (then we
- // don't have to search for it).
+ // case when we have stored the position
+ // just right of the diagonal (then we
+ // don't have to search for it).
if (pos_right_of_diagonal.size() != 0)
{
Assert (pos_right_of_diagonal.size() == dst.size(),
*/
class SparsityPattern : public Subscriptor
{
- public:
- /**
- * Typedef an iterator class that allows
- * to walk over all nonzero elements of a
- * sparsity pattern.
- */
- typedef
- SparsityPatternIterators::Iterator
- const_iterator;
-
- /**
- * Typedef an iterator class that allows
- * to walk over the nonzero elements of a
- * row of a sparsity pattern.
- */
- typedef
- const unsigned int * row_iterator;
-
- /**
- * Typedef an iterator class that allows
- * to walk over all nonzero elements of a
- * sparsity pattern.
- *
- * Since the iterator does not allow to
- * modify the sparsity pattern, this type
- * is the same as that for @p
- * const_iterator.
- */
- typedef
- SparsityPatternIterators::Iterator
- iterator;
-
-
- /**
- * Define a value which is used
- * to indicate that a certain
- * value in the #colnums array
- * is unused, i.e. does not
- * represent a certain column
- * number index.
- *
- * Indices with this invalid
- * value are used to insert new
- * entries to the sparsity
- * pattern using the add() member
- * function, and are removed when
- * calling compress().
- *
- * You should not assume that the
- * variable declared here has a
- * certain value. The
- * initialization is given here
- * only to enable the compiler to
- * perform some optimizations,
- * but the actual value of the
- * variable may change over time.
- */
- static const unsigned int invalid_entry = numbers::invalid_unsigned_int;
-
- /**
- * @name Construction and setup
- * Constructors, destructor; functions initializing, copying and filling an object.
- */
+ public:
+ /**
+ * Typedef an iterator class that allows
+ * to walk over all nonzero elements of a
+ * sparsity pattern.
+ */
+ typedef
+ SparsityPatternIterators::Iterator
+ const_iterator;
+
+ /**
+ * Typedef an iterator class that allows
+ * to walk over the nonzero elements of a
+ * row of a sparsity pattern.
+ */
+ typedef
+ const unsigned int *row_iterator;
+
+ /**
+ * Typedef an iterator class that allows
+ * to walk over all nonzero elements of a
+ * sparsity pattern.
+ *
+ * Since the iterator does not allow to
+ * modify the sparsity pattern, this type
+ * is the same as that for @p
+ * const_iterator.
+ */
+ typedef
+ SparsityPatternIterators::Iterator
+ iterator;
+
+
+ /**
+ * Define a value which is used
+ * to indicate that a certain
+ * value in the #colnums array
+ * is unused, i.e. does not
+ * represent a certain column
+ * number index.
+ *
+ * Indices with this invalid
+ * value are used to insert new
+ * entries to the sparsity
+ * pattern using the add() member
+ * function, and are removed when
+ * calling compress().
+ *
+ * You should not assume that the
+ * variable declared here has a
+ * certain value. The
+ * initialization is given here
+ * only to enable the compiler to
+ * perform some optimizations,
+ * but the actual value of the
+ * variable may change over time.
+ */
+ static const unsigned int invalid_entry = numbers::invalid_unsigned_int;
+
+ /**
+ * @name Construction and setup
+ * Constructors, destructor; functions initializing, copying and filling an object.
+ */
// @{
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- SparsityPattern ();
-
- /**
- * Copy constructor. This
- * constructor is only allowed to
- * be called if the matrix
- * structure to be copied is
- * empty. This is so in order to
- * prevent involuntary copies of
- * objects for temporaries, which
- * can use large amounts of
- * computing time. However, copy
- * constructors are needed if yo
- * want to use the STL data types
- * on classes like this, e.g. to
- * write such statements like
- * <tt>v.push_back
- * (SparsityPattern());</tt>,
- * with <tt>v</tt> a vector of
- * SparsityPattern objects.
- *
- * Usually, it is sufficient to
- * use the explicit keyword to
- * disallow unwanted temporaries,
- * but for the STL vectors, this
- * does not work. Since copying a
- * structure like this is not
- * useful anyway because multiple
- * matrices can use the same
- * sparsity structure, copies are
- * only allowed for empty
- * objects, as described above.
- */
- SparsityPattern (const SparsityPattern &);
-
- /**
- * Initialize a rectangular
- * matrix.
- *
- * @arg m number of rows
- * @arg n number of columns
- * @arg max_per_row maximum
- * number of nonzero entries per row
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal(). This
- * takes effect for quadratic
- * matrices only.
- */
- SparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const bool optimize_diagonal = true);
-
- /**
- * Initialize a rectangular
- * matrix.
- *
- * @arg m number of rows
- * @arg n number of columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal(). This
- * takes effect for quadratic
- * matrices only.
- */
- SparsityPattern (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int>& row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * Initialize a quadratic matrix
- * of dimension <tt>n</tt> with
- * at most <tt>max_per_row</tt>
- * nonzero entries per row.
- *
- * This constructor automatically
- * enables optimized storage of
- * diagonal elements. To avoid
- * this, use the constructor
- * taking row and column numbers
- * separately.
- */
- SparsityPattern (const unsigned int n,
- const unsigned int max_per_row);
-
- /**
- * Initialize a quadratic matrix.
- *
- * @arg m number of rows and columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal().
- */
- SparsityPattern (const unsigned int m,
- const std::vector<unsigned int>& row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * Make a copy with extra off-diagonals.
- *
- * This constructs objects intended for
- * the application of the ILU(n)-method
- * or other incomplete decompositions.
- * Therefore, additional to the original
- * entry structure, space for
- * <tt>extra_off_diagonals</tt>
- * side-diagonals is provided on both
- * sides of the main diagonal.
- *
- * <tt>max_per_row</tt> is the
- * maximum number of nonzero
- * elements per row which this
- * structure is to hold. It is
- * assumed that this number is
- * sufficiently large to
- * accommodate both the elements
- * in <tt>original</tt> as well
- * as the new off-diagonal
- * elements created by this
- * constructor. You will usually
- * want to give the same number
- * as you gave for
- * <tt>original</tt> plus the
- * number of side diagonals times
- * two. You may however give a
- * larger value if you wish to
- * add further nonzero entries
- * for the decomposition based on
- * other criteria than their
- * being on side-diagonals.
- *
- * This function requires that
- * <tt>original</tt> refers to a
- * quadratic matrix structure.
- * It must be compressed. The
- * matrix structure is not
- * compressed after this function
- * finishes.
- */
- SparsityPattern (const SparsityPattern &original,
- const unsigned int max_per_row,
- const unsigned int extra_off_diagonals);
-
- /**
- * Destructor.
- */
- ~SparsityPattern ();
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- SparsityPattern & operator = (const SparsityPattern &);
-
- /**
- * Reallocate memory and set up data
- * structures for a new matrix with
- * <tt>m </tt>rows and <tt>n</tt> columns,
- * with at most <tt>max_per_row</tt>
- * nonzero entries per row.
- *
- * This function simply maps its
- * operations to the other
- * <tt>reinit</tt> function.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const bool optimize_diagonal = true);
-
- /**
- * Reallocate memory for a matrix
- * of size <tt>m x n</tt>. The
- * number of entries for each row
- * is taken from the array
- * <tt>row_lengths</tt> which has to
- * give this number of each row
- * <tt>i=1...m</tt>.
- *
- * If <tt>m*n==0</tt> all memory is freed,
- * resulting in a total reinitialization
- * of the object. If it is nonzero, new
- * memory is only allocated if the new
- * size extends the old one. This is done
- * to save time and to avoid fragmentation
- * of the heap.
- *
- * If the number of rows equals
- * the number of columns and the
- * last parameter is true,
- * diagonal elements are stored
- * first in each row to allow
- * optimized access in relaxation
- * methods of SparseMatrix.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * Same as above, but with a
- * VectorSlice argument instead.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * This function compresses the sparsity
- * structure that this object represents.
- * It does so by eliminating unused
- * entries and sorting the remaining ones
- * to allow faster access by usage of
- * binary search algorithms. A special
- * sorting scheme is used for the
- * diagonal entry of quadratic matrices,
- * which is always the first entry of
- * each row.
- *
- * The memory which is no more
- * needed is released.
- *
- * SparseMatrix objects require the
- * SparsityPattern objects they are
- * initialized with to be compressed, to
- * reduce memory requirements.
- */
- void compress ();
-
- /**
- * This function can be used as a
- * replacement for reinit(),
- * subsequent calls to add() and
- * a final call to close() if you
- * know exactly in advance the
- * entries that will form the
- * matrix sparsity pattern.
- *
- * The first two parameters
- * determine the size of the
- * matrix. For the two last ones,
- * note that a sparse matrix can
- * be described by a sequence of
- * rows, each of which is
- * represented by a sequence of
- * pairs of column indices and
- * values. In the present
- * context, the begin() and
- * end() parameters designate
- * iterators (of forward iterator
- * type) into a container, one
- * representing one row. The
- * distance between begin()
- * and end() should therefore
- * be equal to
- * n_rows(). These iterators
- * may be iterators of
- * <tt>std::vector</tt>,
- * <tt>std::list</tt>, pointers into a
- * C-style array, or any other
- * iterator satisfying the
- * requirements of a forward
- * iterator. The objects pointed
- * to by these iterators
- * (i.e. what we get after
- * applying <tt>operator*</tt> or
- * <tt>operator-></tt> to one of these
- * iterators) must be a container
- * itself that provides functions
- * <tt>begin</tt> and <tt>end</tt>
- * designating a range of
- * iterators that describe the
- * contents of one
- * line. Dereferencing these
- * inner iterators must either
- * yield a pair of an unsigned
- * integer as column index and a
- * value of arbitrary type (such
- * a type would be used if we
- * wanted to describe a sparse
- * matrix with one such object),
- * or simply an unsigned integer
- * (of we only wanted to describe
- * a sparsity pattern). The
- * function is able to determine
- * itself whether an unsigned
- * integer or a pair is what we
- * get after dereferencing the
- * inner iterators, through some
- * template magic.
- *
- * While the order of the outer
- * iterators denotes the
- * different rows of the matrix,
- * the order of the inner
- * iterator denoting the columns
- * does not matter, as they are
- * sorted internal to this
- * function anyway.
- *
- * Since that all sounds very
- * complicated, consider the
- * following example code, which
- * may be used to fill a sparsity
- * pattern:
- * @code
- * std::vector<std::vector<unsigned int> > column_indices (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
- * // generate necessary columns in this row
- * fill_row (column_indices[row]);
- *
- * sparsity.copy_from (n_rows, n_cols,
- * column_indices.begin(),
- * column_indices.end());
- * @endcode
- *
- * Note that this example works
- * since the iterators
- * dereferenced yield containers
- * with functions <tt>begin</tt> and
- * <tt>end</tt> (namely
- * <tt>std::vector</tt>s), and the
- * inner iterators dereferenced
- * yield unsigned integers as
- * column indices. Note that we
- * could have replaced each of
- * the two <tt>std::vector</tt>
- * occurrences by <tt>std::list</tt>,
- * and the inner one by
- * <tt>std::set</tt> as well.
- *
- * Another example would be as
- * follows, where we initialize a
- * whole matrix, not only a
- * sparsity pattern:
- * @code
- * std::vector<std::map<unsigned int,double> > entries (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
- * // generate necessary pairs of columns
- * // and corresponding values in this row
- * fill_row (entries[row]);
- *
- * sparsity.copy_from (n_rows, n_cols,
- * column_indices.begin(),
- * column_indices.end());
- * matrix.reinit (sparsity);
- * matrix.copy_from (column_indices.begin(),
- * column_indices.end());
- * @endcode
- *
- * This example works because
- * dereferencing iterators of the
- * inner type yields a pair of
- * unsigned integers and a value,
- * the first of which we take as
- * column index. As previously,
- * the outer <tt>std::vector</tt>
- * could be replaced by
- * <tt>std::list</tt>, and the inner
- * <tt>std::map<unsigned int,double></tt>
- * could be replaced by
- * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
- * or a list or set of such
- * pairs, as they all return
- * iterators that point to such
- * pairs.
- */
- template <typename ForwardIterator>
- void copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
- const ForwardIterator begin,
- const ForwardIterator end,
- const bool optimize_diagonal = true);
-
- /**
- * Copy data from an object of type
- * CompressedSparsityPattern,
- * CompressedSetSparsityPattern or
- * CompressedSimpleSparsityPattern.
- * Previous content of this object is
- * lost, and the sparsity pattern is in
- * compressed mode afterwards.
- */
- template <typename CompressedSparsityType>
- void copy_from (const CompressedSparsityType &csp,
- const bool optimize_diagonal = true);
-
- /**
- * Take a full matrix and use its
- * nonzero entries to generate a
- * sparse matrix entry pattern
- * for this object.
- *
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
- * compressed mode afterwards.
- */
- template <typename number>
- void copy_from (const FullMatrix<number> &matrix,
- const bool optimize_diagonal = true);
-
- /**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
- * transpose object.
- *
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * quadratic matrix.
- */
- void symmetrize ();
-
- /**
- * Add a nonzero entry to the matrix.
- * This function may only be called
- * for non-compressed sparsity patterns.
- *
- * If the entry already exists, nothing
- * bad happens.
- */
- void add (const unsigned int i,
- const unsigned int j);
-
- /**
- * Add several nonzero entries to the
- * specified matrix row. This function
- * may only be called for
- * non-compressed sparsity patterns.
- *
- * If some of the entries already
- * exist, nothing bad happens.
- */
- template <typename ForwardIterator>
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ SparsityPattern ();
+
+ /**
+ * Copy constructor. This
+ * constructor is only allowed to
+ * be called if the matrix
+ * structure to be copied is
+ * empty. This is so in order to
+ * prevent involuntary copies of
+ * objects for temporaries, which
+ * can use large amounts of
+ * computing time. However, copy
+ * constructors are needed if you
+ * want to use the STL data types
+ * on classes like this, e.g. to
+ * write statements like
+ * <tt>v.push_back
+ * (SparsityPattern());</tt>,
+ * with <tt>v</tt> a vector of
+ * SparsityPattern objects.
+ *
+ * Usually, it is sufficient to
+ * use the explicit keyword to
+ * disallow unwanted temporaries,
+ * but for the STL vectors, this
+ * does not work. Since copying a
+ * structure like this is not
+ * useful anyway because multiple
+ * matrices can use the same
+ * sparsity structure, copies are
+ * only allowed for empty
+ * objects, as described above.
+ */
+ SparsityPattern (const SparsityPattern &);
+
+ /**
+ * Initialize a rectangular
+ * matrix.
+ *
+ * @arg m number of rows
+ * @arg n number of columns
+ * @arg max_per_row maximum
+ * number of nonzero entries per row
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal(). This
+ * takes effect for quadratic
+ * matrices only.
+ */
+ SparsityPattern (const unsigned int m,
+ const unsigned int n,
+ const unsigned int max_per_row,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Initialize a rectangular
+ * matrix.
+ *
+ * @arg m number of rows
+ * @arg n number of columns
+ *
+ * @arg row_lengths possible
+ * number of nonzero entries for
+ * each row. This vector must
+ * have one entry for each row.
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal(). This
+ * takes effect for quadratic
+ * matrices only.
+ */
+ SparsityPattern (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Initialize a quadratic matrix
+ * of dimension <tt>n</tt> with
+ * at most <tt>max_per_row</tt>
+ * nonzero entries per row.
+ *
+ * This constructor automatically
+ * enables optimized storage of
+ * diagonal elements. To avoid
+ * this, use the constructor
+ * taking row and column numbers
+ * separately.
+ */
+ SparsityPattern (const unsigned int n,
+ const unsigned int max_per_row);
+
+ /**
+ * Initialize a quadratic matrix.
+ *
+ * @arg m number of rows and columns
+ *
+ * @arg row_lengths possible
+ * number of nonzero entries for
+ * each row. This vector must
+ * have one entry for each row.
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal().
+ */
+ SparsityPattern (const unsigned int m,
+ const std::vector<unsigned int> &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Make a copy with extra off-diagonals.
+ *
+ * This constructs objects intended for
+ * the application of the ILU(n)-method
+ * or other incomplete decompositions.
+ * Therefore, in addition to the original
+ * entry structure, space for
+ * <tt>extra_off_diagonals</tt>
+ * side-diagonals is provided on both
+ * sides of the main diagonal.
+ *
+ * <tt>max_per_row</tt> is the
+ * maximum number of nonzero
+ * elements per row which this
+ * structure is to hold. It is
+ * assumed that this number is
+ * sufficiently large to
+ * accommodate both the elements
+ * in <tt>original</tt> as well
+ * as the new off-diagonal
+ * elements created by this
+ * constructor. You will usually
+ * want to give the same number
+ * as you gave for
+ * <tt>original</tt> plus the
+ * number of side diagonals times
+ * two. You may however give a
+ * larger value if you wish to
+ * add further nonzero entries
+ * for the decomposition based on
+ * other criteria than their
+ * being on side-diagonals.
+ *
+ * This function requires that
+ * <tt>original</tt> refers to a
+ * quadratic matrix structure.
+ * It must be compressed. The
+ * matrix structure is not
+ * compressed after this function
+ * finishes.
+ */
- SparsityPattern (const SparsityPattern &original,
++ SparsityPattern (const SparsityPattern &original,
+ const unsigned int max_per_row,
+ const unsigned int extra_off_diagonals);
+
+ /**
+ * Destructor.
+ */
+ ~SparsityPattern ();
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ SparsityPattern &operator = (const SparsityPattern &);
+
+ /**
+ * Reallocate memory and set up data
+ * structures for a new matrix with
+ * <tt>m</tt> rows and <tt>n</tt> columns,
+ * with at most <tt>max_per_row</tt>
+ * nonzero entries per row.
+ *
+ * This function simply maps its
+ * operations to the other
+ * <tt>reinit</tt> function.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int max_per_row,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Reallocate memory for a matrix
+ * of size <tt>m x n</tt>. The
+ * number of entries for each row
+ * is taken from the array
+ * <tt>row_lengths</tt> which has to
+ * give this number for each row
+ * <tt>i=1...m</tt>.
+ *
+ * If <tt>m*n==0</tt> all memory is freed,
+ * resulting in a total reinitialization
+ * of the object. If it is nonzero, new
+ * memory is only allocated if the new
+ * size extends the old one. This is done
+ * to save time and to avoid fragmentation
+ * of the heap.
+ *
+ * If the number of rows equals
+ * the number of columns and the
+ * last parameter is true,
+ * diagonal elements are stored
+ * first in each row to allow
+ * optimized access in relaxation
+ * methods of SparseMatrix.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Same as above, but with a
+ * VectorSlice argument instead.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const VectorSlice<const std::vector<unsigned int> > &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * This function compresses the sparsity
+ * structure that this object represents.
+ * It does so by eliminating unused
+ * entries and sorting the remaining ones
+ * to allow faster access by usage of
+ * binary search algorithms. A special
+ * sorting scheme is used for the
+ * diagonal entry of quadratic matrices,
+ * which is always the first entry of
+ * each row.
+ *
+ * The memory which is no longer
+ * needed is released.
+ *
+ * SparseMatrix objects require the
+ * SparsityPattern objects they are
+ * initialized with to be compressed, to
+ * reduce memory requirements.
+ */
+ void compress ();
+
+ /**
+ * This function can be used as a
+ * replacement for reinit(),
+ * subsequent calls to add() and
+ * a final call to close() if you
+ * know exactly in advance the
+ * entries that will form the
+ * matrix sparsity pattern.
+ *
+ * The first two parameters
+ * determine the size of the
+ * matrix. For the two last ones,
+ * note that a sparse matrix can
+ * be described by a sequence of
+ * rows, each of which is
+ * represented by a sequence of
+ * pairs of column indices and
+ * values. In the present
+ * context, the begin() and
+ * end() parameters designate
+ * iterators (of forward iterator
+ * type) into a container, one
+ * representing one row. The
+ * distance between begin()
+ * and end() should therefore
+ * be equal to
+ * n_rows(). These iterators
+ * may be iterators of
+ * <tt>std::vector</tt>,
+ * <tt>std::list</tt>, pointers into a
+ * C-style array, or any other
+ * iterator satisfying the
+ * requirements of a forward
+ * iterator. The objects pointed
+ * to by these iterators
+ * (i.e. what we get after
+ * applying <tt>operator*</tt> or
+ * <tt>operator-></tt> to one of these
+ * iterators) must be a container
+ * itself that provides functions
+ * <tt>begin</tt> and <tt>end</tt>
+ * designating a range of
+ * iterators that describe the
+ * contents of one
+ * line. Dereferencing these
+ * inner iterators must either
+ * yield a pair of an unsigned
+ * integer as column index and a
+ * value of arbitrary type (such
+ * a type would be used if we
+ * wanted to describe a sparse
+ * matrix with one such object),
+ * or simply an unsigned integer
+ * (if we only wanted to describe
+ * a sparsity pattern). The
+ * function is able to determine
+ * itself whether an unsigned
+ * integer or a pair is what we
+ * get after dereferencing the
+ * inner iterators, through some
+ * template magic.
+ *
+ * While the order of the outer
+ * iterators denotes the
+ * different rows of the matrix,
+ * the order of the inner
+ * iterator denoting the columns
+ * does not matter, as they are
+ * sorted internally by this
+ * function anyway.
+ *
+ * Since that all sounds very
+ * complicated, consider the
+ * following example code, which
+ * may be used to fill a sparsity
+ * pattern:
+ * @code
+ * std::vector<std::vector<unsigned int> > column_indices (n_rows);
+ * for (unsigned int row=0; row<n_rows; ++row)
+ * // generate necessary columns in this row
+ * fill_row (column_indices[row]);
+ *
+ * sparsity.copy_from (n_rows, n_cols,
+ * column_indices.begin(),
+ * column_indices.end());
+ * @endcode
+ *
+ * Note that this example works
+ * since the iterators
+ * dereferenced yield containers
+ * with functions <tt>begin</tt> and
+ * <tt>end</tt> (namely
+ * <tt>std::vector</tt>s), and the
+ * inner iterators dereferenced
+ * yield unsigned integers as
+ * column indices. Note that we
+ * could have replaced each of
+ * the two <tt>std::vector</tt>
+ * occurrences by <tt>std::list</tt>,
+ * and the inner one by
+ * <tt>std::set</tt> as well.
+ *
+ * Another example would be as
+ * follows, where we initialize a
+ * whole matrix, not only a
+ * sparsity pattern:
+ * @code
+ * std::vector<std::map<unsigned int,double> > entries (n_rows);
+ * for (unsigned int row=0; row<n_rows; ++row)
+ * // generate necessary pairs of columns
+ * // and corresponding values in this row
+ * fill_row (entries[row]);
+ *
+ * sparsity.copy_from (n_rows, n_cols,
+ * entries.begin(),
+ * entries.end());
+ * matrix.reinit (sparsity);
+ * matrix.copy_from (entries.begin(),
+ * entries.end());
+ * @endcode
+ *
+ * This example works because
+ * dereferencing iterators of the
+ * inner type yields a pair of
+ * unsigned integers and a value,
+ * the first of which we take as
+ * column index. As previously,
+ * the outer <tt>std::vector</tt>
+ * could be replaced by
+ * <tt>std::list</tt>, and the inner
+ * <tt>std::map<unsigned int,double></tt>
+ * could be replaced by
+ * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
+ * or a list or set of such
+ * pairs, as they all return
+ * iterators that point to such
+ * pairs.
+ */
+ template <typename ForwardIterator>
+ void copy_from (const unsigned int n_rows,
+ const unsigned int n_cols,
+ const ForwardIterator begin,
+ const ForwardIterator end,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Copy data from an object of type
+ * CompressedSparsityPattern,
+ * CompressedSetSparsityPattern or
+ * CompressedSimpleSparsityPattern.
+ * Previous content of this object is
+ * lost, and the sparsity pattern is in
+ * compressed mode afterwards.
+ */
+ template <typename CompressedSparsityType>
+ void copy_from (const CompressedSparsityType &csp,
+ const bool optimize_diagonal = true);
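/**
 * A sketch of the intended workflow, assuming the usual interface of
 * CompressedSparsityPattern (sizes and entries are placeholders):
 * @code
 * CompressedSparsityPattern csp (n_rows, n_cols);
 * for (unsigned int row=0; row<n_rows; ++row)
 *   csp.add (row, row);       // e.g. only the diagonal
 *
 * SparsityPattern sparsity;
 * sparsity.copy_from (csp);   // compressed afterwards
 * @endcode
 */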
+
+ /**
+ * Take a full matrix and use its
+ * nonzero entries to generate a
+ * sparse matrix entry pattern
+ * for this object.
+ *
+ * Previous content of this
+ * object is lost, and the
+ * sparsity pattern is in
+ * compressed mode afterwards.
+ */
+ template <typename number>
+ void copy_from (const FullMatrix<number> &matrix,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Make the sparsity pattern
+ * symmetric by adding the
+ * sparsity pattern of the
+ * transpose object.
+ *
+ * This function throws an
+ * exception if the sparsity
+ * pattern does not represent a
+ * quadratic matrix.
+ */
+ void symmetrize ();
+
+ /**
+ * Add a nonzero entry to the matrix.
+ * This function may only be called
+ * for non-compressed sparsity patterns.
+ *
+ * If the entry already exists, nothing
+ * bad happens.
+ */
+ void add (const unsigned int i,
+ const unsigned int j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified matrix row. This function
+ * may only be called for
+ * non-compressed sparsity patterns.
+ *
+ * If some of the entries already
+ * exist, nothing bad happens.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
// @}
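/**
 * The typical build cycle for a static pattern, as a short sketch
 * (<tt>n_rows</tt>, <tt>n_cols</tt> and the entries added are placeholders):
 * @code
 * SparsityPattern sparsity (n_rows, n_cols, 5);
 * for (unsigned int row=0; row<n_rows; ++row)
 *   sparsity.add (row, row);
 * sparsity.compress ();
 *
 * SparseMatrix<double> matrix (sparsity);
 * @endcode
 */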
- /**
- * @name Iterators
- */
+ /**
+ * @name Iterators
+ */
// @{
- /**
- * STL-like iterator with the first entry
- * of the matrix. The resulting iterator
- * can be used to walk over all nonzero
- * entries of the sparsity pattern.
- */
- inline iterator begin () const;
-
- /**
- * Final iterator.
- */
- inline iterator end () const;
-
- /**
- * STL-like iterator with the first entry
- * of row <tt>r</tt>.
- *
- * Note that if the given row is empty,
- * i.e. does not contain any nonzero
- * entries, then the iterator returned by
- * this function equals
- * <tt>end(r)</tt>. Note also that the
- * iterator may not be dereferencable in
- * that case.
- */
- inline iterator begin (const unsigned int r) const;
-
- /**
- * Final iterator of row <tt>r</tt>. It
- * points to the first element past the
- * end of line @p r, or past the end of
- * the entire sparsity pattern.
- *
- * Note that the end iterator is not
- * necessarily dereferencable. This is in
- * particular the case if it is the end
- * iterator for the last row of a matrix.
- */
- inline iterator end (const unsigned int r) const;
-
- /**
- * STL-like iterator with the first entry
- * of row <tt>r</tt>.
- *
- * Note that if the given row is empty,
- * i.e. does not contain any nonzero
- * entries, then the iterator returned by
- * this function equals
- * <tt>end(r)</tt>. Note also that the
- * iterator may not be dereferencable in
- * that case.
- */
- inline row_iterator row_begin (const unsigned int r) const;
-
- /**
- * Final iterator of row <tt>r</tt>. It
- * points to the first element past the
- * end of line @p r, or past the end of
- * the entire sparsity pattern.
- *
- * Note that the end iterator is not
- * necessarily dereferencable. This is in
- * particular the case if it is the end
- * iterator for the last row of a matrix.
- */
- inline row_iterator row_end (const unsigned int r) const;
+ /**
+ * STL-like iterator with the first entry
+ * of the matrix. The resulting iterator
+ * can be used to walk over all nonzero
+ * entries of the sparsity pattern.
+ */
+ inline iterator begin () const;
+
+ /**
+ * Final iterator.
+ */
+ inline iterator end () const;
+
+ /**
+ * STL-like iterator with the first entry
+ * of row <tt>r</tt>.
+ *
+ * Note that if the given row is empty,
+ * i.e. does not contain any nonzero
+ * entries, then the iterator returned by
+ * this function equals
+ * <tt>end(r)</tt>. Note also that the
+ * iterator may not be dereferenceable in
+ * that case.
+ */
+ inline iterator begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>. It
+ * points to the first element past the
+ * end of line @p r, or past the end of
+ * the entire sparsity pattern.
+ *
+ * Note that the end iterator is not
+ * necessarily dereferenceable. This is in
+ * particular the case if it is the end
+ * iterator for the last row of a matrix.
+ */
+ inline iterator end (const unsigned int r) const;
+
+ /**
+ * STL-like iterator with the first entry
+ * of row <tt>r</tt>.
+ *
+ * Note that if the given row is empty,
+ * i.e. does not contain any nonzero
+ * entries, then the iterator returned by
+ * this function equals
+ * <tt>end(r)</tt>. Note also that the
+ * iterator may not be dereferenceable in
+ * that case.
+ */
+ inline row_iterator row_begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>. It
+ * points to the first element past the
+ * end of line @p r, or past the end of
+ * the entire sparsity pattern.
+ *
+ * Note that the end iterator is not
+ * necessarily dereferenceable. This is in
+ * particular the case if it is the end
+ * iterator for the last row of a matrix.
+ */
+ inline row_iterator row_end (const unsigned int r) const;
// @}
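/**
 * An illustrative loop over all stored entries of a compressed pattern,
 * using the accessor members row() and column() of
 * SparsityPatternIterators::Accessor:
 * @code
 * for (SparsityPattern::const_iterator p = sparsity.begin();
 *      p != sparsity.end(); ++p)
 *   std::cout << p->row() << ' ' << p->column() << std::endl;
 * @endcode
 */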
- /**
- * @name Querying information
- */
+ /**
+ * @name Querying information
+ */
// @{
- /**
- * Test for equality of two SparsityPatterns.
- */
- bool operator == (const SparsityPattern &) const;
-
- /**
- * Return whether the object is empty. It
- * is empty if no memory is allocated,
- * which is the same as that both
- * dimensions are zero.
- */
- bool empty () const;
-
- /**
- * Return the maximum number of entries per
- * row. Before compression, this equals the
- * number given to the constructor, while
- * after compression, it equals the maximum
- * number of entries actually allocated by
- * the user.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of $|i-j|$
- * for which the index pair $(i,j)$
- * represents a nonzero entry of the
- * matrix. Consequently, the maximum
- * bandwidth a $n\times m$ matrix can
- * have is $\max\{n-1,m-1\}$.
- */
- unsigned int bandwidth () const;
-
- /**
- * Return the number of nonzero elements of
- * this matrix. Actually, it returns the
- * number of entries in the sparsity
- * pattern; if any of the entries should
- * happen to be zero, it is counted
- * anyway.
- *
- * This function may only be called if the
- * matrix struct is compressed. It does not
- * make too much sense otherwise anyway.
- */
- std::size_t n_nonzero_elements () const;
-
- /**
- * Return whether the structure is
- * compressed or not.
- */
- bool is_compressed () const;
-
- /**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
- */
- inline unsigned int n_rows () const;
-
- /**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
- */
- inline unsigned int n_cols () const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const unsigned int row) const;
-
- /**
- * Determine whether the matrix
- * uses special convention for
- * quadratic matrices.
- *
- * A return value <tt>true</tt> means
- * that diagonal elements are stored
- * first in each row. A number of
- * functions in this class and the
- * library in general, for example
- * relaxation methods like Jacobi() and
- * SOR(), require this to make their
- * operations more efficient, since they
- * need to quickly access the diagonal
- * elements and do not have to search for
- * them if they are the first element of
- * each row. A side effect of this scheme
- * is that each row contains at least one
- * element, even if the row is empty
- * (i.e. the diagonal element exists, but
- * has value zero).
- *
- * A return value <tt>false</tt> means
- * that diagonal elements are stored
- * anywhere in the row, or not at all. In
- * particular, a row or even the whole
- * matrix may be empty. This can be used
- * if you have block matrices where the
- * off-diagonal blocks are quadratic but
- * are never used for operations like the
- * ones mentioned above. In this case,
- * some memory can be saved by not using
- * the diagonal storage optimization.
- */
- bool optimize_diagonal () const;
-
- /**
- * Return whether this object stores only
- * those entries that have been added
- * explicitly, or if the sparsity pattern
- * contains elements that have been added
- * through other means (implicitly) while
- * building it. For the current class,
- * the result is true iff optimize_diag
- * in the constructor or reinit() calls
- * has been set to false, or if the
- * represented matrix is not square.
- *
- * This function mainly serves the
- * purpose of describing the current
- * class in cases where several kinds of
- * sparsity patterns can be passed as
- * template arguments.
- */
- bool stores_only_added_elements () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Test for equality of two SparsityPatterns.
+ */
+ bool operator == (const SparsityPattern &) const;
+
+ /**
+ * Return whether the object is empty. It
+ * is empty if no memory is allocated,
+ * which is the same as that both
+ * dimensions are zero.
+ */
+ bool empty () const;
+
+ /**
+ * Return the maximum number of entries per
+ * row. Before compression, this equals the
+ * number given to the constructor, while
+ * after compression, it equals the maximum
+ * number of entries actually allocated by
+ * the user.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Compute the bandwidth of the matrix
+ * represented by this structure. The
+ * bandwidth is the maximum of $|i-j|$
+ * for which the index pair $(i,j)$
+ * represents a nonzero entry of the
+ * matrix. Consequently, the maximum
+ * bandwidth an $n\times m$ matrix can
+ * have is $\max\{n-1,m-1\}$.
+ */
+ unsigned int bandwidth () const;
+
+ /**
+ * Return the number of nonzero elements of
+ * this matrix. Actually, it returns the
+ * number of entries in the sparsity
+ * pattern; if any of the entries should
+ * happen to be zero, it is counted
+ * anyway.
+ *
+ * This function may only be called if the
+ * matrix struct is compressed. It does not
+ * make too much sense otherwise anyway.
+ */
+ std::size_t n_nonzero_elements () const;
+
+ /**
+ * Return whether the structure is
+ * compressed or not.
+ */
+ bool is_compressed () const;
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the dimension
+ * of the image space.
+ */
+ inline unsigned int n_rows () const;
+
+ /**
+ * Return number of columns of this
+ * matrix, which equals the dimension
+ * of the range space.
+ */
+ inline unsigned int n_cols () const;
+
+ /**
+ * Number of entries in a specific row.
+ */
+ unsigned int row_length (const unsigned int row) const;
+
+ /**
+ * Determine whether the matrix
+ * uses a special convention for
+ * quadratic matrices.
+ *
+ * A return value <tt>true</tt> means
+ * that diagonal elements are stored
+ * first in each row. A number of
+ * functions in this class and the
+ * library in general, for example
+ * relaxation methods like Jacobi() and
+ * SOR(), require this to make their
+ * operations more efficient, since they
+ * need to quickly access the diagonal
+ * elements and do not have to search for
+ * them if they are the first element of
+ * each row. A side effect of this scheme
+ * is that each row contains at least one
+ * element, even if the row is empty
+ * (i.e. the diagonal element exists, but
+ * has value zero).
+ *
+ * A return value <tt>false</tt> means
+ * that diagonal elements are stored
+ * anywhere in the row, or not at all. In
+ * particular, a row or even the whole
+ * matrix may be empty. This can be used
+ * if you have block matrices where the
+ * off-diagonal blocks are quadratic but
+ * are never used for operations like the
+ * ones mentioned above. In this case,
+ * some memory can be saved by not using
+ * the diagonal storage optimization.
+ */
+ bool optimize_diagonal () const;
+
+ /**
+ * Return whether this object stores only
+ * those entries that have been added
+ * explicitly, or if the sparsity pattern
+ * contains elements that have been added
+ * through other means (implicitly) while
+ * building it. For the current class,
+ * the result is true iff optimize_diagonal
+ * in the constructor or reinit() calls
+ * has been set to false, or if the
+ * represented matrix is not square.
+ *
+ * This function mainly serves the
+ * purpose of describing the current
+ * class in cases where several kinds of
+ * sparsity patterns can be passed as
+ * template arguments.
+ */
+ bool stores_only_added_elements () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
// @}
- /**
- * @name Accessing entries
- */
+ /**
+ * @name Accessing entries
+ */
// @{
- /**
- * Return the index of the matrix
- * element with row number <tt>i</tt>
- * and column number <tt>j</tt>. If
- * the matrix element is not a
- * nonzero one, return
- * SparsityPattern::invalid_entry.
- *
- * This function is usually
- * called by the
- * SparseMatrix::operator()(). It
- * may only be called for
- * compressed sparsity patterns,
- * since in this case searching
- * whether the entry exists can
- * be done quite fast with a
- * binary sort algorithm because
- * the column numbers are sorted.
- *
- * If <tt>m</tt> is the number of
- * entries in <tt>row</tt>, then the
- * complexity of this function is
- * <i>log(m)</i> if the sparsity
- * pattern is compressed.
- *
- * @deprecated Use
- * SparseMatrix::const_iterator
- */
- unsigned int operator() (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This is the inverse operation
- * to operator()(): given a
- * global index, find out row and
- * column of the matrix entry to
- * which it belongs. The returned
- * value is the pair composed of
- * row and column index.
- *
- * This function may only be
- * called if the sparsity pattern
- * is closed. The global index
- * must then be between zero and
- * n_nonzero_elements().
- *
- * If <tt>N</tt> is the number of
- * rows of this matrix, then the
- * complexity of this function is
- * <i>log(N)</i>.
- */
- std::pair<unsigned int, unsigned int>
- matrix_position (const unsigned int global_index) const;
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * The index of a global matrix
- * entry in its row.
- *
- * This function is analogous to
- * operator(), but it computes
- * the index not with respect to
- * the total field, but only with
- * respect to the row <tt>j</tt>.
- */
- unsigned int row_position(const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Access to column number field.
- * Return the column number of
- * the <tt>index</tt>th entry in
- * <tt>row</tt>. Note that if
- * diagonal elements are
- * optimized, the first element
- * in each row is the diagonal
- * element,
- * i.e. <tt>column_number(row,0)==row</tt>.
- *
- * If the sparsity pattern is
- * already compressed, then
- * (except for the diagonal
- * element), the entries are
- * sorted by columns,
- * i.e. <tt>column_number(row,i)</tt>
- * <tt><</tt> <tt>column_number(row,i+1)</tt>.
- */
- unsigned int column_number (const unsigned int row,
- const unsigned int index) const;
+ /**
+ * Return the index of the matrix
+ * element with row number <tt>i</tt>
+ * and column number <tt>j</tt>. If
+ * the matrix element is not a
+ * nonzero one, return
+ * SparsityPattern::invalid_entry.
+ *
+ * This function is usually
+ * called by the
+ * SparseMatrix::operator()(). It
+ * may only be called for
+ * compressed sparsity patterns,
+ * since in this case searching
+ * whether the entry exists can
+ * be done quite fast with a
+ * binary sort algorithm because
+ * the column numbers are sorted.
+ *
+ * If <tt>m</tt> is the number of
+ * entries in <tt>row</tt>, then the
+ * complexity of this function is
+ * <i>log(m)</i> if the sparsity
+ * pattern is compressed.
+ *
+ * @deprecated Use
+ * SparseMatrix::const_iterator
+ */
+ unsigned int operator() (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This is the inverse operation
+ * to operator()(): given a
+ * global index, find out row and
+ * column of the matrix entry to
+ * which it belongs. The returned
+ * value is the pair composed of
+ * row and column index.
+ *
+ * This function may only be
+ * called if the sparsity pattern
+ * is closed. The global index
+ * must then be between zero and
+ * n_nonzero_elements().
+ *
+ * If <tt>N</tt> is the number of
+ * rows of this matrix, then the
+ * complexity of this function is
+ * <i>log(N)</i>.
+ */
+ std::pair<unsigned int, unsigned int>
+ matrix_position (const unsigned int global_index) const;
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
+ bool exists (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * The index of a global matrix
+ * entry in its row.
+ *
+ * This function is analogous to
+ * operator(), but it computes
+ * the index not with respect to
+ * the total field, but only with
+ * respect to the row <tt>i</tt>.
+ */
+ unsigned int row_position(const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Access to column number field.
+ * Return the column number of
+ * the <tt>index</tt>th entry in
+ * <tt>row</tt>. Note that if
+ * diagonal elements are
+ * optimized, the first element
+ * in each row is the diagonal
+ * element,
+ * i.e. <tt>column_number(row,0)==row</tt>.
+ *
+ * If the sparsity pattern is
+ * already compressed, then
+ * (except for the diagonal
+ * element), the entries are
+ * sorted by columns,
+ * i.e. <tt>column_number(row,i)</tt>
+ * <tt><</tt> <tt>column_number(row,i+1)</tt>.
+ */
+ unsigned int column_number (const unsigned int row,
+ const unsigned int index) const;
// @}
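/**
 * A small sketch of how these functions fit together (only valid once the
 * pattern has been compressed; <tt>i</tt> and <tt>j</tt> are placeholder
 * indices):
 * @code
 * if (sparsity.exists (i, j))
 *   {
 *     const unsigned int global_index = sparsity (i, j);
 *     const std::pair<unsigned int, unsigned int> position
 *       = sparsity.matrix_position (global_index);
 *     // position.first == i and position.second == j
 *   }
 * @endcode
 */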
namespace TrilinosWrappers
{
- /*! @addtogroup TrilinosWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the TrilinosWrappers::SparseMatrix class. This
- * class implements the functions that are specific to the Trilinos SparseMatrix
- * base objects for a blocked sparse matrix, and leaves the actual work
- * relaying most of the calls to the individual blocks to the functions
- * implemented in the base class. See there also for a description of when
- * this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the Trilinos matrices do
- * not have external objects for the sparsity patterns. Thus, one does not
- * determine the size of the individual blocks of a block matrix of this type
- * by attaching a block sparsity pattern, but by calling reinit() to set the
- * number of blocks and then by setting the size of each block separately. In
- * order to fix the data structures of the block matrix, it is then necessary
- * to let it know that we have changed the sizes of the underlying
- * matrices. For this, one has to call the collect_sizes() function, for much
- * the same reason as is documented with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008
- */
+ /*! @addtogroup TrilinosWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the TrilinosWrappers::SparseMatrix class. This
+ * class implements the functions that are specific to the Trilinos SparseMatrix
+ * base objects for a blocked sparse matrix, and leaves the actual work of
+ * relaying most of the calls to the individual blocks to the functions
+ * implemented in the base class. See there also for a description of when
+ * this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the Trilinos matrices do
+ * not have external objects for the sparsity patterns. Thus, one does not
+ * determine the size of the individual blocks of a block matrix of this type
+ * by attaching a block sparsity pattern, but by calling reinit() to set the
+ * number of blocks and then by setting the size of each block separately. In
+ * order to fix the data structures of the block matrix, it is then necessary
+ * to let it know that we have changed the sizes of the underlying
+ * matrices. For this, one has to call the collect_sizes() function, for much
+ * the same reason as is documented with the BlockSparsityPattern class.
+ *
+ * @ingroup Matrix1
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008
+ */
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix ();
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the block
- * matrices need to be the same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this usually does not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keeps the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
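/**
 * The only allowed scalar assignment, as a one-line sketch (the block
 * matrix is assumed to be set up already): reset all stored entries to
 * zero while keeping the block structure and sparsity pattern.
 * @code
 * system_matrix = 0;
 * @endcode
 */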
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * uninitialized ones, i.e. ones
- * for which the sizes are also
- * not yet set. You have to do
- * that by calling the @p reinit
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the @p SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
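/**
 * One way to set up a 2x2 block matrix by hand, as a sketch (the per-block
 * sparsity patterns sp00 ... sp11 are assumed to exist already): resize
 * the block structure, reinit each block, then announce the sizes.
 * @code
 * TrilinosWrappers::BlockSparseMatrix system_matrix;
 * system_matrix.reinit (2, 2);
 * system_matrix.block(0,0).reinit (sp00);
 * system_matrix.block(0,1).reinit (sp01);
 * system_matrix.block(1,0).reinit (sp10);
 * system_matrix.block(1,1).reinit (sp11);
 * system_matrix.collect_sizes ();
 * @endcode
 */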
-
- /**
- * Resize the matrix, by using an
- * array of Epetra maps to determine
- * the %parallel distribution of the
- * individual matrices. This function
- * assumes that a quadratic block
- * matrix is generated.
- */
- template <typename BlockSparsityType>
- void reinit (const std::vector<Epetra_Map> &input_maps,
- const BlockSparsityType &block_sparsity_pattern);
-
- /**
- * Resize the matrix, by using an
- * array of index sets to determine
- * the %parallel distribution of the
- * individual matrices. This function
- * assumes that a quadratic block
- * matrix is generated.
- */
- template <typename BlockSparsityType>
- void reinit (const std::vector<IndexSet> &input_maps,
- const BlockSparsityType &block_sparsity_pattern,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Resize the matrix and initialize it
- * by the given sparsity pattern. Since
- * no distribution map is given, the
- * result is a block matrix for which
- * all elements are stored locally.
- */
- template <typename BlockSparsityType>
- void reinit (const BlockSparsityType &block_sparsity_pattern);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries stored
- * therein. It uses a threshold
- * to copy only elements whose
- * modulus is larger than the
- * threshold (so zeros in the
- * deal.II matrix can be filtered
- * away).
- */
- void reinit (const std::vector<Epetra_Map> &input_maps,
- const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
- const double drop_tolerance=1e-13);
-
- /**
- * This function initializes
- * the Trilinos matrix using
- * the deal.II sparse matrix
- * and the entries stored
- * therein. It uses a threshold
- * to copy only elements whose
- * modulus is larger than the
- * threshold (so zeros in the
- * deal.II matrix can be
- * filtered away). Since no
- * Epetra_Map is given, all the
- * elements will be locally
- * stored.
- */
- void reinit (const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
- const double drop_tolerance=1e-13);
-
- /**
- * Returns the state of the
- * matrix, i.e., whether
- * compress() needs to be called
- * after an operation requiring
- * data exchange. It only
- * returns false when
- * used in <tt>debug</tt> mode,
- * since it is quite expensive to
- * keep track of all operations
- * that lead to the need for
- * compress().
- */
- bool is_compressed () const;
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects. Note that
- * this is a collective
- * operation, i.e., it needs to
- * be called on all MPI
- * processes. This command
- * internally calls the method
- * <tt>compress()</tt>, so you
- * don't need to call that
- * function in case you use
- * <tt>collect_sizes()</tt>.
- */
- void collect_sizes ();
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- void vmult (MPI::BlockVector &dst,
- const MPI::BlockVector &src) const;
-
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix, now applied
- * to localized block vectors
- * (works only when run on one
- * processor).
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (MPI::BlockVector &dst,
- const MPI::Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column, now
- * applied to localized vectors
- * (works only when run on one
- * processor).
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (MPI::Vector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row, now
- * applied to localized vectors
- * (works only when run on one
- * processor).
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (MPI::BlockVector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix, now applied
- * to localized Trilinos vectors
- * (works only when run on one
- * processor).
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (MPI::BlockVector &dst,
- const MPI::Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row, now
- * applied to localized Trilinos
- * vectors (works only when run
- * on one processor).
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (MPI::Vector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column, now
- * applied to localized Trilinos
- * vectors (works only when run
- * on one processor).
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and
- * destination <i>dst</i> must
- * not be the same vector.
- *
- * Note that both vectors have
- * to be distributed vectors
- * generated using the same Map
- * as was used for the matrix
- * in case you work on a
- * distributed memory
- * architecture, using the
- * interface in the
- * TrilinosWrappers::MPI::BlockVector
- * class.
- */
- TrilinosScalar residual (MPI::BlockVector &dst,
- const MPI::BlockVector &x,
- const MPI::BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and
- * destination <i>dst</i> must
- * not be the same vector.
- *
- * Note that both vectors have
- * to be distributed vectors
- * generated using the same Map
- * as was used for the matrix
- * in case you work on a
- * distributed memory
- * architecture, using the
- * interface in the
- * TrilinosWrappers::BlockVector
- * class. Since the block
- * matrix is in general
- * distributed among processes,
- * this function only works
- * when running the program on
- * one processor.
- */
- TrilinosScalar residual (BlockVector &dst,
- const BlockVector &x,
- const BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block row.
- */
- TrilinosScalar residual (MPI::BlockVector &dst,
- const MPI::Vector &x,
- const MPI::BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block row.
- */
- TrilinosScalar residual (BlockVector &dst,
- const Vector &x,
- const BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block column.
- */
- TrilinosScalar residual (MPI::Vector &dst,
- const MPI::BlockVector &x,
- const MPI::Vector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block column.
- */
- TrilinosScalar residual (Vector &dst,
- const BlockVector &x,
- const Vector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block.
- */
- TrilinosScalar residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const;
-
- /**
- * Make the clear() function in the
- * base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
-
- /** @addtogroup Exceptions
- * @{
- */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- ///@}
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix ();
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the block
+ * matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this usually does not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keeps the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+ * uninitialized ones, i.e. ones
+ * for which also the sizes are
+ * not yet set. You have to do
+ * that by calling the @p reinit
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the @p SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Resize the matrix, by using an
+ * array of Epetra maps to determine
+ * the %parallel distribution of the
+ * individual matrices. This function
+ * assumes that a square block
+ * matrix is generated.
+ */
+ template <typename BlockSparsityType>
+ void reinit (const std::vector<Epetra_Map> &input_maps,
+ const BlockSparsityType &block_sparsity_pattern);
+
+ /**
+ * Resize the matrix, by using an
+ * array of index sets to determine
+ * the %parallel distribution of the
+ * individual matrices. This function
+ * assumes that a square block
+ * matrix is generated.
+ */
+ template <typename BlockSparsityType>
+ void reinit (const std::vector<IndexSet> &input_maps,
+ const BlockSparsityType &block_sparsity_pattern,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Resize the matrix and initialize it
+ * by the given sparsity pattern. Since
+ * no distribution map is given, the
+ * result is a block matrix for which
+ * all elements are stored locally.
+ */
+ template <typename BlockSparsityType>
+ void reinit (const BlockSparsityType &block_sparsity_pattern);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries stored
+ * therein. It uses a threshold
+ * to copy only elements whose
+ * modulus is larger than the
+ * threshold (so zeros in the
+ * deal.II matrix can be filtered
+ * away).
+ */
+ void reinit (const std::vector<Epetra_Map> &input_maps,
+ const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
+ const double drop_tolerance=1e-13);
+
+ /**
+ * This function initializes
+ * the Trilinos matrix using
+ * the deal.II sparse matrix
+ * and the entries stored
+ * therein. It uses a threshold
+ * to copy only elements whose
+ * modulus is larger than the
+ * threshold (so zeros in the
+ * deal.II matrix can be
+ * filtered away). Since no
+ * Epetra_Map is given, all the
+ * elements will be locally
+ * stored.
+ */
+ void reinit (const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
+ const double drop_tolerance=1e-13);
+
+ /**
+ * Returns the state of the
+ * matrix, i.e., whether
+ * compress() needs to be called
+ * after an operation requiring
+ * data exchange. It only returns
+ * non-true values when used in
+ * <tt>debug</tt> mode,
+ * since it is quite expensive to
+ * keep track of all operations
+ * that lead to the need for
+ * compress().
+ */
+ bool is_compressed () const;
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects. Note that
+ * this is a collective
+ * operation, i.e., it needs to
+ * be called on all MPI
+ * processes. This command
+ * internally calls the method
+ * <tt>compress()</tt>, so you
+ * don't need to call that
+ * function in case you use
+ * <tt>collect_sizes()</tt>.
+ */
+ void collect_sizes ();
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ void vmult (MPI::BlockVector &dst,
+ const MPI::BlockVector &src) const;
+
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix, now applied
+ * to localized block vectors
+ * (works only when run on one
+ * processor).
+ */
+ void vmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void vmult (MPI::BlockVector &dst,
+ const MPI::Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column, now
+ * applied to localized vectors
+ * (works only when run on one
+ * processor).
+ */
+ void vmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void vmult (MPI::Vector &dst,
+ const MPI::BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row, now
+ * applied to localized vectors
+ * (works only when run on one
+ * processor).
+ */
+ void vmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ void Tvmult (MPI::BlockVector &dst,
+ const MPI::BlockVector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix, now applied
+ * to localized Trilinos vectors
+ * (works only when run on one
+ * processor).
+ */
+ void Tvmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void Tvmult (MPI::BlockVector &dst,
+ const MPI::Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row, now
+ * applied to localized Trilinos
+ * vectors (works only when run
+ * on one processor).
+ */
+ void Tvmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void Tvmult (MPI::Vector &dst,
+ const MPI::BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column, now
+ * applied to localized Trilinos
+ * vectors (works only when run
+ * on one processor).
+ */
+ void Tvmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
+ *
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface in the
+ * TrilinosWrappers::MPI::BlockVector
+ * class.
+ */
+ TrilinosScalar residual (MPI::BlockVector &dst,
+ const MPI::BlockVector &x,
+ const MPI::BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
+ *
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface in the
+ * TrilinosWrappers::BlockVector
+ * class. Since the block
+ * matrix is in general
+ * distributed among processes,
+ * this function only works
+ * when running the program on
+ * one processor.
+ */
+ TrilinosScalar residual (BlockVector &dst,
+ const BlockVector &x,
+ const BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block row.
+ */
+ TrilinosScalar residual (MPI::BlockVector &dst,
+ const MPI::Vector &x,
+ const MPI::BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block row.
+ */
+ TrilinosScalar residual (BlockVector &dst,
+ const Vector &x,
+ const BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block column.
+ */
+ TrilinosScalar residual (MPI::Vector &dst,
+ const MPI::BlockVector &x,
+ const MPI::Vector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block column.
+ */
+ TrilinosScalar residual (Vector &dst,
+ const BlockVector &x,
+ const Vector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block.
+ */
+ TrilinosScalar residual (VectorBase &dst,
+ const VectorBase &x,
+ const VectorBase &b) const;
+
+ /**
+ * Make the clear() function in the
+ * base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
+
+ /** @addtogroup Exceptions
+ * @{
+ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ ///@}
};
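The reinit()/collect_sizes() workflow and the matrix-vector products declared above would typically be used along the following lines. This is only a hedged sketch: the 2x2 block layout, the per-block sparsity patterns pattern_00 ... pattern_11, and the vectors dst, src, x, b and r are assumptions standing in for objects a real program would build from its discretization; block(i,j) is the block accessor inherited from BlockMatrixBase.

  // Sketch only: pattern_ij, dst, src, x, b, r are hypothetical objects
  // built elsewhere with a matching 2x2 block structure and Epetra maps.
  TrilinosWrappers::BlockSparseMatrix system_matrix;
  system_matrix.reinit (2, 2);                  // block structure only, blocks still empty
  system_matrix.block(0,0).reinit (pattern_00); // initialize every block individually
  system_matrix.block(0,1).reinit (pattern_01);
  system_matrix.block(1,0).reinit (pattern_10);
  system_matrix.block(1,1).reinit (pattern_11);
  system_matrix.collect_sizes ();               // mandatory after resizing the blocks

  system_matrix.vmult (dst, src);                                  // dst = M*src
  const TrilinosScalar r_norm = system_matrix.residual (r, x, b);  // r = b-M*x, returns its l2 norm

Alternatively, the reinit() overload taking a block sparsity pattern sets up all blocks in one call, as described in its documentation above.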
class BlockSparseMatrix;
- /**
- * An implementation of block vectors based on the vector class
- * implemented in TrilinosWrappers. While the base class provides for
- * most of the interface, this class handles the actual allocation of
- * vectors and provides functions that are specific to the underlying
- * vector type.
- *
- * In contrast to the class MPI::BlockVector, this class is based on a
- * localized version of the vectors, which means that the whole vector
- * is stored on each processor. Note that matrix vector products with
- * this block vector class do only work in case the program is run on
- * only one processor, since the Trilinos matrices are inherently
- * parallel.
- *
- * @ingroup Vectors
- * @ingroup TrilinosWrappers
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Martin Kronbichler, 2008
- */
+ /**
+ * An implementation of block vectors based on the vector class
+ * implemented in TrilinosWrappers. While the base class provides for
+ * most of the interface, this class handles the actual allocation of
+ * vectors and provides functions that are specific to the underlying
+ * vector type.
+ *
+ * In contrast to the class MPI::BlockVector, this class is based on a
+ * localized version of the vectors, which means that the whole vector
+ * is stored on each processor. Note that matrix vector products with
+ * this block vector class only work if the program is run on a
+ * single processor, since the Trilinos matrices are inherently
+ * parallel.
+ *
+ * @ingroup Vectors
+ * @ingroup TrilinosWrappers
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Martin Kronbichler, 2008
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Default constructor. Generate an
- * empty vector without any blocks.
- */
- BlockVector ();
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in Input_Maps.
- * For this non-distributed vector,
- * the %parallel partitioning is not
- * used, just the global size of the
- * partitioner.
- */
- BlockVector (const std::vector<Epetra_Map> &partitioner);
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in Input_Maps.
- * For this non-distributed vector,
- * the %parallel partitioning is not
- * used, just the global size of the
- * partitioner.
- */
- BlockVector (const std::vector<IndexSet> &partitioner,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the non-%parallel
- * vector to those of the given
- * %parallel vector and import the
- * elements.
- */
- BlockVector (const MPI::BlockVector &V);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the vector to those
- * of the given input vector and copy
- * the elements.
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Creates a block vector
- * consisting of
- * <tt>num_blocks</tt>
- * components, but there is no
- * content in the individual
- * components and the user has to
- * fill appropriate data using a
- * reinit of the blocks.
- */
- BlockVector (const unsigned int num_blocks);
-
- /**
- * Constructor. Set the number of
- * blocks to <tt>n.size()</tt> and
- * initialize each block with
- * <tt>n[i]</tt> zero elements.
- *
- * References BlockVector.reinit().
- */
- BlockVector (const std::vector<unsigned int> &N);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>n.size()</tt>. Initialize the
- * vector with the elements
- * pointed to by the range of
- * iterators given as second and
- * third argument. Apart from the
- * first argument, this
- * constructor is in complete
- * analogy to the respective
- * constructor of the
- * <tt>std::vector</tt> class, but the
- * first argument is needed in
- * order to know how to subdivide
- * the block vector into
- * different blocks.
- */
- template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * use compress(VectorOperation) instead
- *
- * @deprecated
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (const Epetra_CombineMode last_action);
-
- /**
- * so it is not hidden
- */
- using BlockVectorBase<Vector>::compress;
-
- /**
- * Copy operator: fill all
- * components of the vector that
- * are locally stored with the
- * given scalar value.
- */
- BlockVector &
- operator = (const value_type s);
-
- /**
- * Copy operator for a
- * distributed Trilinos vector to
- * a localized one.
- */
- BlockVector &
- operator = (const MPI::BlockVector &V);
-
- /**
- * Copy operator for arguments of
- * the same type.
- */
- BlockVector &
- operator = (const BlockVector &V);
-
- /**
- * Another copy function. This
- * one takes a deal.II block
- * vector and copies it into a
- * TrilinosWrappers block
- * vector. Note that the number
- * of blocks has to be the same
- * in the vector as in the input
- * vector. Use the reinit()
- * command for resizing the
- * BlockVector or for changing
- * the internal structure of the
- * block components.
- *
- * Since Trilinos only works on
- * doubles, this function is
- * limited to accept only one
- * possible number type in the
- * deal.II vector.
- */
- template <typename Number>
- BlockVector &
- operator = (const ::dealii::BlockVector<Number> &V);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are Epetra_Maps given in the
- * input argument, according to the
- * global size of the individual
- * components described in the
- * maps. Note that the resulting
- * vector will be stored completely
- * on each process. The Epetra_Map
- * is useful when data exchange
- * with a distributed vector based
- * on the same Epetra_map is
- * intended. In that case, the same
- * communicator is used for data
- * exchange.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<Epetra_Map> &partitioning,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are index sets given in the
- * input argument, according to the
- * global size of the individual
- * components described in the
- * index set, and using a given MPI
- * communicator. The MPI
- * communicator is useful when data
- * exchange with a distributed
- * vector based on the same
- * initialization is intended. In
- * that case, the same communicator
- * is used for data exchange.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<IndexSet> &partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are elements in the first
- * argument, and with the respective
- * sizes. Since no distribution map
- * is given, all vectors are local
- * vectors.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
-
- /**
- * Reinit the function
- * according to a distributed
- * block vector. The elements
- * will be copied in this
- * process.
- */
- void reinit (const MPI::BlockVector &V);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast = false);
-
- /**
- * Change the number of blocks to
- * <tt>num_blocks</tt>. The individual
- * blocks will get initialized with
- * zero size, so it is assumed that
- * the user resizes the
- * individual blocks by herself
- * in an appropriate way, and
- * calls <tt>collect_sizes</tt>
- * afterwards.
- */
- void reinit (const unsigned int num_blocks);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
-
- /**
- * Exception
- */
- DeclException0 (ExcNonMatchingBlockVectors);
-
- /**
- * Exception
- */
- DeclException2 (ExcNonLocalizedMap,
- int, int,
- << "For the generation of a localized vector the map has "
- << "to assign all elements to all vectors! "
- << "local_size = global_size is a necessary condition, but"
- << arg1 << " != " << arg2 << " was given!");
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Default constructor. Generate an
+ * empty vector without any blocks.
+ */
+ BlockVector ();
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in @p partitioner.
+ * For this non-distributed vector,
+ * the %parallel partitioning is not
+ * used, just the global size of the
+ * partitioner.
+ */
+ BlockVector (const std::vector<Epetra_Map> &partitioner);
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in @p partitioner.
+ * For this non-distributed vector,
+ * the %parallel partitioning is not
+ * used, just the global size of the
+ * partitioner.
+ */
+ BlockVector (const std::vector<IndexSet> &partitioner,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the non-%parallel
+ * vector to those of the given
+ * %parallel vector and import the
+ * elements.
+ */
+ BlockVector (const MPI::BlockVector &V);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the vector to those
+ * of the given input vector and copy
+ * the elements.
+ */
+ BlockVector (const BlockVector &V);
+
+ /**
+ * Creates a block vector
+ * consisting of
+ * <tt>num_blocks</tt>
+ * components, but there is no
+ * content in the individual
+ * components and the user has to
+ * fill appropriate data using a
+ * reinit of the blocks.
+ */
+ BlockVector (const unsigned int num_blocks);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to <tt>n.size()</tt> and
+ * initialize each block with
+ * <tt>n[i]</tt> zero elements.
+ *
+ * References BlockVector::reinit().
+ */
+ BlockVector (const std::vector<unsigned int> &N);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>n.size()</tt>. Initialize the
+ * vector with the elements
+ * pointed to by the range of
+ * iterators given as second and
+ * third argument. Apart from the
+ * first argument, this
+ * constructor is in complete
+ * analogy to the respective
+ * constructor of the
+ * <tt>std::vector</tt> class, but the
+ * first argument is needed in
+ * order to know how to subdivide
+ * the block vector into
+ * different blocks.
+ */
+ template <typename InputIterator>
+ BlockVector (const std::vector<unsigned int> &n,
+ const InputIterator first,
+ const InputIterator end);
+
+ /**
+ * Destructor. Clears memory.
+ */
+ ~BlockVector ();
+
+ /**
+ * use compress(VectorOperation) instead
+ *
+ * @deprecated
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (const Epetra_CombineMode last_action);
+
+ /**
+ * so it is not hidden
+ */
+ using BlockVectorBase<Vector>::compress;
+
+ /**
+ * Copy operator: fill all
+ * components of the vector that
+ * are locally stored with the
+ * given scalar value.
+ */
+ BlockVector &
+ operator = (const value_type s);
+
+ /**
+ * Copy operator for a
+ * distributed Trilinos vector to
+ * a localized one.
+ */
+ BlockVector &
+ operator = (const MPI::BlockVector &V);
+
+ /**
+ * Copy operator for arguments of
+ * the same type.
+ */
+ BlockVector &
+ operator = (const BlockVector &V);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II block
+ * vector and copies it into a
+ * TrilinosWrappers block
+ * vector. Note that the number
+ * of blocks has to be the same
+ * in the vector as in the input
+ * vector. Use the reinit()
+ * command for resizing the
+ * BlockVector or for changing
+ * the internal structure of the
+ * block components.
+ *
+ * Since Trilinos only works on
+ * doubles, this function is
+ * limited to accept only one
+ * possible number type in the
+ * deal.II vector.
+ */
+ template <typename Number>
+ BlockVector &
+ operator = (const ::dealii::BlockVector<Number> &V);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are Epetra_Maps given in the
+ * input argument, according to the
+ * global size of the individual
+ * components described in the
+ * maps. Note that the resulting
+ * vector will be stored completely
+ * on each process. The Epetra_Map
+ * is useful when data exchange
+ * with a distributed vector based
+ * on the same Epetra_Map is
+ * intended. In that case, the same
+ * communicator is used for data
+ * exchange.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<Epetra_Map> &partitioning,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are index sets given in the
+ * input argument, according to the
+ * global size of the individual
+ * components described in the
+ * index set, and using a given MPI
+ * communicator. The MPI
+ * communicator is useful when data
+ * exchange with a distributed
+ * vector based on the same
+ * initialization is intended. In
+ * that case, the same communicator
+ * is used for data exchange.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<IndexSet> &partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are elements in the first
+ * argument, and with the respective
+ * sizes. Since no distribution map
+ * is given, all vectors are local
+ * vectors.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<unsigned int> &N,
+ const bool fast=false);
+
+ /**
+ * Reinit the function
+ * according to a distributed
+ * block vector. The elements
+ * will be copied in this
+ * process.
+ */
+ void reinit (const MPI::BlockVector &V);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast = false);
+
+ /**
+ * Change the number of blocks to
+ * <tt>num_blocks</tt>. The individual
+ * blocks will get initialized with
+ * zero size, so it is assumed that
+ * the user resizes the
+ * individual blocks by herself
+ * in an appropriate way, and
+ * calls <tt>collect_sizes</tt>
+ * afterwards.
+ */
+ void reinit (const unsigned int num_blocks);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNonMatchingBlockVectors);
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcNonLocalizedMap,
+ int, int,
+ << "For the generation of a localized vector the map has "
+ << "to assign all elements to all vectors! "
+ << "local_size = global_size is a necessary condition, but"
+ << arg1 << " != " << arg2 << " was given!");
};
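As a rough illustration of the localized block vector declared above (meaningful only in a single-processor run, as its documentation stresses), one could either build a purely local vector from a list of block sizes or pull a distributed vector onto one process for serial post-processing. The names block_sizes and distributed_v are assumptions for this sketch, and the usual deal.II Trilinos headers plus <iostream> are presumed to be included.

  // Two local blocks of sizes 10 and 5, zero-initialized:
  std::vector<unsigned int> block_sizes (2);
  block_sizes[0] = 10;
  block_sizes[1] = 5;
  TrilinosWrappers::BlockVector local_v (block_sizes);

  // Gather a distributed vector (a hypothetical MPI::BlockVector filled
  // elsewhere) into a localized copy and print it:
  TrilinosWrappers::BlockVector localized_copy (distributed_v);
  localized_copy.print (std::cout, 6 /*precision*/, true /*scientific*/, true /*across*/);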
namespace MPI
{
- /**
- * An implementation of block vectors based on the vector class
- * implemented in TrilinosWrappers. While the base class provides for
- * most of the interface, this class handles the actual allocation of
- * vectors and provides functions that are specific to the underlying
- * vector type.
- *
- * The model of distribution of data is such that each of the blocks
- * is distributed across all MPI processes named in the MPI
- * communicator. I.e. we don't just distribute the whole vector, but
- * each component. In the constructors and reinit() functions, one
- * therefore not only has to specify the sizes of the individual
- * blocks, but also the number of elements of each of these blocks to
- * be stored on the local process.
- *
- * @ingroup Vectors
- * @ingroup TrilinosWrappers
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
- */
+ /**
+ * An implementation of block vectors based on the vector class
+ * implemented in TrilinosWrappers. While the base class provides for
+ * most of the interface, this class handles the actual allocation of
+ * vectors and provides functions that are specific to the underlying
+ * vector type.
+ *
+ * The model of distribution of data is such that each of the blocks
+ * is distributed across all MPI processes named in the MPI
+ * communicator. I.e. we don't just distribute the whole vector, but
+ * each component. In the constructors and reinit() functions, one
+ * therefore not only has to specify the sizes of the individual
+ * blocks, but also the number of elements of each of these blocks to
+ * be stored on the local process.
+ *
+ * @ingroup Vectors
+ * @ingroup TrilinosWrappers
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Default constructor. Generate an
- * empty vector without any blocks.
- */
- BlockVector ();
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in @p
- * partitioning. Each Epetra_Map
- * contains the layout of the
- * distribution of data among the MPI
- * processes.
- */
- BlockVector (const std::vector<Epetra_Map> ¶llel_partitioning);
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in
- * @p partitioning. Each IndexSet
- * together with the MPI communicator
- * contains the layout of the
- * distribution of data among the MPI
- * processes.
- */
- BlockVector (const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the parallel vector
- * to those of the given argument and
- * copy the elements.
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Creates a block vector
- * consisting of
- * <tt>num_blocks</tt>
- * components, but there is no
- * content in the individual
- * components and the user has to
- * fill appropriate data using a
- * reinit of the blocks.
- */
- BlockVector (const unsigned int num_blocks);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Copy operator: fill all
- * components of the vector that
- * are locally stored with the
- * given scalar value.
- */
- BlockVector &
- operator = (const value_type s);
-
- /**
- * Copy operator for arguments of
- * the same type.
- */
- BlockVector &
- operator = (const BlockVector &V);
-
- /**
- * Copy operator for arguments of
- * the localized Trilinos vector
- * type.
- */
- BlockVector &
- operator = (const ::dealii::TrilinosWrappers::BlockVector &V);
-
- /**
- * Another copy function. This
- * one takes a deal.II block
- * vector and copies it into a
- * TrilinosWrappers block
- * vector. Note that the number
- * of blocks has to be the same
- * in the vector as in the input
- * vector. Use the reinit()
- * command for resizing the
- * BlockVector or for changing
- * the internal structure of the
- * block components.
- *
- * Since Trilinos only works on
- * doubles, this function is
- * limited to accept only one
- * possible number type in the
- * deal.II vector.
- */
- template <typename Number>
- BlockVector &
- operator = (const ::dealii::BlockVector<Number> &V);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are Epetra_Maps given in the input
- * argument, according to the
- * parallel distribution of the
- * individual components described
- * in the maps.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<Epetra_Map> ¶llel_partitioning,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are index sets given in the input
- * argument, according to the
- * parallel distribution of the
- * individual components described
- * in the maps.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool fast = false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast = false);
-
- /**
- * Change the number of blocks to
- * <tt>num_blocks</tt>. The individual
- * blocks will get initialized with
- * zero size, so it is assumed that
- * the user resizes the
- * individual blocks by herself
- * in an appropriate way, and
- * calls <tt>collect_sizes</tt>
- * afterwards.
- */
- void reinit (const unsigned int num_blocks);
-
- /**
- * This reinit function is meant to
- * be used for parallel
- * calculations where some
- * non-local data has to be
- * used. The typical situation
- * where one needs this function is
- * the call of the
- * FEValues<dim>::get_function_values
- * function (or of some
- * derivatives) in parallel. Since
- * it is usually faster to retrieve
- * the data in advance, this
- * function can be called before
- * the assembly forks out to the
- * different processors. What this
- * function does is the following:
- * It takes the information in the
- * columns of the given matrix and
- * looks which data couples between
- * the different processors. That
- * data is then queried from the
- * input vector. Note that you
- * should not write to the
- * resulting vector any more, since
- * the some data can be stored
- * several times on different
- * processors, leading to
- * unpredictable results. In
- * particular, such a vector cannot
- * be used for matrix-vector
- * products as for example done
- * during the solution of linear
- * systems.
- */
- void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m,
- const BlockVector &v);
-
-
- /**
- * use compress(VectorOperation) instead
- *
- * @deprecated
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (const Epetra_CombineMode last_action);
-
- /**
- * so it is not hidden
- */
- using BlockVectorBase<Vector>::compress;
-
-
- /**
- * Returns the state of the
- * vector, i.e., whether
- * compress() needs to be
- * called after an operation
- * requiring data
- * exchange. Does only return
- * non-true values when used in
- * <tt>debug</tt> mode, since
- * it is quite expensive to
- * keep track of all operations
- * that lead to the need for
- * compress().
- */
- bool is_compressed () const;
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
-
- /**
- * Exception
- */
- DeclException0 (ExcNonMatchingBlockVectors);
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Default constructor. Generate an
+ * empty vector without any blocks.
+ */
+ BlockVector ();
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in @p
+ * parallel_partitioning. Each Epetra_Map
+ * contains the layout of the
+ * distribution of data among the MPI
+ * processes.
+ */
+ BlockVector (const std::vector<Epetra_Map> ¶llel_partitioning);
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in
+ * @p parallel_partitioning. Each IndexSet
+ * together with the MPI communicator
+ * contains the layout of the
+ * distribution of data among the MPI
+ * processes.
+ */
+ BlockVector (const std::vector<IndexSet> ¶llel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the parallel vector
+ * to those of the given argument and
+ * copy the elements.
+ */
+ BlockVector (const BlockVector &V);
+
+ /**
+ * Creates a block vector
+ * consisting of
+ * <tt>num_blocks</tt>
+ * components, but there is no
+ * content in the individual
+ * components and the user has to
+ * fill appropriate data using a
+ * reinit of the blocks.
+ */
+ BlockVector (const unsigned int num_blocks);
+
+ /**
+ * Destructor. Clears memory.
+ */
+ ~BlockVector ();
+
+ /**
+ * Copy operator: fill all
+ * components of the vector that
+ * are locally stored with the
+ * given scalar value.
+ */
+ BlockVector &
+ operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of
+ * the same type.
+ */
+ BlockVector &
+ operator = (const BlockVector &V);
+
+ /**
+ * Copy operator for arguments of
+ * the localized Trilinos vector
+ * type.
+ */
+ BlockVector &
+ operator = (const ::dealii::TrilinosWrappers::BlockVector &V);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II block
+ * vector and copies it into a
+ * TrilinosWrappers block
+ * vector. Note that the number
+ * of blocks has to be the same
+ * in the vector as in the input
+ * vector. Use the reinit()
+ * command for resizing the
+ * BlockVector or for changing
+ * the internal structure of the
+ * block components.
+ *
+ * Since Trilinos only works on
+ * doubles, this function is
+ * limited to accept only one
+ * possible number type in the
+ * deal.II vector.
+ */
+ template <typename Number>
+ BlockVector &
+ operator = (const ::dealii::BlockVector<Number> &V);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are Epetra_Maps given in the input
+ * argument, according to the
+ * parallel distribution of the
+ * individual components described
+ * in the maps.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<Epetra_Map> ¶llel_partitioning,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are index sets given in the input
+ * argument, according to the
+ * parallel distribution of the
+ * individual components described
+ * in the maps.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<IndexSet> ¶llel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool fast = false);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast = false);
+
+ /**
+ * Change the number of blocks to
+ * <tt>num_blocks</tt>. The individual
+ * blocks will get initialized with
+ * zero size, so it is assumed that
+ * the user resizes the
+ * individual blocks by herself
+ * in an appropriate way, and
+ * calls <tt>collect_sizes</tt>
+ * afterwards.
+ */
+ void reinit (const unsigned int num_blocks);
+
+ /**
+ * This reinit function is meant to
+ * be used for parallel
+ * calculations where some
+ * non-local data has to be
+ * used. The typical situation
+ * where one needs this function is
+ * the call of the
+ * FEValues<dim>::get_function_values
+ * function (or of some
+ * derivatives) in parallel. Since
+ * it is usually faster to retrieve
+ * the data in advance, this
+ * function can be called before
+ * the assembly forks out to the
+ * different processors. What this
+ * function does is the following:
+ * It takes the information in the
+ * columns of the given matrix and
+ * determines which data couples between
+ * the different processors. That
+ * data is then queried from the
+ * input vector. Note that you
+ * should not write to the
+ * resulting vector any more, since
+ * some data can be stored
+ * several times on different
+ * processors, leading to
+ * unpredictable results. In
+ * particular, such a vector cannot
+ * be used for matrix-vector
+ * products as for example done
+ * during the solution of linear
+ * systems.
+ */
+ void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m,
+ const BlockVector &v);
+
+
+ /**
+ * use compress(VectorOperation) instead
+ *
+ * @deprecated
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (const Epetra_CombineMode last_action);
+
+ /**
+ * so it is not hidden
+ */
+ using BlockVectorBase<Vector>::compress;
+
+
+ /**
+ * Returns the state of the
+ * vector, i.e., whether
+ * compress() needs to be
+ * called after an operation
+ * requiring data
+ * exchange. It only returns
+ * non-true values when used in
+ * <tt>debug</tt> mode, since
+ * it is quite expensive to
+ * keep track of all operations
+ * that lead to the need for
+ * compress().
+ */
+ bool is_compressed () const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNonMatchingBlockVectors);
};
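A sketch of how the distributed block vector above might be set up and used in practice; the index sets owned_u and owned_p and the block matrix system_matrix are hypothetical placeholders for what a real program would obtain from its degree-of-freedom distribution and assembly.

  // One IndexSet of locally owned entries per block (assumptions):
  std::vector<IndexSet> partitioning;
  partitioning.push_back (owned_u);
  partitioning.push_back (owned_p);

  TrilinosWrappers::MPI::BlockVector solution (partitioning, MPI_COMM_WORLD);
  solution = 0;   // scalar copy operator: zero out all locally stored entries

  // Retrieve the off-process entries that couple through the matrix before
  // evaluating finite element functions in parallel; treat the result as
  // read-only afterwards, as the documentation above warns.
  TrilinosWrappers::MPI::BlockVector ghosted (partitioning, MPI_COMM_WORLD);
  ghosted.import_nonlocal_data_for_fe (system_matrix, solution);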
class PreconditionBase;
- /**
- * Base class for solver classes using the Trilinos solvers. Since
- * solvers in Trilinos are selected based on flags passed to a generic
- * solver object, basically all the actual solver calls happen in this
- * class, and derived classes simply set the right flags to select one
- * solver or another, or to set certain parameters for individual
- * solvers. For a general discussion on the Trilinos solver package
- * AztecOO, we refer to the <a href =
- * "http://trilinos.sandia.gov/packages/aztecoo/AztecOOUserGuide.pdf">AztecOO
- * user guide</a>.
- *
- * This solver class can also be used as a standalone class, where the
- * respective Krylov method is set via the flag
- * <tt>solver_name</tt>. This can be done at runtime (e.g., when
- * parsing the solver from a ParameterList) and is similar to the
- * deal.II class SolverSelector.
- *
- * @ingroup TrilinosWrappers
- * @author Martin Kronbichler, 2008, 2009
- */
+ /**
+ * Base class for solver classes using the Trilinos solvers. Since
+ * solvers in Trilinos are selected based on flags passed to a generic
+ * solver object, basically all the actual solver calls happen in this
+ * class, and derived classes simply set the right flags to select one
+ * solver or another, or to set certain parameters for individual
+ * solvers. For a general discussion on the Trilinos solver package
+ * AztecOO, we refer to the <a href =
+ * "http://trilinos.sandia.gov/packages/aztecoo/AztecOOUserGuide.pdf">AztecOO
+ * user guide</a>.
+ *
+ * This solver class can also be used as a standalone class, where the
+ * respective Krylov method is set via the flag
+ * <tt>solver_name</tt>. This can be done at runtime (e.g., when
+ * parsing the solver from a ParameterList) and is similar to the
+ * deal.II class SolverSelector.
+ *
+ * @ingroup TrilinosWrappers
+ * @author Martin Kronbichler, 2008, 2009
+ */
class SolverBase
{
- public:
-
- /**
- * Enumeration object that is
- * set in the constructor of
- * the derived classes and
- * tells Trilinos which solver
- * to use. This option can also
- * be set in the user program,
- * so one might use this base
- * class instead of one of the
- * specialized derived classes
- * when the solver should be
- * set at runtime. Currently
- * enabled options are:
- */
- enum SolverName {cg, cgs, gmres, bicgstab, tfqmr} solver_name;
-
- /**
- * Standardized data struct to
- * pipe additional data to the
- * solver.
- */
-
- struct AdditionalData
- {
- /**
- * Sets the additional data field to
- * the desired output format and puts
- * the restart parameter in case the
- * derived class is GMRES.
- *
- * TODO: Find a better way for
- * setting the GMRES restart
- * parameter since it is quite
- * inelegant to set a specific option
- * of one solver in the base class
- * for all solvers.
- */
- AdditionalData (const bool output_solver_details = false,
- const unsigned int gmres_restart_parameter = 30);
-
- /**
- * Enables/disables the output of
- * solver details (residual in each
- * iterations etc.).
- */
- const bool output_solver_details;
-
- /**
- * Restart parameter for GMRES
- * solver.
- */
- const unsigned int gmres_restart_parameter;
- };
-
- /**
- * Constructor. Takes the
- * solver control object and
- * creates the solver.
- */
- SolverBase (SolverControl &cn);
-
- /**
- * Second constructor. This
- * constructor takes an enum
- * object that specifies the
- * solver name and sets the
- * appropriate Krylov
- * method.
- */
- SolverBase (const enum SolverName solver_name,
- SolverControl &cn);
-
- /**
- * Destructor.
- */
- virtual ~SolverBase ();
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on
- * the information provided by
- * derived classes and the
- * object passed as a
- * preconditioner, one of the
- * linear solvers and
- * preconditioners of Trilinos
- * is chosen.
- */
- void
- solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b,
- const PreconditionBase &preconditioner);
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on the
- * information provided by derived
- * classes and the object passed as a
- * preconditioner, one of the linear
- * solvers and preconditioners of
- * Trilinos is chosen. This class
- * works with matrices according to
- * the TrilinosWrappers format, but
- * can take deal.II vectors as
- * argument. Since deal.II are serial
- * vectors (not distributed), this
- * function does only what you expect
- * in case the matrix is locally
- * owned. Otherwise, an exception
- * will be thrown.
- */
- void
- solve (const SparseMatrix &A,
- dealii::Vector<double> &x,
- const dealii::Vector<double> &b,
- const PreconditionBase &preconditioner);
-
- /**
- * Access to object that controls
- * convergence.
- */
- SolverControl & control() const;
-
- /**
- * Exception
- */
- DeclException1 (ExcTrilinosError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a Trilinos function");
-
- protected:
-
- /**
- * Reference to the object that
- * controls convergence of the
- * iterative solver. In fact,
- * for these Trilinos wrappers,
- * Trilinos does so itself, but
- * we copy the data from this
- * object before starting the
- * solution process, and copy
- * the data back into it
- * afterwards.
- */
- SolverControl &solver_control;
-
- private:
-
- /**
- * A structure that collects
- * the Trilinos sparse matrix,
- * the right hand side vector
- * and the solution vector,
- * which is passed down to the
- * Trilinos solver.
- */
- std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
-
- /**
- * A structure that contains
- * the Trilinos solver and
- * preconditioner objects.
- */
- AztecOO solver;
-
- /**
- * Store a copy of the flags for this
- * particular solver.
- */
- const AdditionalData additional_data;
+ public:
+
+ /**
+ * Enumeration object that is
+ * set in the constructor of
+ * the derived classes and
+ * tells Trilinos which solver
+ * to use. This option can also
+ * be set in the user program,
+ * so one might use this base
+ * class instead of one of the
+ * specialized derived classes
+ * when the solver should be
+ * set at runtime. Currently
+ * enabled options are:
+ */
+ enum SolverName {cg, cgs, gmres, bicgstab, tfqmr} solver_name;
+
+ /**
+ * Standardized data struct to
+ * pipe additional data to the
+ * solver.
+ */
+
+ struct AdditionalData
+ {
+ /**
+ * Sets the additional data field to
+ * the desired output format and sets
+ * the restart parameter in case the
+ * derived class is GMRES.
+ *
+ * TODO: Find a better way for
+ * setting the GMRES restart
+ * parameter since it is quite
+ * inelegant to set a specific option
+ * of one solver in the base class
+ * for all solvers.
+ */
+ AdditionalData (const bool output_solver_details = false,
+ const unsigned int gmres_restart_parameter = 30);
+
+ /**
+ * Enables/disables the output of
+ * solver details (residual in each
+ * iteration, etc.).
+ */
+ const bool output_solver_details;
+
+ /**
+ * Restart parameter for GMRES
+ * solver.
+ */
+ const unsigned int gmres_restart_parameter;
+ };
+
+ /**
+ * Constructor. Takes the
+ * solver control object and
+ * creates the solver.
+ */
- SolverBase (SolverControl &cn);
++ SolverBase (SolverControl &cn);
+
+ /**
+ * Second constructor. This
+ * constructor takes an enum
+ * object that specifies the
+ * solver name and sets the
+ * appropriate Krylov
+ * method.
+ */
+ SolverBase (const enum SolverName solver_name,
+ SolverControl &cn);
+
+ /**
+ * Destructor.
+ */
+ virtual ~SolverBase ();
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Depending on
+ * the information provided by
+ * derived classes and the
+ * object passed as a
+ * preconditioner, one of the
+ * linear solvers and
+ * preconditioners of Trilinos
+ * is chosen.
+ */
+ void
+ solve (const SparseMatrix &A,
+ VectorBase &x,
+ const VectorBase &b,
+ const PreconditionBase &preconditioner);
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Depending on the
+ * information provided by derived
+ * classes and the object passed as a
+ * preconditioner, one of the linear
+ * solvers and preconditioners of
+ * Trilinos is chosen. This class
+ * works with matrices according to
+ * the TrilinosWrappers format, but
+ * can take deal.II vectors as
+ * argument. Since deal.II vectors are
+ * serial (not distributed), this
+ * function only does what you expect
+ * in case the matrix is locally
+ * owned. Otherwise, an exception
+ * will be thrown.
+ */
+ void
+ solve (const SparseMatrix &A,
+ dealii::Vector<double> &x,
+ const dealii::Vector<double> &b,
+ const PreconditionBase &preconditioner);
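+
+ /* A sketch of standalone use with the Krylov method chosen at run
+ * time. The matrix <tt>A</tt>, the vectors <tt>x</tt> and <tt>b</tt>,
+ * and the choice of PreconditionJacobi are assumptions made for
+ * illustration; any preconditioner derived from PreconditionBase
+ * can be passed instead:
+ *
+ * SolverControl solver_control (1000, 1e-12);
+ * TrilinosWrappers::SolverBase solver (TrilinosWrappers::SolverBase::gmres,
+ * solver_control);
+ *
+ * TrilinosWrappers::PreconditionJacobi preconditioner;
+ * preconditioner.initialize (A);
+ *
+ * solver.solve (A, x, b, preconditioner);
+ */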
+
+ /**
+ * Access to object that controls
+ * convergence.
+ */
+ SolverControl &control() const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ protected:
+
+ /**
+ * Reference to the object that
+ * controls convergence of the
+ * iterative solver. In fact,
+ * for these Trilinos wrappers,
+ * Trilinos does so itself, but
+ * we copy the data from this
+ * object before starting the
+ * solution process, and copy
+ * the data back into it
+ * afterwards.
+ */
+ SolverControl &solver_control;
+
+ private:
+
+ /**
+ * A structure that collects
+ * the Trilinos sparse matrix,
+ * the right hand side vector
+ * and the solution vector,
+ * which is passed down to the
+ * Trilinos solver.
+ */
+ std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
+
+ /**
+ * A structure that contains
+ * the Trilinos solver and
+ * preconditioner objects.
+ */
+ AztecOO solver;
+
+ /**
+ * Store a copy of the flags for this
+ * particular solver.
+ */
+ const AdditionalData additional_data;
};
- /**
- * An implementation of the Trilinos KLU direct solver (using the Amesos
- * package).
- *
- * @ingroup TrilinosWrappers
- * @author Martin Kronbichler, 2009
- */
+ /**
+ * An implementation of the Trilinos KLU direct solver (using the Amesos
+ * package).
+ *
+ * @ingroup TrilinosWrappers
+ * @author Martin Kronbichler, 2009
+ */
class SolverDirect
{
- public:
-
- /**
- * Standardized data struct to
- * pipe additional data to the
- * solver.
- */
-
- struct AdditionalData
- {
- /**
- * Sets the additional data field to
- * the desired output format.
- */
- AdditionalData (const bool output_solver_details = false);
-
- /**
- * Enables/disables the output of
- * solver details (residual in each
- * iterations etc.).
- */
- bool output_solver_details;
- };
-
- /**
- * Constructor. Takes the
- * solver control object and
- * creates the solver.
- */
- SolverDirect (SolverControl &cn,
- const AdditionalData &data = AdditionalData());
-
- /**
- * Destructor.
- */
- virtual ~SolverDirect ();
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Creates a KLU
- * factorization of the matrix and
- * performs the solve. Note that
- * there is no need for a
- * preconditioner here.
- */
- void
- solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b);
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on the
- * information provided by derived
- * classes and the object passed as a
- * preconditioner, one of the linear
- * solvers and preconditioners of
- * Trilinos is chosen. This class
- * works with matrices according to
- * the TrilinosWrappers format, but
- * can take deal.II vectors as
- * argument. Since deal.II are serial
- * vectors (not distributed), this
- * function does only what you expect
- * in case the matrix is locally
- * owned. Otherwise, an exception
- * will be thrown.
- */
- void
- solve (const SparseMatrix &A,
- dealii::Vector<double> &x,
- const dealii::Vector<double> &b);
-
- /**
- * Access to object that controls
- * convergence.
- */
- SolverControl & control() const;
-
- /**
- * Exception
- */
- DeclException1 (ExcTrilinosError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a Trilinos function");
-
- private:
-
- /**
- * Reference to the object that
- * controls convergence of the
- * iterative solver. In fact,
- * for these Trilinos wrappers,
- * Trilinos does so itself, but
- * we copy the data from this
- * object before starting the
- * solution process, and copy
- * the data back into it
- * afterwards.
- */
- SolverControl &solver_control;
-
- /**
- * A structure that collects
- * the Trilinos sparse matrix,
- * the right hand side vector
- * and the solution vector,
- * which is passed down to the
- * Trilinos solver.
- */
- std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
-
- /**
- * A structure that contains
- * the Trilinos solver and
- * preconditioner objects.
- */
- std_cxx1x::shared_ptr<Amesos_BaseSolver> solver;
-
- /**
- * Store a copy of the flags for this
- * particular solver.
- */
- const AdditionalData additional_data;
+ public:
+
+ /**
+ * Standardized data struct to
+ * pipe additional data to the
+ * solver.
+ */
+
+ struct AdditionalData
+ {
+ /**
+ * Sets the additional data field to
+ * the desired output format.
+ */
+ AdditionalData (const bool output_solver_details = false);
+
+ /**
+ * Enables/disables the output of
+ * solver details (residual in each
+ * iteration, etc.).
+ */
+ bool output_solver_details;
+ };
+
+ /**
+ * Constructor. Takes the
+ * solver control object and
+ * creates the solver.
+ */
- SolverDirect (SolverControl &cn,
++ SolverDirect (SolverControl &cn,
+ const AdditionalData &data = AdditionalData());
+
+ /**
+ * Destructor.
+ */
+ virtual ~SolverDirect ();
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Creates a KLU
+ * factorization of the matrix and
+ * performs the solve. Note that
+ * there is no need for a
+ * preconditioner here.
+ */
+ void
+ solve (const SparseMatrix &A,
+ VectorBase &x,
+ const VectorBase &b);
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt> using the KLU
+ * direct solver, as above. This
+ * function works with matrices
+ * according to the
+ * TrilinosWrappers format, but
+ * can take deal.II vectors as
+ * argument. Since deal.II vectors
+ * are serial (not distributed),
+ * this function only does what
+ * you expect in case the matrix
+ * is locally owned. Otherwise,
+ * an exception will be thrown.
+ */
+ void
+ solve (const SparseMatrix &A,
+ dealii::Vector<double> &x,
+ const dealii::Vector<double> &b);
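+
+ /* A minimal usage sketch; the matrix <tt>A</tt>, the vectors
+ * <tt>x</tt> and <tt>b</tt>, and the SolverControl settings are
+ * assumptions made for illustration:
+ *
+ * SolverControl solver_control (1, 1e-10);
+ * TrilinosWrappers::SolverDirect direct_solver (solver_control);
+ * direct_solver.solve (A, x, b);
+ */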
+
+ /**
+ * Access to object that controls
+ * convergence.
+ */
+ SolverControl &control() const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ private:
+
+ /**
+ * Reference to the object that
+ * controls convergence of the
+ * iterative solver. In fact,
+ * for these Trilinos wrappers,
+ * Trilinos does so itself, but
+ * we copy the data from this
+ * object before starting the
+ * solution process, and copy
+ * the data back into it
+ * afterwards.
+ */
+ SolverControl &solver_control;
+
+ /**
+ * A structure that collects
+ * the Trilinos sparse matrix,
+ * the right hand side vector
+ * and the solution vector,
+ * which is passed down to the
+ * Trilinos solver.
+ */
+ std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
+
+ /**
+ * A structure that contains
+ * the Trilinos solver and
+ * preconditioner objects.
+ */
+ std_cxx1x::shared_ptr<Amesos_BaseSolver> solver;
+
+ /**
+ * Store a copy of the flags for this
+ * particular solver.
+ */
+ const AdditionalData additional_data;
};
}
- /**
- * This class implements a wrapper to use the Trilinos distributed
- * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of
- * matrix we deal with all the time - we most likely get it from some
- * assembly process, where also entries not locally owned might need to
- * be written and hence need to be forwarded to the owner process. This
- * class is designed to be used in a distributed memory architecture
- * with an MPI compiler on the bottom, but works equally well also for
- * serial processes. The only requirement for this class to work is that
- * Trilinos has been installed with the same compiler as is used for
- * generating deal.II.
- *
- * The interface of this class is modeled after the existing
- * SparseMatrix class in deal.II. It has almost the same member
- * functions, and is often exchangable. However, since Trilinos only
- * supports a single scalar type (double), it is not templated, and only
- * works with doubles.
- *
- * Note that Trilinos only guarantees that operations do what you expect
- * if the functions @p GlobalAssemble has been called after matrix
- * assembly. Therefore, you need to call SparseMatrix::compress()
- * before you actually use the matrix. This also calls @p FillComplete
- * that compresses the storage format for sparse matrices by discarding
- * unused elements. Trilinos allows to continue with assembling the
- * matrix after calls to these functions, though.
- *
- * @ingroup TrilinosWrappers
- * @ingroup Matrix1
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
- */
+ /**
+ * This class implements a wrapper to use the Trilinos distributed
+ * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of
+ * matrix we deal with all the time - we most likely get it from some
+ * assembly process, where also entries not locally owned might need to
+ * be written and hence need to be forwarded to the owner process. This
+ * class is designed to be used in a distributed memory architecture
+ * with an MPI compiler on the bottom, but works equally well also for
+ * serial processes. The only requirement for this class to work is that
+ * Trilinos has been installed with the same compiler as is used for
+ * generating deal.II.
+ *
+ * The interface of this class is modeled after the existing
+ * SparseMatrix class in deal.II. It has almost the same member
+ * functions, and is often exchangeable. However, since Trilinos only
+ * supports a single scalar type (double), it is not templated, and only
+ * works with doubles.
+ *
+ * Note that Trilinos only guarantees that operations do what you expect
+ * if the function @p GlobalAssemble has been called after matrix
+ * assembly. Therefore, you need to call SparseMatrix::compress()
+ * before you actually use the matrix. This also calls @p FillComplete,
+ * which compresses the storage format for sparse matrices by discarding
+ * unused elements. Trilinos allows one to continue assembling the
+ * matrix after calls to these functions, though.
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Matrix1
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
+ */
class SparseMatrix : public Subscriptor
{
- public:
- /**
- * A structure that describes
- * some of the traits of this
- * class in terms of its run-time
- * behavior. Some other classes
- * (such as the block matrix
- * classes) that take one or
- * other of the matrix classes as
- * its template parameters can
- * tune their behavior based on
- * the variables in this class.
- */
- struct Traits
- {
- /**
- * It is safe to elide additions
- * of zeros to individual
- * elements of this matrix.
- */
- static const bool zero_addition_can_be_elided = true;
- };
+ public:
+ /**
+ * A structure that describes
+ * some of the traits of this
+ * class in terms of its run-time
+ * behavior. Some other classes
+ * (such as the block matrix
+ * classes) that take one or the
+ * other of the matrix classes as
+ * their template parameters can
+ * tune their behavior based on
+ * the variables in this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is safe to elide additions
+ * of zeros to individual
+ * elements of this matrix.
+ */
+ static const bool zero_addition_can_be_elided = true;
+ };
- /**
- * Declare a typedef for the
- * iterator class.
- */
- typedef MatrixIterators::const_iterator const_iterator;
-
- /**
- * Declare a typedef in analogy
- * to all the other container
- * classes.
- */
- typedef TrilinosScalar value_type;
-
- /**
- * @name Constructors and initalization.
- */
+ /**
+ * Declare a typedef for the
+ * iterator class.
+ */
+ typedef MatrixIterators::const_iterator const_iterator;
+
+ /**
+ * Declare a typedef in analogy
+ * to all the other container
+ * classes.
+ */
+ typedef TrilinosScalar value_type;
+
+ /**
+ * @name Constructors and initialization.
+ */
//@{
- /**
- * Default constructor. Generates
- * an empty (zero-size) matrix.
- */
- SparseMatrix ();
-
- /**
- * Generate a matrix that is completely
- * stored locally, having #m rows and
- * #n columns.
- *
- * The number of columns entries per
- * row is specified as the maximum
- * number of entries argument.
- */
- SparseMatrix (const unsigned int m,
- const unsigned int n,
- const unsigned int n_max_entries_per_row);
-
- /**
- * Generate a matrix that is completely
- * stored locally, having #m rows and
- * #n columns.
- *
- * The vector
- * <tt>n_entries_per_row</tt>
- * specifies the number of entries in
- * each row.
- */
- SparseMatrix (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * Generate a matrix from a Trilinos
- * sparsity pattern object.
- */
- SparseMatrix (const SparsityPattern &InputSparsityPattern);
-
- /**
- * Copy constructor. Sets the
- * calling matrix to be the same
- * as the input matrix, i.e.,
- * using the same sparsity
- * pattern and entries.
- */
- SparseMatrix (const SparseMatrix &InputMatrix);
-
- /**
- * Destructor. Made virtual so
- * that one can use pointers to
- * this class.
- */
- virtual ~SparseMatrix ();
-
- /**
- * This function initializes the
- * Trilinos matrix with a deal.II
- * sparsity pattern, i.e. it makes
- * the Trilinos Epetra matrix know
- * the position of nonzero entries
- * according to the sparsity
- * pattern. This function is meant
- * for use in serial programs, where
- * there is no need to specify how
- * the matrix is going to be
- * distributed among different
- * processors. This function works in
- * %parallel, too, but it is
- * recommended to manually specify
- * the %parallel partioning of the
- * matrix using an Epetra_Map. When
- * run in %parallel, it is currently
- * necessary that each processor
- * holds the sparsity_pattern
- * structure because each processor
- * sets its rows.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const SparsityType &sparsity_pattern);
-
- /**
- * This function reinitializes the
- * Trilinos sparse matrix from a
- * (possibly distributed) Trilinos
- * sparsity pattern.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- void reinit (const SparsityPattern &sparsity_pattern);
-
- /**
- * This function copies the content
- * in <tt>sparse_matrix</tt> to the
- * calling matrix.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- void reinit (const SparseMatrix &sparse_matrix);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries
- * stored therein. It uses a
- * threshold to copy only elements
- * with modulus larger than the
- * threshold (so zeros in the deal.II
- * matrix can be filtered away).
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
-
- /**
- * This reinit function takes as
- * input a Trilinos Epetra_CrsMatrix
- * and copies its sparsity
- * pattern. If so requested, even the
- * content (values) will be copied.
- */
- void reinit (const Epetra_CrsMatrix &input_matrix,
- const bool copy_values = true);
+ /**
+ * Default constructor. Generates
+ * an empty (zero-size) matrix.
+ */
+ SparseMatrix ();
+
+ /**
+ * Generate a matrix that is completely
+ * stored locally, having #m rows and
+ * #n columns.
+ *
+ * The number of column entries per
+ * row is specified as the maximum
+ * number of entries argument.
+ */
+ SparseMatrix (const unsigned int m,
+ const unsigned int n,
+ const unsigned int n_max_entries_per_row);
+
+ /**
+ * Generate a matrix that is completely
+ * stored locally, having #m rows and
+ * #n columns.
+ *
+ * The vector
+ * <tt>n_entries_per_row</tt>
+ * specifies the number of entries in
+ * each row.
+ */
+ SparseMatrix (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * Generate a matrix from a Trilinos
+ * sparsity pattern object.
+ */
+ SparseMatrix (const SparsityPattern &InputSparsityPattern);
+
+ /**
+ * Copy constructor. Sets the
+ * calling matrix to be the same
+ * as the input matrix, i.e.,
+ * using the same sparsity
+ * pattern and entries.
+ */
+ SparseMatrix (const SparseMatrix &InputMatrix);
+
+ /**
+ * Destructor. Made virtual so
+ * that one can use pointers to
+ * this class.
+ */
+ virtual ~SparseMatrix ();
+
+ /**
+ * This function initializes the
+ * Trilinos matrix with a deal.II
+ * sparsity pattern, i.e. it makes
+ * the Trilinos Epetra matrix know
+ * the position of nonzero entries
+ * according to the sparsity
+ * pattern. This function is meant
+ * for use in serial programs, where
+ * there is no need to specify how
+ * the matrix is going to be
+ * distributed among different
+ * processors. This function works in
+ * %parallel, too, but it is
+ * recommended to manually specify
+ * the %parallel partitioning of the
+ * matrix using an Epetra_Map. When
+ * run in %parallel, it is currently
+ * necessary that each processor
+ * holds the sparsity_pattern
+ * structure because each processor
+ * sets its rows.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template<typename SparsityType>
+ void reinit (const SparsityType &sparsity_pattern);
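+
+ /* A serial usage sketch based on a deal.II sparsity pattern; the
+ * <tt>dof_handler</tt> object is an assumption made for
+ * illustration:
+ *
+ * CompressedSparsityPattern csp (dof_handler.n_dofs(),
+ * dof_handler.n_dofs());
+ * DoFTools::make_sparsity_pattern (dof_handler, csp);
+ *
+ * TrilinosWrappers::SparseMatrix system_matrix;
+ * system_matrix.reinit (csp);
+ * // ... assemble entries ...
+ * system_matrix.compress ();
+ */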
+
+ /**
+ * This function reinitializes the
+ * Trilinos sparse matrix from a
+ * (possibly distributed) Trilinos
+ * sparsity pattern.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ void reinit (const SparsityPattern &sparsity_pattern);
+
+ /**
+ * This function copies the content
+ * in <tt>sparse_matrix</tt> to the
+ * calling matrix.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ void reinit (const SparseMatrix &sparse_matrix);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries
+ * stored therein. It uses a
+ * threshold to copy only elements
+ * with modulus larger than the
+ * threshold (so zeros in the deal.II
+ * matrix can be filtered away).
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template <typename number>
+ void reinit (const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
+
+ /**
+ * This reinit function takes as
+ * input a Trilinos Epetra_CrsMatrix
+ * and copies its sparsity
+ * pattern. If so requested, even the
+ * content (values) will be copied.
+ */
+ void reinit (const Epetra_CrsMatrix &input_matrix,
+ const bool copy_values = true);
//@}
- /**
- * @name Constructors and initialization using an Epetra_Map description
- */
+ /**
+ * @name Constructors and initialization using an Epetra_Map description
+ */
//@{
- /**
- * Constructor using an Epetra_Map to
- * describe the %parallel
- * partitioning. The parameter @p
- * n_max_entries_per_row sets the
- * number of nonzero entries in each
- * row that will be allocated. Note
- * that this number does not need to
- * be exact, and it is even allowed
- * that the actual matrix structure
- * has more nonzero entries than
- * specified in the
- * constructor. However it is still
- * advantageous to provide good
- * estimates here since this will
- * considerably increase the
- * performance of the matrix
- * setup. However, there is no effect
- * in the performance of
- * matrix-vector products, since
- * Trilinos reorganizes the matrix
- * memory prior to use (in the
- * compress() step).
- */
- SparseMatrix (const Epetra_Map &parallel_partitioning,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * Same as before, but now set a
- * value of nonzeros for each matrix
- * row. Since we know the number of
- * elements in the matrix exactly in
- * this case, we can already allocate
- * the right amount of memory, which
- * makes the creation process
- * including the insertion of nonzero
- * elements by the respective
- * SparseMatrix::reinit call
- * considerably faster.
- */
- SparseMatrix (const Epetra_Map &parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows and
- * columns. This interface is meant
- * to be used for generating
- * rectangular matrices, where one
- * map describes the %parallel
- * partitioning of the dofs
- * associated with the matrix rows
- * and the other one the partitioning
- * of dofs in the matrix
- * columns. Note that there is no
- * real parallelism along the columns
- * – the processor that owns a
- * certain row always owns all the
- * column elements, no matter how far
- * they might be spread out. The
- * second Epetra_Map is only used to
- * specify the number of columns and
- * for internal arragements when
- * doing matrix-vector products with
- * vectors based on that column map.
- *
- * The integer input @p
- * n_max_entries_per_row defines the
- * number of columns entries per row
- * that will be allocated.
- */
- SparseMatrix (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows and
- * columns. This interface is meant
- * to be used for generating
- * rectangular matrices, where one
- * map specifies the %parallel
- * distribution of degrees of freedom
- * associated with matrix rows and
- * the second one specifies the
- * %parallel distribution the dofs
- * associated with columns in the
- * matrix. The second map also
- * provides information for the
- * internal arrangement in matrix
- * vector products (i.e., the
- * distribution of vector this matrix
- * is to be multiplied with), but is
- * not used for the distribution of
- * the columns – rather, all
- * column elements of a row are
- * stored on the same processor in
- * any case. The vector
- * <tt>n_entries_per_row</tt>
- * specifies the number of entries in
- * each row of the newly generated
- * matrix.
- */
- SparseMatrix (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This function is initializes the
- * Trilinos Epetra matrix according to
- * the specified sparsity_pattern, and
- * also reassigns the matrix rows to
- * different processes according to a
- * user-supplied Epetra map. In
- * programs following the style of the
- * tutorial programs, this function
- * (and the respective call for a
- * rectangular matrix) are the natural
- * way to initialize the matrix size,
- * its distribution among the MPI
- * processes (if run in %parallel) as
- * well as the locatoin of non-zero
- * elements. Trilinos stores the
- * sparsity pattern internally, so it
- * won't be needed any more after this
- * call, in contrast to the deal.II own
- * object. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern. If
- * the flag is not set, each processor
- * just sets the elements in the
- * sparsity pattern that belong to its
- * rows.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const Epetra_Map &parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const bool exchange_data = false);
-
- /**
- * This function is similar to the
- * other initialization function
- * above, but now also reassigns the
- * matrix rows and columns according
- * to two user-supplied Epetra maps.
- * To be used for rectangular
- * matrices. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const bool exchange_data = false);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries
- * stored therein. It uses a
- * threshold to copy only elements
- * with modulus larger than the
- * threshold (so zeros in the deal.II
- * matrix can be filtered away). In
- * contrast to the other reinit
- * function with deal.II sparse
- * matrix argument, this function
- * takes a %parallel partitioning
- * specified by the user instead of
- * internally generating it.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const Epetra_Map &parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
-
- /**
- * This function is similar to the
- * other initialization function with
- * deal.II sparse matrix input above,
- * but now takes Epetra maps for both
- * the rows and the columns of the
- * matrix. Chosen for rectangular
- * matrices.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
+ /**
+ * Constructor using an Epetra_Map to
+ * describe the %parallel
+ * partitioning. The parameter @p
+ * n_max_entries_per_row sets the
+ * number of nonzero entries in each
+ * row that will be allocated. Note
+ * that this number does not need to
+ * be exact, and it is even allowed
+ * that the actual matrix structure
+ * has more nonzero entries than
+ * specified in the
+ * constructor. However it is still
+ * advantageous to provide good
+ * estimates here since this will
+ * considerably increase the
+ * performance of the matrix
+ * setup. Note, however, that this has
+ * no effect on the performance of
+ * matrix-vector products, since
+ * Trilinos reorganizes the matrix
+ * memory prior to use (in the
+ * compress() step).
+ */
+ SparseMatrix (const Epetra_Map &parallel_partitioning,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * Same as before, but now specify
+ * the number of nonzeros for each matrix
+ * row. Since we know the number of
+ * elements in the matrix exactly in
+ * this case, we can already allocate
+ * the right amount of memory, which
+ * makes the creation process
+ * including the insertion of nonzero
+ * elements by the respective
+ * SparseMatrix::reinit call
+ * considerably faster.
+ */
+ SparseMatrix (const Epetra_Map &parallel_partitioning,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different Epetra maps for rows and
+ * columns. This interface is meant
+ * to be used for generating
+ * rectangular matrices, where one
+ * map describes the %parallel
+ * partitioning of the dofs
+ * associated with the matrix rows
+ * and the other one the partitioning
+ * of dofs in the matrix
+ * columns. Note that there is no
+ * real parallelism along the columns
+ * – the processor that owns a
+ * certain row always owns all the
+ * column elements, no matter how far
+ * they might be spread out. The
+ * second Epetra_Map is only used to
+ * specify the number of columns and
+ * for internal arrangements when
+ * doing matrix-vector products with
+ * vectors based on that column map.
+ *
+ * The integer input @p
+ * n_max_entries_per_row defines the
+ * number of column entries per row
+ * that will be allocated.
+ */
+ SparseMatrix (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different Epetra maps for rows and
+ * columns. This interface is meant
+ * to be used for generating
+ * rectangular matrices, where one
+ * map specifies the %parallel
+ * distribution of degrees of freedom
+ * associated with matrix rows and
+ * the second one specifies the
+ * %parallel distribution of the dofs
+ * associated with columns in the
+ * matrix. The second map also
+ * provides information for the
+ * internal arrangement in matrix
+ * vector products (i.e., the
+ * distribution of the vectors this matrix
+ * is to be multiplied with), but is
+ * not used for the distribution of
+ * the columns – rather, all
+ * column elements of a row are
+ * stored on the same processor in
+ * any case. The vector
+ * <tt>n_entries_per_row</tt>
+ * specifies the number of entries in
+ * each row of the newly generated
+ * matrix.
+ */
+ SparseMatrix (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This function initializes the
+ * Trilinos Epetra matrix according to
+ * the specified sparsity_pattern, and
+ * also reassigns the matrix rows to
+ * different processes according to a
+ * user-supplied Epetra map. In
+ * programs following the style of the
+ * tutorial programs, this function
+ * (and the respective call for a
+ * rectangular matrix) are the natural
+ * way to initialize the matrix size,
+ * its distribution among the MPI
+ * processes (if run in %parallel) as
+ * well as the location of non-zero
+ * elements. Trilinos stores the
+ * sparsity pattern internally, so it
+ * won't be needed any more after this
+ * call, in contrast to deal.II's own
+ * object. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern. If
+ * the flag is not set, each processor
+ * just sets the elements in the
+ * sparsity pattern that belong to its
+ * rows.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template<typename SparsityType>
+ void reinit (const Epetra_Map &parallel_partitioning,
- const SparsityType &sparsity_pattern,
++ const SparsityType &sparsity_pattern,
+ const bool exchange_data = false);
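+
+ /* A sketch of parallel initialization with an Epetra_Map; the number
+ * of global rows <tt>n_global_rows</tt> and the sparsity pattern
+ * <tt>csp</tt> are assumptions made for illustration:
+ *
+ * Epetra_MpiComm comm (MPI_COMM_WORLD);
+ * Epetra_Map row_map (n_global_rows, 0, comm); // linear partition
+ *
+ * TrilinosWrappers::SparseMatrix matrix;
+ * matrix.reinit (row_map, csp);
+ */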
+
+ /**
+ * This function is similar to the
+ * other initialization function
+ * above, but now also reassigns the
+ * matrix rows and columns according
+ * to two user-supplied Epetra maps.
+ * To be used for rectangular
+ * matrices. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template<typename SparsityType>
+ void reinit (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
- const SparsityType &sparsity_pattern,
++ const SparsityType &sparsity_pattern,
+ const bool exchange_data = false);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries
+ * stored therein. It uses a
+ * threshold to copy only elements
+ * with modulus larger than the
+ * threshold (so zeros in the deal.II
+ * matrix can be filtered away). In
+ * contrast to the other reinit
+ * function with deal.II sparse
+ * matrix argument, this function
+ * takes a %parallel partitioning
+ * specified by the user instead of
+ * internally generating it.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template <typename number>
+ void reinit (const Epetra_Map &parallel_partitioning,
+ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
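+
+ /* A sketch of copying an existing deal.II matrix into a distributed
+ * Trilinos matrix; <tt>row_map</tt> and <tt>dealii_matrix</tt> are
+ * assumptions made for illustration:
+ *
+ * TrilinosWrappers::SparseMatrix trilinos_matrix;
+ * trilinos_matrix.reinit (row_map, dealii_matrix, 1e-13);
+ */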
+
+ /**
+ * This function is similar to the
+ * other initialization function with
+ * deal.II sparse matrix input above,
+ * but now takes Epetra maps for both
+ * the rows and the columns of the
+ * matrix. Chosen for rectangular
+ * matrices.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template <typename number>
+ void reinit (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
++ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
//@}
- /**
- * @name Constructors and initialization using an IndexSet description
- */
+ /**
+ * @name Constructors and initialization using an IndexSet description
+ */
//@{
- /**
- * Constructor using an IndexSet and
- * an MPI communicator to describe
- * the %parallel partitioning. The
- * parameter @p n_max_entries_per_row
- * sets the number of nonzero entries
- * in each row that will be
- * allocated. Note that this number
- * does not need to be exact, and it
- * is even allowed that the actual
- * matrix structure has more nonzero
- * entries than specified in the
- * constructor. However it is still
- * advantageous to provide good
- * estimates here since this will
- * considerably increase the
- * performance of the matrix
- * setup. However, there is no effect
- * in the performance of
- * matrix-vector products, since
- * Trilinos reorganizes the matrix
- * memory prior to use (in the
- * compress() step).
- */
- SparseMatrix (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * Same as before, but now set the
- * number of nonzeros in each matrix
- * row separately. Since we know the
- * number of elements in the matrix
- * exactly in this case, we can
- * already allocate the right amount
- * of memory, which makes the
- * creation process including the
- * insertion of nonzero elements by
- * the respective
- * SparseMatrix::reinit call
- * considerably faster.
- */
- SparseMatrix (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different IndexSet partitions for
- * row and columns. This interface is
- * meant to be used for generating
- * rectangular matrices, where the
- * first index set describes the
- * %parallel partitioning of the
- * degrees of freedom associated with
- * the matrix rows and the second one
- * the partitioning of the matrix
- * columns. The second index set
- * specifies the partitioning of the
- * vectors this matrix is to be
- * multiplied with, not the
- * distribution of the elements that
- * actually appear in the matrix.
- *
- * The parameter @p
- * n_max_entries_per_row defines how
- * much memory will be allocated for
- * each row. This number does not
- * need to be accurate, as the
- * structure is reorganized in the
- * compress() call.
- */
- SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows and
- * columns. This interface is meant
- * to be used for generating
- * rectangular matrices, where one
- * map specifies the %parallel
- * distribution of degrees of freedom
- * associated with matrix rows and
- * the second one specifies the
- * %parallel distribution the dofs
- * associated with columns in the
- * matrix. The second map also
- * provides information for the
- * internal arrangement in matrix
- * vector products (i.e., the
- * distribution of vector this matrix
- * is to be multiplied with), but is
- * not used for the distribution of
- * the columns – rather, all
- * column elements of a row are
- * stored on the same processor in
- * any case. The vector
- * <tt>n_entries_per_row</tt>
- * specifies the number of entries in
- * each row of the newly generated
- * matrix.
- */
- SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This function is initializes the
- * Trilinos Epetra matrix according
- * to the specified sparsity_pattern,
- * and also reassigns the matrix rows
- * to different processes according
- * to a user-supplied index set and
- * %parallel communicator. In
- * programs following the style of
- * the tutorial programs, this
- * function (and the respective call
- * for a rectangular matrix) are the
- * natural way to initialize the
- * matrix size, its distribution
- * among the MPI processes (if run in
- * %parallel) as well as the locatoin
- * of non-zero elements. Trilinos
- * stores the sparsity pattern
- * internally, so it won't be needed
- * any more after this call, in
- * contrast to the deal.II own
- * object. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern. If
- * the flag is not set, each
- * processor just sets the elements
- * in the sparsity pattern that
- * belong to its rows.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const IndexSet &parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool exchange_data = false);
-
- /**
- * This function is similar to the
- * other initialization function
- * above, but now also reassigns the
- * matrix rows and columns according
- * to two user-supplied index sets.
- * To be used for rectangular
- * matrices. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool exchange_data = false);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries
- * stored therein. It uses a
- * threshold to copy only elements
- * with modulus larger than the
- * threshold (so zeros in the deal.II
- * matrix can be filtered away). In
- * contrast to the other reinit
- * function with deal.II sparse
- * matrix argument, this function
- * takes a %parallel partitioning
- * specified by the user instead of
- * internally generating it.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const IndexSet &parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
-
- /**
- * This function is similar to the
- * other initialization function with
- * deal.II sparse matrix input above,
- * but now takes index sets for both
- * the rows and the columns of the
- * matrix. Chosen for rectangular
- * matrices.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
+ /**
+ * Constructor using an IndexSet and
+ * an MPI communicator to describe
+ * the %parallel partitioning. The
+ * parameter @p n_max_entries_per_row
+ * sets the number of nonzero entries
+ * in each row that will be
+ * allocated. Note that this number
+ * does not need to be exact, and it
+ * is even allowed that the actual
+ * matrix structure has more nonzero
+ * entries than specified in the
+ * constructor. However it is still
+ * advantageous to provide good
+ * estimates here since this will
+ * considerably increase the
+ * performance of the matrix
+ * setup. Note, however, that this has
+ * no effect on the performance of
+ * matrix-vector products, since
+ * Trilinos reorganizes the matrix
+ * memory prior to use (in the
+ * compress() step).
+ */
+ SparseMatrix (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * Same as before, but now set the
+ * number of nonzeros in each matrix
+ * row separately. Since we know the
+ * number of elements in the matrix
+ * exactly in this case, we can
+ * already allocate the right amount
+ * of memory, which makes the
+ * creation process including the
+ * insertion of nonzero elements by
+ * the respective
+ * SparseMatrix::reinit call
+ * considerably faster.
+ */
+ SparseMatrix (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different IndexSet partitions for
+ * row and columns. This interface is
+ * meant to be used for generating
+ * rectangular matrices, where the
+ * first index set describes the
+ * %parallel partitioning of the
+ * degrees of freedom associated with
+ * the matrix rows and the second one
+ * the partitioning of the matrix
+ * columns. The second index set
+ * specifies the partitioning of the
+ * vectors this matrix is to be
+ * multiplied with, not the
+ * distribution of the elements that
+ * actually appear in the matrix.
+ *
+ * The parameter @p
+ * n_max_entries_per_row defines how
+ * much memory will be allocated for
+ * each row. This number does not
+ * need to be accurate, as the
+ * structure is reorganized in the
+ * compress() call.
+ */
+ SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different Epetra maps for rows and
+ * columns. This interface is meant
+ * to be used for generating
+ * rectangular matrices, where one
+ * map specifies the %parallel
+ * distribution of degrees of freedom
+ * associated with matrix rows and
+ * the second one specifies the
+ * %parallel distribution of the dofs
+ * associated with columns in the
+ * matrix. The second map also
+ * provides information for the
+ * internal arrangement in matrix
+ * vector products (i.e., the
+ * distribution of the vectors this matrix
+ * is to be multiplied with), but is
+ * not used for the distribution of
+ * the columns – rather, all
+ * column elements of a row are
+ * stored on the same processor in
+ * any case. The vector
+ * <tt>n_entries_per_row</tt>
+ * specifies the number of entries in
+ * each row of the newly generated
+ * matrix.
+ */
+ SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This function initializes the
+ * Trilinos Epetra matrix according
+ * to the specified sparsity_pattern,
+ * and also reassigns the matrix rows
+ * to different processes according
+ * to a user-supplied index set and
+ * %parallel communicator. In
+ * programs following the style of
+ * the tutorial programs, this
+ * function (and the respective call
+ * for a rectangular matrix) are the
+ * natural way to initialize the
+ * matrix size, its distribution
+ * among the MPI processes (if run in
+ * %parallel) as well as the location
+ * of non-zero elements. Trilinos
+ * stores the sparsity pattern
+ * internally, so it won't be needed
+ * any more after this call, in
+ * contrast to deal.II's own
+ * object. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern. If
+ * the flag is not set, each
+ * processor just sets the elements
+ * in the sparsity pattern that
+ * belong to its rows.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
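+ *
+ * As a minimal sketch of the usual call sequence (the names
+ * <tt>dof_handler</tt> and <tt>mpi_communicator</tt> are assumed to
+ * exist in the surrounding program):
+ * @code
+ * IndexSet locally_owned = dof_handler.locally_owned_dofs();
+ * CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
+ *                                      dof_handler.n_dofs());
+ * DoFTools::make_sparsity_pattern (dof_handler, csp);
+ *
+ * TrilinosWrappers::SparseMatrix matrix;
+ * matrix.reinit (locally_owned, csp, mpi_communicator);
+ * @endcode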
+ */
+ template<typename SparsityType>
+ void reinit (const IndexSet &parallel_partitioning,
- const SparsityType &sparsity_pattern,
++ const SparsityType &sparsity_pattern,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+
+ /**
+ * This function is similar to the
+ * other initialization function
+ * above, but now also reassigns the
+ * matrix rows and columns according
+ * to two user-supplied index sets.
+ * To be used for rectangular
+ * matrices. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template<typename SparsityType>
+ void reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
- const SparsityType &sparsity_pattern,
++ const SparsityType &sparsity_pattern,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries
+ * stored therein. It uses a
+ * threshold to copy only elements
+ * with modulus larger than the
+ * threshold (so zeros in the deal.II
+ * matrix can be filtered away). In
+ * contrast to the other reinit
+ * function with deal.II sparse
+ * matrix argument, this function
+ * takes a %parallel partitioning
+ * specified by the user instead of
+ * internally generating it.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
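+ *
+ * A brief sketch (assuming <tt>dealii_matrix</tt> is an already
+ * assembled ::dealii::SparseMatrix<double>, and <tt>locally_owned</tt>
+ * and <tt>mpi_communicator</tt> exist as above):
+ * @code
+ * TrilinosWrappers::SparseMatrix trilinos_matrix;
+ * trilinos_matrix.reinit (locally_owned, dealii_matrix,
+ *                         mpi_communicator, 1e-13, true);
+ * @endcode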
+ */
+ template <typename number>
+ void reinit (const IndexSet &parallel_partitioning,
+ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
+
+ /**
+ * This function is similar to the
+ * other initialization function with
+ * deal.II sparse matrix input above,
+ * but now takes index sets for both
+ * the rows and the columns of the
+ * matrix. Chosen for rectangular
+ * matrices.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ template <typename number>
+ void reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
++ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
//@}
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return the number of rows in
- * this matrix.
- */
- unsigned int m () const;
-
- /**
- * Return the number of columns
- * in this matrix.
- */
- unsigned int n () const;
-
- /**
- * Return the local dimension
- * of the matrix, i.e. the
- * number of rows stored on the
- * present MPI process. For
- * sequential matrices, this
- * number is the same as m(),
- * but for %parallel matrices it
- * may be smaller.
- *
- * To figure out which elements
- * exactly are stored locally,
- * use local_range().
- */
- unsigned int local_size () const;
-
- /**
- * Return a pair of indices
- * indicating which rows of
- * this matrix are stored
- * locally. The first number is
- * the index of the first row
- * stored, the second the index
- * of the one past the last one
- * that is stored locally. If
- * this is a sequential matrix,
- * then the result will be the
- * pair (0,m()), otherwise it
- * will be a pair (i,i+n),
- * where
- * <tt>n=local_size()</tt>.
- */
- std::pair<unsigned int, unsigned int>
- local_range () const;
-
- /**
- * Return whether @p index is
- * in the local range or not,
- * see also local_range().
- */
- bool in_local_range (const unsigned int index) const;
-
- /**
- * Return the number of nonzero
- * elements of this matrix.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Number of entries in a
- * specific row.
- */
- unsigned int row_length (const unsigned int row) const;
-
- /**
- * Returns the state of the matrix,
- * i.e., whether compress() needs to
- * be called after an operation
- * requiring data exchange. A call to
- * compress() is also needed when the
- * method set() has been called (even
- * when working in serial).
- */
- bool is_compressed () const;
-
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object. Note that only the memory
- * reserved on the current processor is
- * returned in case this is called in
- * an MPI-based program.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return the number of rows in
+ * this matrix.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the number of columns
+ * in this matrix.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the local dimension
+ * of the matrix, i.e. the
+ * number of rows stored on the
+ * present MPI process. For
+ * sequential matrices, this
+ * number is the same as m(),
+ * but for %parallel matrices it
+ * may be smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which rows of
+ * this matrix are stored
+ * locally. The first number is
+ * the index of the first row
+ * stored, the second the index
+ * of the one past the last one
+ * that is stored locally. If
+ * this is a sequential matrix,
+ * then the result will be the
+ * pair (0,m()), otherwise it
+ * will be a pair (i,i+n),
+ * where
+ * <tt>n=local_size()</tt>.
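+ *
+ * For illustration, a loop over the locally stored rows of a matrix
+ * <tt>matrix</tt> (a name assumed here) could look like this:
+ * @code
+ * const std::pair<unsigned int,unsigned int> range = matrix.local_range();
+ * unsigned int n_local_entries = 0;
+ * for (unsigned int row=range.first; row<range.second; ++row)
+ *   n_local_entries += matrix.row_length (row);
+ * @endcode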
+ */
+ std::pair<unsigned int, unsigned int>
+ local_range () const;
+
+ /**
+ * Return whether @p index is
+ * in the local range or not,
+ * see also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this matrix.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Number of entries in a
+ * specific row.
+ */
+ unsigned int row_length (const unsigned int row) const;
+
+ /**
+ * Returns the state of the matrix,
+ * i.e., whether compress() needs to
+ * be called after an operation
+ * requiring data exchange. A call to
+ * compress() is also needed when the
+ * method set() has been called (even
+ * when working in serial).
+ */
+ bool is_compressed () const;
+
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object. Note that only the memory
+ * reserved on the current processor is
+ * returned in case this is called in
+ * an MPI-based program.
+ */
+ std::size_t memory_consumption () const;
//@}
- /**
- * @name Modifying entries
- */
+ /**
+ * @name Modifying entries
+ */
//@{
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keeps the sparsity pattern
- * previously used.
- */
- SparseMatrix &
- operator = (const double d);
-
- /**
- * Release all memory and return to a
- * state just like after having
- * called the default constructor.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- void clear ();
-
- /**
- * This command does two things:
- * <ul>
- * <li> If the matrix was initialized
- * without a sparsity pattern,
- * elements have been added manually
- * using the set() command. When this
- * process is completed, a call to
- * compress() reorganizes the
- * internal data structures (aparsity
- * pattern) so that a fast access to
- * data is possible in matrix-vector
- * products.
- * <li> If the matrix structure has
- * already been fixed (either by
- * initialization with a sparsity
- * pattern or by calling compress()
- * during the setup phase), this
- * command does the %parallel
- * exchange of data. This is
- * necessary when we perform assembly
- * on more than one (MPI) process,
- * because then some non-local row
- * data will accumulate on nodes that
- * belong to the current's processor
- * element, but are actually held by
- * another. This command is usually
- * called after all elements have
- * been traversed.
- * </ul>
- *
- * In both cases, this function
- * compresses the data structures and
- * allows the resulting matrix to be
- * used in all other operations like
- * matrix-vector products. This is a
- * collective operation, i.e., it
- * needs to be run on all processors
- * when used in %parallel.
- *
- * See @ref GlossCompress "Compressing distributed objects"
- * for more information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Set the element (<i>i,j</i>)
- * to @p value.
- *
- * This function is able to insert new
- * elements into the matrix as long as
- * compress() has not been called, so
- * the sparsity pattern will be
- * extended. When compress() is called
- * for the first time, then this is no
- * longer possible and an insertion of
- * elements at positions which have not
- * been initialized will throw an
- * exception. Note that in case
- * elements need to be inserted, it is
- * mandatory that elements are inserted
- * only once. Otherwise, the elements
- * will actually be added in the end
- * (since it is not possible to
- * efficiently find values to the same
- * entry before compress() has been
- * called). In the case that an element
- * is set more than once, initialize
- * the matrix with a sparsity pattern
- * first.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value);
-
- /**
- * Set all elements given in a
- * FullMatrix<double> into the sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * This function is able to insert
- * new elements into the matrix as
- * long as compress() has not been
- * called, so the sparsity pattern
- * will be extended. When compress()
- * is called for the first time, then
- * this is no longer possible and an
- * insertion of elements at positions
- * which have not been initialized
- * will throw an exception.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * This function is able to insert
- * new elements into the matrix as
- * long as compress() has not been
- * called, so the sparsity pattern
- * will be extended. When compress()
- * is called for the first time, then
- * this is no longer possible and an
- * insertion of elements at positions
- * which have not been initialized
- * will throw an exception.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * This function is able to insert
- * new elements into the matrix as
- * long as compress() has not been
- * called, so the sparsity pattern
- * will be extended. When compress()
- * is called for the first time, then
- * this is no longer possible and an
- * insertion of elements at positions
- * which have not been initialized
- * will throw an exception.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values,
- const bool elide_zero_values = false);
-
- /**
- * Add @p value to the element
- * (<i>i,j</i>).
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number>
- * class (but in contrast to the
- * situation for PETSc based
- * matrices), this function
- * throws an exception if an
- * entry does not exist in the
- * sparsity pattern. Moreover, if
- * <tt>value</tt> is not a finite
- * number an exception is thrown.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number>
- * class (but in contrast to the
- * situation for PETSc based
- * matrices), this function
- * throws an exception if an
- * entry does not exist in the
- * sparsity pattern.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number>
- * class (but in contrast to the
- * situation for PETSc based
- * matrices), this function
- * throws an exception if an
- * entry does not exist in the
- * sparsity pattern.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number> class
- * (but in contrast to the situation
- * for PETSc based matrices), this
- * function throws an exception if an
- * entry does not exist in the
- * sparsity pattern.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Multiply the entire matrix
- * by a fixed factor.
- */
- SparseMatrix & operator *= (const TrilinosScalar factor);
-
- /**
- * Divide the entire matrix by
- * a fixed factor.
- */
- SparseMatrix & operator /= (const TrilinosScalar factor);
-
- /**
- * Copy the given (Trilinos) matrix
- * (sparsity pattern and entries).
- */
- void copy_from (const SparseMatrix &source);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix
- * <tt>factor*matrix</tt> is added to
- * <tt>this</tt>. If the sparsity
- * pattern of the calling matrix does
- * not contain all the elements in
- * the sparsity pattern of the input
- * matrix, this function will throw
- * an exception.
- */
- void add (const TrilinosScalar factor,
- const SparseMatrix &matrix);
-
- /**
- * Remove all elements from
- * this <tt>row</tt> by setting
- * them to zero. The function
- * does not modify the number
- * of allocated nonzero
- * entries, it only sets some
- * entries to zero. It may drop
- * them from the sparsity
- * pattern, though (but retains
- * the allocated memory in case
- * new entries are again added
- * later). Note that this is a
- * global operation, so this
- * needs to be done on all MPI
- * processes.
- *
- * This operation is used in
- * eliminating constraints
- * (e.g. due to hanging nodes)
- * and makes sure that we can
- * write this modification to
- * the matrix without having to
- * read entries (such as the
- * locations of non-zero
- * elements) from it —
- * without this operation,
- * removing constraints on
- * %parallel matrices is a
- * rather complicated
- * procedure.
- *
- * The second parameter can be
- * used to set the diagonal
- * entry of this row to a value
- * different from zero. The
- * default is to set it to
- * zero.
- */
- void clear_row (const unsigned int row,
- const TrilinosScalar new_diag_value = 0);
-
- /**
- * Same as clear_row(), except
- * that it works on a number of
- * rows at once.
- *
- * The second parameter can be
- * used to set the diagonal
- * entries of all cleared rows
- * to something different from
- * zero. Note that all of these
- * diagonal entries get the
- * same value -- if you want
- * different values for the
- * diagonal entries, you have
- * to set them by hand.
- */
- void clear_rows (const std::vector<unsigned int> &rows,
- const TrilinosScalar new_diag_value = 0);
-
- /**
- * Make an in-place transpose
- * of a matrix.
- */
- void transpose ();
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ SparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Release all memory and return to a
+ * state just like after having
+ * called the default constructor.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * dead lock.
+ */
+ void clear ();
+
+ /**
+ * This command does two things:
+ * <ul>
+ * <li> If the matrix was initialized
+ * without a sparsity pattern,
+ * elements have been added manually
+ * using the set() command. When this
+ * process is completed, a call to
+ * compress() reorganizes the
+ * internal data structures (sparsity
+ * pattern) so that a fast access to
+ * data is possible in matrix-vector
+ * products.
+ * <li> If the matrix structure has
+ * already been fixed (either by
+ * initialization with a sparsity
+ * pattern or by calling compress()
+ * during the setup phase), this
+ * command does the %parallel
+ * exchange of data. This is
+ * necessary when we perform assembly
+ * on more than one (MPI) process,
+ * because then some non-local row
+ * data will accumulate on nodes that
+ * belong to the current processor's
+ * elements, but are actually held by
+ * another. This command is usually
+ * called after all elements have
+ * been traversed.
+ * </ul>
+ *
+ * In both cases, this function
+ * compresses the data structures and
+ * allows the resulting matrix to be
+ * used in all other operations like
+ * matrix-vector products. This is a
+ * collective operation, i.e., it
+ * needs to be run on all processors
+ * when used in %parallel.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
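+ *
+ * In a typical %parallel assembly loop this looks as sketched below
+ * (the local objects <tt>local_dof_indices</tt> and
+ * <tt>local_matrix</tt> are assumed to have been computed on each
+ * cell):
+ * @code
+ * matrix.add (local_dof_indices, local_matrix);
+ * // ... after the loop over all cells:
+ * matrix.compress (VectorOperation::add);
+ * @endcode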
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+ /**
+ * Set the element (<i>i,j</i>)
+ * to @p value.
+ *
+ * This function is able to insert new
+ * elements into the matrix as long as
+ * compress() has not been called, so
+ * the sparsity pattern will be
+ * extended. When compress() is called
+ * for the first time, then this is no
+ * longer possible and an insertion of
+ * elements at positions which have not
+ * been initialized will throw an
+ * exception. Note that in case
+ * elements need to be inserted, it is
+ * mandatory that elements are inserted
+ * only once. Otherwise, the elements
+ * will actually be added in the end
+ * (since it is not possible to
+ * efficiently find values written to the same
+ * entry before compress() has been
+ * called). In the case that an element
+ * is set more than once, initialize
+ * the matrix with a sparsity pattern
+ * first.
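+ *
+ * As a sketch (assuming <tt>matrix</tt> was created without a fixed
+ * sparsity pattern):
+ * @code
+ * matrix.set (0, 0,  2.0);
+ * matrix.set (0, 1, -1.0);
+ * matrix.compress ();   // fixes the sparsity pattern
+ * @endcode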
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix<double> into the sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * This function is able to insert
+ * new elements into the matrix as
+ * long as compress() has not been
+ * called, so the sparsity pattern
+ * will be extended. When compress()
+ * is called for the first time, then
+ * this is no longer possible and an
+ * insertion of elements at positions
+ * which have not been initialized
+ * will throw an exception.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
- void set (const std::vector<unsigned int> &indices,
++ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
++ void set (const std::vector<unsigned int> &row_indices,
++ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * This function is able to insert
+ * new elements into the matrix as
+ * long as compress() has not been
+ * called, so the sparsity pattern
+ * will be extended. When compress()
+ * is called for the first time, then
+ * this is no longer possible and an
+ * insertion of elements at positions
+ * which have not been initialized
+ * will throw an exception.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements of the given
+ * row of the sparse matrix: the new
+ * values are given by
+ * <tt>values</tt>, the columns by
+ * <tt>col_indices</tt>.
+ *
+ * This function is able to insert
+ * new elements into the matrix as
+ * long as compress() has not been
+ * called, so the sparsity pattern
+ * will be extended. When compress()
+ * is called for the first time, then
+ * this is no longer possible and an
+ * insertion of elements at positions
+ * which have not been initialized
+ * will throw an exception.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add @p value to the element
+ * (<i>i,j</i>).
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number>
+ * class (but in contrast to the
+ * situation for PETSc based
+ * matrices), this function
+ * throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern. Moreover, if
+ * <tt>value</tt> is not a finite
+ * number an exception is thrown.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in the calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number>
+ * class (but in contrast to the
+ * situation for PETSc based
+ * matrices), this function
+ * throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
- void add (const std::vector<unsigned int> &indices,
++ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
++ void add (const std::vector<unsigned int> &row_indices,
++ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add the values given by
+ * <tt>values</tt> to several
+ * elements in the specified row of
+ * the matrix, at the column indices
+ * given by <tt>col_indices</tt>.
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number>
+ * class (but in contrast to the
+ * situation for PETSc based
+ * matrices), this function
+ * throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number> class
+ * (but in contrast to the situation
+ * for PETSc based matrices), this
+ * function throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Multiply the entire matrix
+ * by a fixed factor.
+ */
+ SparseMatrix &operator *= (const TrilinosScalar factor);
+
+ /**
+ * Divide the entire matrix by
+ * a fixed factor.
+ */
+ SparseMatrix &operator /= (const TrilinosScalar factor);
+
+ /**
+ * Copy the given (Trilinos) matrix
+ * (sparsity pattern and entries).
+ */
+ void copy_from (const SparseMatrix &source);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix
+ * <tt>factor*matrix</tt> is added to
+ * <tt>this</tt>. If the sparsity
+ * pattern of the calling matrix does
+ * not contain all the elements in
+ * the sparsity pattern of the input
+ * matrix, this function will throw
+ * an exception.
+ */
+ void add (const TrilinosScalar factor,
+ const SparseMatrix &matrix);
+
+ /**
+ * Remove all elements from
+ * this <tt>row</tt> by setting
+ * them to zero. The function
+ * does not modify the number
+ * of allocated nonzero
+ * entries, it only sets some
+ * entries to zero. It may drop
+ * them from the sparsity
+ * pattern, though (but retains
+ * the allocated memory in case
+ * new entries are again added
+ * later). Note that this is a
+ * global operation, so this
+ * needs to be done on all MPI
+ * processes.
+ *
+ * This operation is used in
+ * eliminating constraints
+ * (e.g. due to hanging nodes)
+ * and makes sure that we can
+ * write this modification to
+ * the matrix without having to
+ * read entries (such as the
+ * locations of non-zero
+ * elements) from it —
+ * without this operation,
+ * removing constraints on
+ * %parallel matrices is a
+ * rather complicated
+ * procedure.
+ *
+ * The second parameter can be
+ * used to set the diagonal
+ * entry of this row to a value
+ * different from zero. The
+ * default is to set it to
+ * zero.
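+ *
+ * A typical use, sketched with an assumed index
+ * <tt>constrained_dof</tt> of a constrained degree of freedom:
+ * @code
+ * matrix.clear_row (constrained_dof, 1.0);
+ * @endcode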
+ */
+ void clear_row (const unsigned int row,
+ const TrilinosScalar new_diag_value = 0);
+
+ /**
+ * Same as clear_row(), except
+ * that it works on a number of
+ * rows at once.
+ *
+ * The second parameter can be
+ * used to set the diagonal
+ * entries of all cleared rows
+ * to something different from
+ * zero. Note that all of these
+ * diagonal entries get the
+ * same value -- if you want
+ * different values for the
+ * diagonal entries, you have
+ * to set them by hand.
+ */
+ void clear_rows (const std::vector<unsigned int> &rows,
+ const TrilinosScalar new_diag_value = 0);
+
+ /**
+ * Make an in-place transpose
+ * of a matrix.
+ */
+ void transpose ();
//@}
- /**
- * @name Entry Access
- */
+ /**
+ * @name Entry Access
+ */
//@{
- /**
- * Return the value of the
- * entry (<i>i,j</i>). This
- * may be an expensive
- * operation and you should
- * always take care where to
- * call this function. As in
- * the deal.II sparse matrix
- * class, we throw an exception
- * if the respective entry
- * doesn't exist in the
- * sparsity pattern of this
- * class, which is requested
- * from Trilinos. Moreover, an
- * exception will be thrown
- * when the requested element
- * is not saved on the calling
- * process.
- */
- TrilinosScalar operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the value of the
- * matrix entry
- * (<i>i,j</i>). If this entry
- * does not exist in the
- * sparsity pattern, then zero
- * is returned. While this may
- * be convenient in some cases,
- * note that it is simple to
- * write algorithms that are
- * slow compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- * On the other hand, if you
- * want to be sure the entry
- * exists, you should use
- * operator() instead.
- *
- * The lack of error checking
- * in this function can also
- * yield surprising results if
- * you have a parallel
- * matrix. In that case, just
- * because you get a zero
- * result from this function
- * does not mean that either
- * the entry does not exist in
- * the sparsity pattern or that
- * it does but has a value of
- * zero. Rather, it could also
- * be that it simply isn't
- * stored on the current
- * processor; in that case, it
- * may be stored on a different
- * processor, and possibly so
- * with a nonzero value.
- */
- TrilinosScalar el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic and it also throws
- * an error if <i>(i,i)</i> is not
- * element of the local matrix.
- * See also the comment in
- * trilinos_sparse_matrix.cc.
- */
- TrilinosScalar diag_element (const unsigned int i) const;
+ /**
+ * Return the value of the
+ * entry (<i>i,j</i>). This
+ * may be an expensive
+ * operation and you should
+ * always take care where to
+ * call this function. As in
+ * the deal.II sparse matrix
+ * class, we throw an exception
+ * if the respective entry
+ * doesn't exist in the
+ * sparsity pattern of this
+ * class, which is requested
+ * from Trilinos. Moreover, an
+ * exception will be thrown
+ * when the requested element
+ * is not saved on the calling
+ * process.
+ */
+ TrilinosScalar operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the value of the
+ * matrix entry
+ * (<i>i,j</i>). If this entry
+ * does not exist in the
+ * sparsity pattern, then zero
+ * is returned. While this may
+ * be convenient in some cases,
+ * note that it is simple to
+ * write algorithms that are
+ * slow compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ * On the other hand, if you
+ * want to be sure the entry
+ * exists, you should use
+ * operator() instead.
+ *
+ * The lack of error checking
+ * in this function can also
+ * yield surprising results if
+ * you have a parallel
+ * matrix. In that case, just
+ * because you get a zero
+ * result from this function
+ * does not mean that either
+ * the entry does not exist in
+ * the sparsity pattern or that
+ * it does but has a value of
+ * zero. Rather, it could also
+ * be that it simply isn't
+ * stored on the current
+ * processor; in that case, it
+ * may be stored on a different
+ * processor, and possibly so
+ * with a nonzero value.
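+ *
+ * A small sketch of guarded read access (the indices <tt>i</tt> and
+ * <tt>j</tt> are assumed to be given):
+ * @code
+ * TrilinosScalar value = 0;
+ * if (matrix.in_local_range (i))
+ *   value = matrix.el (i, j);  // zero is also returned if (i,j) is absent
+ * @endcode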
+ */
+ TrilinosScalar el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic and it also throws
+ * an error if <i>(i,i)</i> is not
+ * element of the local matrix.
+ * See also the comment in
+ * trilinos_sparse_matrix.cc.
+ */
+ TrilinosScalar diag_element (const unsigned int i) const;
//@}
- /**
- * @name Multiplications
- */
+ /**
+ * @name Multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void vmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Same as before, but working with
- * deal.II's own distributed vector
- * class.
- */
- void vmult (parallel::distributed::Vector<TrilinosScalar> &dst,
- const parallel::distributed::Vector<TrilinosScalar> &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst =
- * M<sup>T</sup>*src</i> with
- * <i>M</i> being this
- * matrix. This function does the
- * same as vmult() but takes the
- * transposed matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Same as before, but working with
- * deal.II's own distributed vector
- * class.
- */
- void Tvmult (parallel::distributed::Vector<TrilinosScalar> &dst,
- const parallel::distributed::Vector<TrilinosScalar> &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void vmult_add (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void Tvmult_add (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix, i.e.,
- * $\left(v,Mv\right)$. This is
- * useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to
- * be quadratic for this
- * operation.
- *
- * The implementation of this
- * function is not as efficient
- * as the one in the @p
- * SparseMatrix class used in
- * deal.II (i.e. the original
- * one, not the Trilinos wrapper
- * class) since Trilinos doesn't
- * support this operation and
- * needs a temporary vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- TrilinosScalar matrix_norm_square (const VectorBase &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- *
- * The implementation of this
- * function is not as efficient
- * as the one in the @p
- * SparseMatrix class used in
- * deal.II (i.e. the original
- * one, not the Trilinos
- * wrapper class) since
- * Trilinos doesn't support
- * this operation and needs a
- * temporary vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- TrilinosScalar matrix_scalar_product (const VectorBase &u,
- const VectorBase &v) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and
- * destination <i>dst</i> must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- TrilinosScalar residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const;
-
- /**
- * Perform the matrix-matrix
- * multiplication <tt>C = A * B</tt>,
- * or, if an optional vector argument
- * is given, <tt>C = A * diag(V) *
- * B</tt>, where <tt>diag(V)</tt>
- * defines a diagonal matrix with the
- * vector entries.
- *
- * This function assumes that the
- * calling matrix <tt>A</tt> and
- * <tt>B</tt> have compatible
- * sizes. The size of <tt>C</tt> will
- * be set within this function.
- *
- * The content as well as the sparsity
- * pattern of the matrix C will be
- * changed by this function, so make
- * sure that the sparsity pattern is
- * not used somewhere else in your
- * program. This is an expensive
- * operation, so think twice before you
- * use this function.
- */
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
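+ *
+ * A minimal sketch (assuming <tt>dst</tt> and <tt>src</tt> are
+ * distributed vectors built with the same Map as the matrix):
+ * @code
+ * matrix.vmult (dst, src);      // dst  = M * src
+ * matrix.vmult_add (dst, src);  // dst += M * src
+ * @endcode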
+ */
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Same as before, but working with
+ * deal.II's own distributed vector
+ * class.
+ */
+ void vmult (parallel::distributed::Vector<TrilinosScalar> &dst,
+ const parallel::distributed::Vector<TrilinosScalar> &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst =
+ * M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does the
+ * same as vmult() but takes the
+ * transposed matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Same as before, but working with
+ * deal.II's own distributed vector
+ * class.
+ */
+ void Tvmult (parallel::distributed::Vector<TrilinosScalar> &dst,
+ const parallel::distributed::Vector<TrilinosScalar> &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void vmult_add (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void Tvmult_add (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix, i.e.,
+ * $\left(v,Mv\right)$. This is
+ * useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to
+ * be quadratic for this
+ * operation.
+ *
+ * The implementation of this
+ * function is not as efficient
+ * as the one in the @p
+ * SparseMatrix class used in
+ * deal.II (i.e. the original
+ * one, not the Trilinos wrapper
+ * class) since Trilinos doesn't
+ * support this operation and
+ * needs a temporary vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
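+ *
+ * Sketch (with an assumed mass matrix <tt>mass_matrix</tt> and a
+ * compatible distributed vector <tt>v</tt>):
+ * @code
+ * const TrilinosScalar norm_sqr = mass_matrix.matrix_norm_square (v);
+ * @endcode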
+ */
+ TrilinosScalar matrix_norm_square (const VectorBase &v) const;
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ *
+ * The implementation of this
+ * function is not as efficient
+ * as the one in the @p
+ * SparseMatrix class used in
+ * deal.II (i.e. the original
+ * one, not the Trilinos
+ * wrapper class) since
+ * Trilinos doesn't support
+ * this operation and needs a
+ * temporary vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ TrilinosScalar matrix_scalar_product (const VectorBase &u,
+ const VectorBase &v) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
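+ *
+ * Sketch (with assumed, compatible distributed vectors <tt>r</tt>,
+ * <tt>x</tt> and <tt>b</tt>):
+ * @code
+ * const TrilinosScalar norm = matrix.residual (r, x, b);  // r = b - M*x
+ * @endcode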
+ */
+ TrilinosScalar residual (VectorBase &dst,
+ const VectorBase &x,
+ const VectorBase &b) const;
+
+ /**
+ * Perform the matrix-matrix
+ * multiplication <tt>C = A * B</tt>,
+ * or, if an optional vector argument
+ * is given, <tt>C = A * diag(V) *
+ * B</tt>, where <tt>diag(V)</tt>
+ * defines a diagonal matrix with the
+ * vector entries.
+ *
+ * This function assumes that the
+ * calling matrix <tt>A</tt> and
+ * <tt>B</tt> have compatible
+ * sizes. The size of <tt>C</tt> will
+ * be set within this function.
+ *
+ * The content as well as the sparsity
+ * pattern of the matrix C will be
+ * changed by this function, so make
+ * sure that the sparsity pattern is
+ * not used somewhere else in your
+ * program. This is an expensive
+ * operation, so think twice before you
+ * use this function.
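+ *
+ * Sketch (with assumed matrices <tt>A</tt>, <tt>B</tt> and an assumed
+ * diagonal-scaling vector <tt>diagonal</tt>):
+ * @code
+ * TrilinosWrappers::SparseMatrix C;
+ * A.mmult (C, B);            // C = A * B
+ * A.mmult (C, B, diagonal);  // C = A * diag(diagonal) * B
+ * @endcode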
+ */
void mmult (SparseMatrix &C,
const SparseMatrix &B,
const VectorBase &V = VectorBase()) const;
{
class BlockVector;
- /**
- * This class implements a wrapper to use the Trilinos distributed
- * vector class Epetra_FEVector. This class is derived from the
- * TrilinosWrappers::VectorBase class and provides all functionality
- * included there.
- *
- * Note that Trilinos only guarantees that operations do what you expect
- * if the function @p GlobalAssemble has been called after vector
- * assembly in order to distribute the data. This is necessary since
- * some processes might have accumulated data of elements that are not
- * owned by themselves, but must be sent to the owning process. In order
- * to avoid using the wrong data, you need to call Vector::compress()
- * before you actually use the vectors.
- *
- * <h3>Parallel communication model</h3>
- *
- * The parallel functionality of Trilinos is built on top of the Message
- * Passing Interface (MPI). MPI's communication model is built on
- * collective communications: if one process wants something from
- * another, that other process has to be willing to accept this
- * communication. A process cannot query data from another process by
- * calling a remote function, without that other process expecting such
- * a transaction. The consequence is that most of the operations in the
- * base class of this class have to be called collectively. For example,
- * if you want to compute the l2 norm of a parallel vector, @em all
- * processes across which this vector is shared have to call the @p
- * l2_norm function. If you don't do this, but instead only call the @p
- * l2_norm function on one process, then the following happens: This one
- * process will call one of the collective MPI functions and wait for
- * all the other processes to join in on this. Since the other processes
- * don't call this function, you will either get a time-out on the first
- * process, or, worse, by the time the next call to a Trilinos function
- * generates an MPI message on the other processes, you will get a
- * cryptic message that only a subset of processes attempted a
- * communication. These bugs can be very hard to figure out, unless you
- * are well-acquainted with the communication model of MPI, and know
- * which functions may generate MPI messages.
- *
- * One particular case, where an MPI message may be generated
- * unexpectedly is discussed below.
- *
- * <h3>Accessing individual elements of a vector</h3>
- *
- * Trilinos does allow read access to individual elements of a vector,
- * but in the distributed case only to elements that are stored
- * locally. We implement this through calls like
- * <tt>d=vec(i)</tt>. However, if you access an element outside the
- * locally stored range, an exception is generated.
- *
- * In contrast to read access, Trilinos (and the respective deal.II
- * wrapper classes) allow to write (or add) to individual elements of
- * vectors, even if they are stored on a different process. You can do
- * this writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>,
- * or similar operations. There is one catch, however, that may lead to
- * very confusing error messages: Trilinos requires application programs
- * to call the compress() function when they switch from adding, to
- * elements to writing to elements. The reasoning is that all processes
- * might accumulate addition operations to elements, even if multiple
- * processes write to the same elements. By the time we call compress()
- * the next time, all these additions are executed. However, if one
- * process adds to an element, and another overwrites to it, the order
- * of execution would yield non-deterministic behavior if we don't make
- * sure that a synchronisation with compress() happens in between.
- *
- * In order to make sure these calls to compress() happen at the
- * appropriate time, the deal.II wrappers keep a state variable that
- * store which is the presently allowed operation: additions or
- * writes. If it encounters an operation of the opposite kind, it calls
- * compress() and flips the state. This can sometimes lead to very
- * confusing behavior, in code that may for example look like this:
- *
- * @verbatim
- * TrilinosWrappers::Vector vector;
- * // do some write operations on the vector
- * for (unsigned int i=0; i<vector->size(); ++i)
- * vector(i) = i;
- *
- * // do some additions to vector elements, but
- * // only for some elements
- * for (unsigned int i=0; i<vector->size(); ++i)
- * if (some_condition(i) == true)
- * vector(i) += 1;
- *
- * // do another collective operation
- * const double norm = vector->l2_norm();
- * @endverbatim
- *
- * This code can run into trouble: by the time we see the first addition
- * operation, we need to flush the overwrite buffers for the vector, and
- * the deal.II library will do so by calling compress(). However, it
- * will only do so for all processes that actually do an addition -- if
- * the condition is never true for one of the processes, then this one
- * will not get to the actual compress() call, whereas all the other
- * ones do. This gets us into trouble, since all the other processes
- * hang in the call to flush the write buffers, while the one other
- * process advances to the call to compute the l2 norm. At this time,
- * you will get an error that some operation was attempted by only a
- * subset of processes. This behavior may seem surprising, unless you
- * know that write/addition operations on single elements may trigger
- * this behavior.
- *
- * The problem described here may be avoided by placing additional calls
- * to compress(), or making sure that all processes do the same type of
- * operations at the same time, for example by placing zero additions if
- * necessary.
- *
- * @ingroup TrilinosWrappers
- * @ingroup Vectors
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
- */
+ /**
+ * This class implements a wrapper to use the Trilinos distributed
+ * vector class Epetra_FEVector. This class is derived from the
+ * TrilinosWrappers::VectorBase class and provides all functionality
+ * included there.
+ *
+ * Note that Trilinos only guarantees that operations do what you expect
+ * if the function @p GlobalAssemble has been called after vector
+ * assembly in order to distribute the data. This is necessary since
+ * some processes might have accumulated data of elements that are not
+ * owned by themselves, but must be sent to the owning process. In order
+ * to avoid using the wrong data, you need to call Vector::compress()
+ * before you actually use the vectors.
+ *
+ * <h3>Parallel communication model</h3>
+ *
+ * The parallel functionality of Trilinos is built on top of the Message
+ * Passing Interface (MPI). MPI's communication model is built on
+ * collective communications: if one process wants something from
+ * another, that other process has to be willing to accept this
+ * communication. A process cannot query data from another process by
+ * calling a remote function, without that other process expecting such
+ * a transaction. The consequence is that most of the operations in the
+ * base class of this class have to be called collectively. For example,
+ * if you want to compute the l2 norm of a parallel vector, @em all
+ * processes across which this vector is shared have to call the @p
+ * l2_norm function. If you don't do this, but instead only call the @p
+ * l2_norm function on one process, then the following happens: This one
+ * process will call one of the collective MPI functions and wait for
+ * all the other processes to join in on this. Since the other processes
+ * don't call this function, you will either get a time-out on the first
+ * process, or, worse, by the time the next call to a Trilinos function
+ * generates an MPI message on the other processes, you will get a
+ * cryptic message that only a subset of processes attempted a
+ * communication. These bugs can be very hard to figure out, unless you
+ * are well-acquainted with the communication model of MPI, and know
+ * which functions may generate MPI messages.
+ *
+ * One particular case, where an MPI message may be generated
+ * unexpectedly is discussed below.
+ *
+ * <h3>Accessing individual elements of a vector</h3>
+ *
+ * Trilinos does allow read access to individual elements of a vector,
+ * but in the distributed case only to elements that are stored
+ * locally. We implement this through calls like
+ * <tt>d=vec(i)</tt>. However, if you access an element outside the
+ * locally stored range, an exception is generated.
+ *
+ * In contrast to read access, Trilinos (and the respective deal.II
+ * wrapper classes) allow writing (or adding) to individual elements of
+ * vectors, even if they are stored on a different process. You can do
+ * this writing, for example, <tt>vec(i)=d</tt> or <tt>vec(i)+=d</tt>,
+ * or similar operations. There is one catch, however, that may lead to
+ * very confusing error messages: Trilinos requires application programs
+ * to call the compress() function when they switch from adding to
+ * elements to writing to elements. The reasoning is that all processes
+ * might accumulate addition operations to elements, even if multiple
+ * processes write to the same elements. By the time we call compress()
+ * the next time, all these additions are executed. However, if one
+ * process adds to an element, and another overwrites to it, the order
+ * of execution would yield non-deterministic behavior if we don't make
+ * sure that a synchronisation with compress() happens in between.
+ *
+ * In order to make sure these calls to compress() happen at the
+ * appropriate time, the deal.II wrappers keep a state variable that
+ * stores which operation is presently allowed: additions or
+ * writes. If it encounters an operation of the opposite kind, it calls
+ * compress() and flips the state. This can sometimes lead to very
+ * confusing behavior, in code that may for example look like this:
+ *
+ * @verbatim
+ * TrilinosWrappers::Vector vector;
+ * // do some write operations on the vector
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ * vector(i) = i;
+ *
+ * // do some additions to vector elements, but
+ * // only for some elements
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ * if (some_condition(i) == true)
+ * vector(i) += 1;
+ *
+ * // do another collective operation
+ * const double norm = vector.l2_norm();
+ * @endverbatim
+ *
+ * This code can run into trouble: by the time we see the first addition
+ * operation, we need to flush the overwrite buffers for the vector, and
+ * the deal.II library will do so by calling compress(). However, it
+ * will only do so for all processes that actually do an addition -- if
+ * the condition is never true for one of the processes, then this one
+ * will not get to the actual compress() call, whereas all the other
+ * ones do. This gets us into trouble, since all the other processes
+ * hang in the call to flush the write buffers, while the one other
+ * process advances to the call to compute the l2 norm. At this time,
+ * you will get an error that some operation was attempted by only a
+ * subset of processes. This behavior may seem surprising, unless you
+ * know that write/addition operations on single elements may trigger
+ * this behavior.
+ *
+ * The problem described here may be avoided by placing additional calls
+ * to compress(), or making sure that all processes do the same type of
+ * operations at the same time, for example by placing zero additions if
+ * necessary.
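+ *
+ * As a sketch, the example above can be made safe by letting every
+ * process perform an addition in any case, even if it only adds
+ * zero:
+ *
+ * @verbatim
+ * for (unsigned int i=0; i<vector.size(); ++i)
+ *   if (some_condition(i) == true)
+ *     vector(i) += 1;
+ *   else
+ *     vector(i) += 0;
+ *
+ * // now all processes have entered the addition phase and the
+ * // subsequent collective call is safe
+ * const double norm = vector.l2_norm();
+ * @endverbatim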
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Vectors
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
+ */
class Vector : public VectorBase
{
- public:
- /**
- * @name Basic constructors and initalization.
- */
+ public:
+ /**
+ * @name Basic constructors and initialization.
+ */
//@{
- /**
- * Default constructor that
- * generates an empty (zero size)
- * vector. The function
- * <tt>reinit()</tt> will have to
- * give the vector the correct
- * size and distribution among
- * processes in case of an MPI
- * run.
- */
- Vector ();
-
- /**
- * Copy constructor using the
- * given vector.
- */
- Vector (const Vector &V);
-
- /**
- * Destructor.
- */
- ~Vector ();
-
- /**
- * Reinit functionality. This
- * function sets the calling vector
- * to the dimension and the parallel
- * distribution of the input vector,
- * but does not copy the elements in
- * <tt>v</tt>. If <tt>fast</tt> is
- * not <tt>true</tt>, the elements in
- * the vector are initialized with
- * zero, otherwise the content will
- * be left unchanged and the user has
- * to set all elements.
- *
- * This function has a third argument,
- * <tt>allow_different_maps</tt>,
- * that allows for an exchange of
- * data between two equal-sized
- * vectors (but being distributed
- * differently among the
- * processors). A trivial application
- * of this function is to generate a
- * replication of a whole vector on
- * each machine, when the calling
- * vector is built according to the
- * localized vector class
- * TrilinosWrappers::Vector, and
- * <tt>v</tt> is a distributed
- * vector. In this case, the variable
- * <tt>fast</tt> needs to be set to
- * <tt>false</tt>, since it does not
- * make sense to exchange data
- * between differently parallelized
- * vectors without touching the
- * elements.
- */
- void reinit (const VectorBase &v,
- const bool fast = false,
- const bool allow_different_maps = false);
-
- void reinit (const BlockVector &v,
- const bool import_data = false);
-
- /**
- * Reinit function. Creates a vector
- * using the IndexSet local as our
- * own unknowns, add optional ghost
- * values ghost.
- */
- void reinit (const MPI_Comm &communicator,
- const IndexSet &local,
- const IndexSet &ghost=IndexSet(0));
-
- /**
- * Set all components of the
- * vector to the given number @p
- * s. Simply pass this down to
- * the base class, but we still
- * need to declare this function
- * to make the example given in
- * the discussion about making
- * the constructor explicit work.
- */
- Vector & operator = (const TrilinosScalar s);
-
- /**
- * Copy the given vector. Resize
- * the present vector if
- * necessary. In this case, also
- * the Epetra_Map that designs
- * the parallel partitioning is
- * taken from the input vector.
- */
- Vector &
- operator = (const Vector &V);
-
- /**
- * Copy operator from a given
- * localized vector (present on
- * all processes) in
- * TrilinosWrappers format to the
- * current distributed
- * vector. This function assumes
- * that the calling vector (left
- * hand object) already is of the
- * same size as the right hand
- * side vector. Otherwise, an
- * exception will be thrown.
- */
- Vector &
- operator = (const ::dealii::TrilinosWrappers::Vector &V);
-
- /**
- * Another copy function. This
- * one takes a deal.II vector and
- * copies it into a
- * TrilinosWrapper vector. Note
- * that since we do not provide
- * any Epetra_map that tells
- * about the partitioning of the
- * vector among the MPI
- * processes, the size of the
- * TrilinosWrapper vector has to
- * be the same as the size of the
- * input vector. In order to
- * change the map, use the
- * reinit(const Epetra_Map
- * &input_map) function.
- */
- template <typename Number>
- Vector &
- operator = (const ::dealii::Vector<Number> &v);
-
- /**
- * This reinit function is
- * meant to be used for
- * parallel calculations where
- * some non-local data has to
- * be used. The typical
- * situation where one needs
- * this function is the call of
- * the
- * FEValues<dim>::get_function_values
- * function (or of some
- * derivatives) in
- * parallel. Since it is
- * usually faster to retrieve
- * the data in advance, this
- * function can be called
- * before the assembly forks
- * out to the different
- * processors. What this
- * function does is the
- * following: It takes the
- * information in the columns
- * of the given matrix and
- * looks which data couples
- * between the different
- * processors. That data is
- * then queried from the input
- * vector. Note that you should
- * not write to the resulting
- * vector any more, since the
- * some data can be stored
- * several times on different
- * processors, leading to
- * unpredictable results. In
- * particular, such a vector
- * cannot be used for
- * matrix-vector products as
- * for example done during the
- * solution of linear systems.
- */
- void import_nonlocal_data_for_fe
- (const dealii::TrilinosWrappers::SparseMatrix &matrix,
- const Vector &vector);
+ /**
+ * Default constructor that
+ * generates an empty (zero size)
+ * vector. The function
+ * <tt>reinit()</tt> will have to
+ * give the vector the correct
+ * size and distribution among
+ * processes in case of an MPI
+ * run.
+ */
+ Vector ();
+
+ /**
+ * Copy constructor using the
+ * given vector.
+ */
+ Vector (const Vector &V);
+
+ /**
+ * Destructor.
+ */
+ ~Vector ();
+
+ /**
+ * Reinit functionality. This
+ * function sets the calling vector
+ * to the dimension and the parallel
+ * distribution of the input vector,
+ * but does not copy the elements in
+ * <tt>v</tt>. If <tt>fast</tt> is
+ * not <tt>true</tt>, the elements in
+ * the vector are initialized with
+ * zero, otherwise the content will
+ * be left unchanged and the user has
+ * to set all elements.
+ *
+ * This function has a third argument,
+ * <tt>allow_different_maps</tt>,
+ * that allows for an exchange of
+ * data between two equal-sized
+ * vectors (but being distributed
+ * differently among the
+ * processors). A trivial application
+ * of this function is to generate a
+ * replication of a whole vector on
+ * each machine, when the calling
+ * vector is built according to the
+ * localized vector class
+ * TrilinosWrappers::Vector, and
+ * <tt>v</tt> is a distributed
+ * vector. In this case, the variable
+ * <tt>fast</tt> needs to be set to
+ * <tt>false</tt>, since it does not
+ * make sense to exchange data
+ * between differently parallelized
+ * vectors without touching the
+ * elements.
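+ *
+ * Following this description, a sketch of replicating a distributed
+ * vector <tt>distributed_vec</tt> on every machine might look like
+ * this (the names are illustrative only):
+ *
+ * @verbatim
+ * TrilinosWrappers::Vector localized_copy (distributed_vec.size());
+ * localized_copy.reinit (distributed_vec,
+ *                        false,   // fast
+ *                        true);   // allow_different_maps
+ * @endverbatim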
+ */
+ void reinit (const VectorBase &v,
+ const bool fast = false,
+ const bool allow_different_maps = false);
+
+ void reinit (const BlockVector &v,
+ const bool import_data = false);
+
++ /**
++ * Reinit function. Creates a vector
++ * using the IndexSet @p local for the
++ * locally owned unknowns and, optionally,
++ * ghost values given by @p ghost.
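++ *
++ * A brief sketch (the index sets are placeholders for whatever
++ * describes the locally owned and ghost entries in a given
++ * application):
++ *
++ * @verbatim
++ * IndexSet locally_owned_entries;
++ * IndexSet ghost_entries;
++ * // ... fill both index sets ...
++ *
++ * TrilinosWrappers::MPI::Vector v;
++ * v.reinit (MPI_COMM_WORLD, locally_owned_entries, ghost_entries);
++ * @endverbatim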
++ */
++ void reinit (const MPI_Comm &communicator,
++ const IndexSet &local,
++ const IndexSet &ghost=IndexSet(0));
++
+ /**
+ * Set all components of the
+ * vector to the given number @p
+ * s. Simply pass this down to
+ * the base class, but we still
+ * need to declare this function
+ * to make the example given in
+ * the discussion about making
+ * the constructor explicit work.
+ */
+ Vector &operator = (const TrilinosScalar s);
+
+ /**
+ * Copy the given vector. Resize
+ * the present vector if
+ * necessary. In this case, also
+ * the Epetra_Map that describes
+ * the parallel partitioning is
+ * taken from the input vector.
+ */
+ Vector &
+ operator = (const Vector &V);
+
+ /**
+ * Copy operator from a given
+ * localized vector (present on
+ * all processes) in
+ * TrilinosWrappers format to the
+ * current distributed
+ * vector. This function assumes
+ * that the calling vector (left
+ * hand object) already is of the
+ * same size as the right hand
+ * side vector. Otherwise, an
+ * exception will be thrown.
+ */
+ Vector &
+ operator = (const ::dealii::TrilinosWrappers::Vector &V);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II vector and
+ * copies it into a
+ * TrilinosWrapper vector. Note
+ * that since we do not provide
+ * any Epetra_Map that tells
+ * about the partitioning of the
+ * vector among the MPI
+ * processes, the size of the
+ * TrilinosWrapper vector has to
+ * be the same as the size of the
+ * input vector. In order to
+ * change the map, use the
+ * reinit(const Epetra_Map
+ * &input_map) function.
+ */
+ template <typename Number>
+ Vector &
+ operator = (const ::dealii::Vector<Number> &v);
+
+ /**
+ * This reinit function is
+ * meant to be used for
+ * parallel calculations where
+ * some non-local data has to
+ * be used. The typical
+ * situation where one needs
+ * this function is the call of
+ * the
+ * FEValues<dim>::get_function_values
+ * function (or of some
+ * derivatives) in
+ * parallel. Since it is
+ * usually faster to retrieve
+ * the data in advance, this
+ * function can be called
+ * before the assembly forks
+ * out to the different
+ * processors. What this
+ * function does is the
+ * following: It takes the
+ * information in the columns
+ * of the given matrix and
+ * looks which data couples
+ * between the different
+ * processors. That data is
+ * then queried from the input
+ * vector. Note that you should
+ * not write to the resulting
+ * vector any more, since the
+ * some data can be stored
+ * several times on different
+ * processors, leading to
+ * unpredictable results. In
+ * particular, such a vector
+ * cannot be used for
+ * matrix-vector products as
+ * for example done during the
+ * solution of linear systems.
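+ *
+ * A possible usage sketch (the matrix and vector names are
+ * illustrative only):
+ *
+ * @verbatim
+ * TrilinosWrappers::SparseMatrix system_matrix;
+ * TrilinosWrappers::MPI::Vector  distributed_solution;
+ * // ... assemble the matrix and solve for distributed_solution ...
+ *
+ * TrilinosWrappers::MPI::Vector ghosted_solution;
+ * ghosted_solution.import_nonlocal_data_for_fe (system_matrix,
+ *                                               distributed_solution);
+ * // read from ghosted_solution (e.g. in FEValues), but do not
+ * // write to it any more
+ * @endverbatim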
+ */
+ void import_nonlocal_data_for_fe
+ (const dealii::TrilinosWrappers::SparseMatrix &matrix,
+ const Vector &vector);
//@}
- /**
- * @name Initialization with an Epetra_Map
- */
+ /**
+ * @name Initialization with an Epetra_Map
+ */
//@{
- /**
- * This constructor takes an
- * Epetra_Map that already knows
- * how to distribute the
- * individual components among
- * the MPI processors. Since it
- * also includes information
- * about the size of the vector,
- * this is all we need to
- * generate a parallel vector.
- */
- Vector (const Epetra_Map ¶llel_partitioning);
-
- /**
- * Copy constructor from the
- * TrilinosWrappers vector
- * class. Since a vector of this
- * class does not necessarily
- * need to be distributed among
- * processes, the user needs to
- * supply us with an Epetra_Map
- * that sets the partitioning
- * details.
- */
- explicit Vector (const Epetra_Map ¶llel_partitioning,
- const VectorBase &v);
-
- /**
- * Reinitialize from a deal.II
- * vector. The Epetra_Map specifies the
- * %parallel partitioning.
- */
- template <typename number>
- void reinit (const Epetra_Map ¶llel_partitioner,
- const dealii::Vector<number> &v);
-
- /**
- * Reinit functionality. This
- * function destroys the old
- * vector content and generates a
- * new one based on the input
- * map.
- */
- void reinit (const Epetra_Map ¶llel_partitioning,
- const bool fast = false);
-
- /**
- * Copy-constructor from deal.II
- * vectors. Sets the dimension to that
- * of the given vector, and copies all
- * elements.
- */
- template <typename Number>
- explicit Vector (const Epetra_Map ¶llel_partitioning,
- const dealii::Vector<Number> &v);
+ /**
+ * This constructor takes an
+ * Epetra_Map that already knows
+ * how to distribute the
+ * individual components among
+ * the MPI processors. Since it
+ * also includes information
+ * about the size of the vector,
+ * this is all we need to
+ * generate a parallel vector.
+ */
+ Vector (const Epetra_Map ¶llel_partitioning);
+
+ /**
+ * Copy constructor from the
+ * TrilinosWrappers vector
+ * class. Since a vector of this
+ * class does not necessarily
+ * need to be distributed among
+ * processes, the user needs to
+ * supply us with an Epetra_Map
+ * that sets the partitioning
+ * details.
+ */
+ explicit Vector (const Epetra_Map ¶llel_partitioning,
+ const VectorBase &v);
+
+ /**
+ * Reinitialize from a deal.II
+ * vector. The Epetra_Map specifies the
+ * %parallel partitioning.
+ */
+ template <typename number>
+ void reinit (const Epetra_Map ¶llel_partitioner,
+ const dealii::Vector<number> &v);
+
+ /**
+ * Reinit functionality. This
+ * function destroys the old
+ * vector content and generates a
+ * new one based on the input
+ * map.
+ */
+ void reinit (const Epetra_Map ¶llel_partitioning,
+ const bool fast = false);
+
+ /**
+ * Copy-constructor from deal.II
+ * vectors. Sets the dimension to that
+ * of the given vector, and copies all
+ * elements.
+ */
+ template <typename Number>
+ explicit Vector (const Epetra_Map ¶llel_partitioning,
+ const dealii::Vector<Number> &v);
//@}
- /**
- * @name Initialization with an IndexSet
- */
+ /**
+ * @name Initialization with an IndexSet
+ */
//@{
- /**
- * This constructor takes an IndexSet
- * that defines how to distribute the
- * individual components among the
- * MPI processors. Since it also
- * includes information about the
- * size of the vector, this is all we
- * need to generate a %parallel
- * vector.
- */
- Vector (const IndexSet ¶llel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- Vector (const MPI_Comm &communicator,
- const IndexSet &local,
- const IndexSet &ghost=IndexSet(0));
-
- /**
- * Copy constructor from the
- * TrilinosWrappers vector
- * class. Since a vector of this
- * class does not necessarily need to
- * be distributed among processes,
- * the user needs to supply us with
- * an IndexSet and an MPI
- * communicator that set the
- * partitioning details.
- */
- explicit Vector (const IndexSet ¶llel_partitioning,
- const VectorBase &v,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Copy-constructor from deal.II
- * vectors. Sets the dimension to
- * that of the given vector, and
- * copies all the elements.
- */
- template <typename Number>
- explicit Vector (const IndexSet ¶llel_partitioning,
- const dealii::Vector<Number> &v,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Reinit functionality. This function
- * destroys the old vector content and
- * generates a new one based on the
- * input partitioning. The flag
- * <tt>fast</tt> determines whether the
- * vector should be filled with zero
- * (false) or left untouched (true).
- */
- void reinit (const IndexSet ¶llel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool fast = false);
+ /**
+ * This constructor takes an IndexSet
+ * that defines how to distribute the
+ * individual components among the
+ * MPI processors. Since it also
+ * includes information about the
+ * size of the vector, this is all we
+ * need to generate a %parallel
+ * vector.
+ */
+ Vector (const IndexSet ¶llel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
++ Vector (const MPI_Comm &communicator,
++ const IndexSet &local,
++ const IndexSet &ghost=IndexSet(0));
++
+ /**
+ * Copy constructor from the
+ * TrilinosWrappers vector
+ * class. Since a vector of this
+ * class does not necessarily need to
+ * be distributed among processes,
+ * the user needs to supply us with
+ * an IndexSet and an MPI
+ * communicator that set the
+ * partitioning details.
+ */
+ explicit Vector (const IndexSet ¶llel_partitioning,
+ const VectorBase &v,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Copy-constructor from deal.II
+ * vectors. Sets the dimension to
+ * that of the given vector, and
+ * copies all the elements.
+ */
+ template <typename Number>
+ explicit Vector (const IndexSet ¶llel_partitioning,
+ const dealii::Vector<Number> &v,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Reinit functionality. This function
+ * destroys the old vector content and
+ * generates a new one based on the
+ * input partitioning. The flag
+ * <tt>fast</tt> determines whether the
+ * vector should be filled with zero
+ * (false) or left untouched (true).
+ */
+ void reinit (const IndexSet ¶llel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool fast = false);
//@}
};
- /**
- * This class is a specialization of a Trilinos vector to a localized
- * version. The purpose of this class is to provide a copy interface
- * from the possibly parallel Vector class to a local vector on each
- * processor, in order to be able to access all elements in the vector
- * or to apply certain deal.II functions.
- *
- * @ingroup TrilinosWrappers
- * @ingroup Vectors
- * @author Martin Kronbichler, 2008
- */
+ /**
+ * This class is a specialization of a Trilinos vector to a localized
+ * version. The purpose of this class is to provide a copy interface
+ * from the possibly parallel Vector class to a local vector on each
+ * processor, in order to be able to access all elements in the vector
+ * or to apply certain deal.II functions.
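+ *
+ * A typical use, sketched with illustrative names, is to create a
+ * localized copy of a distributed vector so that all elements can be
+ * read on every processor:
+ *
+ * @verbatim
+ * // distributed_solution is a TrilinosWrappers::MPI::Vector
+ * TrilinosWrappers::Vector localized_solution (distributed_solution);
+ * @endverbatim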
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Vectors
+ * @author Martin Kronbichler, 2008
+ */
class Vector : public VectorBase
{
- public:
- /**
- * Default constructor that
- * generates an empty (zero size)
- * vector. The function
- * <tt>reinit()</tt> will have to
- * give the vector the correct
- * size.
- */
- Vector ();
-
- /**
- * This constructor takes as
- * input the number of elements
- * in the vector.
- */
- Vector (const unsigned int n);
-
- /**
- * This constructor takes as
- * input the number of elements
- * in the vector. If the map is
- * not localized, i.e., if there
- * are some elements that are not
- * present on all processes, only
- * the global size of the map
- * will be taken and a localized
- * map will be generated
- * internally.
- */
- Vector (const Epetra_Map &partitioning);
-
- /**
- * This constructor takes as input
- * the number of elements in the
- * vector. If the index set is not
- * localized, i.e., if there are some
- * elements that are not present on
- * all processes, only the global
- * size of the index set will be
- * taken and a localized version will
- * be generated internally.
- */
- Vector (const IndexSet &partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * This constructor creates a vector
- * using the IndexSet local as our
- * own unknowns, add optional ghost
- * values ghost.
- */
- Vector (const MPI_Comm &communicator,
- const IndexSet &local,
- const IndexSet &ghost);
-
- /**
- * This constructor takes a
- * (possibly parallel) Trilinos
- * Vector and generates a
- * localized version of the whole
- * content on each processor.
- */
- explicit Vector (const VectorBase &V);
-
- /**
- * Copy-constructor from deal.II
- * vectors. Sets the dimension to that
- * of the given vector, and copies all
- * elements.
- */
- template <typename Number>
- explicit Vector (const dealii::Vector<Number> &v);
-
- /**
- * Reinit function that resizes
- * the vector to the size
- * specified by <tt>n</tt>.
- */
- void reinit (const unsigned int n,
- const bool fast = false);
-
- /**
- * Initialization with an
- * Epetra_Map. Similar to the call in
- * the other class MPI::Vector, with
- * the difference that now a copy on
- * all processes is generated. This
- * initialization function is
- * appropriate when the data in the
- * localized vector should be
- * imported from a distributed vector
- * that has been initialized with the
- * same communicator. The variable
- * <tt>fast</tt> determines whether
- * the vector should be filled with
- * zero or left untouched.
- */
- void reinit (const Epetra_Map &input_map,
- const bool fast = false);
-
- /**
- * Initialization with an
- * IndexSet. Similar to the call in the
- * other class MPI::Vector, with the
- * difference that now a copy on all
- * processes is generated. This
- * initialization function is
- * appropriate in case the data in the
- * localized vector should be imported
- * from a distributed vector that has
- * been initialized with the same
- * communicator. The variable
- * <tt>fast</tt> determines whether the
- * vector should be filled with zero
- * (false) or left untouched (true).
- */
- void reinit (const IndexSet &input_map,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool fast = false);
-
- /**
- * Reinit function. Takes the
- * information of a Vector and copies
- * everything to the calling vector,
- * now also allowing different maps.
- */
- void reinit (const VectorBase &V,
- const bool fast = false,
- const bool allow_different_maps = false);
-
-
- /**
- * Set all components of the
- * vector to the given number @p
- * s. Simply pass this down to
- * the base class, but we still
- * need to declare this function
- * to make the example given in
- * the discussion about making
- * the constructor explicit work.
- */
- Vector & operator = (const TrilinosScalar s);
-
- /**
- * Sets the left hand argument to
- * the (parallel) Trilinos
- * Vector. Equivalent to the @p
- * reinit function.
- */
- Vector &
- operator = (const MPI::Vector &V);
+ public:
+ /**
+ * Default constructor that
+ * generates an empty (zero size)
+ * vector. The function
+ * <tt>reinit()</tt> will have to
+ * give the vector the correct
+ * size.
+ */
+ Vector ();
+
+ /**
+ * This constructor takes as
+ * input the number of elements
+ * in the vector.
+ */
+ Vector (const unsigned int n);
+
+ /**
+ * This constructor takes as
+ * input the number of elements
+ * in the vector. If the map is
+ * not localized, i.e., if there
+ * are some elements that are not
+ * present on all processes, only
+ * the global size of the map
+ * will be taken and a localized
+ * map will be generated
+ * internally.
+ */
+ Vector (const Epetra_Map &partitioning);
+
+ /**
+ * This constructor takes as input
+ * the number of elements in the
+ * vector. If the index set is not
+ * localized, i.e., if there are some
+ * elements that are not present on
+ * all processes, only the global
+ * size of the index set will be
+ * taken and a localized version will
+ * be generated internally.
+ */
+ Vector (const IndexSet &partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
++ /**
++ * This constructor creates a vector
++ * using the IndexSet @p local for the
++ * locally owned unknowns and, optionally,
++ * ghost values given by @p ghost.
++ */
++ Vector (const MPI_Comm &communicator,
++ const IndexSet &local,
++ const IndexSet &ghost);
++
+ /**
+ * This constructor takes a
+ * (possibly parallel) Trilinos
+ * Vector and generates a
+ * localized version of the whole
+ * content on each processor.
+ */
+ explicit Vector (const VectorBase &V);
+
+ /**
+ * Copy-constructor from deal.II
+ * vectors. Sets the dimension to that
+ * of the given vector, and copies all
+ * elements.
+ */
+ template <typename Number>
+ explicit Vector (const dealii::Vector<Number> &v);
+
+ /**
+ * Reinit function that resizes
+ * the vector to the size
+ * specified by <tt>n</tt>.
+ */
+ void reinit (const unsigned int n,
+ const bool fast = false);
+
+ /**
+ * Initialization with an
+ * Epetra_Map. Similar to the call in
+ * the other class MPI::Vector, with
+ * the difference that now a copy on
+ * all processes is generated. This
+ * initialization function is
+ * appropriate when the data in the
+ * localized vector should be
+ * imported from a distributed vector
+ * that has been initialized with the
+ * same communicator. The variable
+ * <tt>fast</tt> determines whether
+ * the vector should be filled with
+ * zero or left untouched.
+ */
+ void reinit (const Epetra_Map &input_map,
+ const bool fast = false);
+
+ /**
+ * Initialization with an
+ * IndexSet. Similar to the call in the
+ * other class MPI::Vector, with the
+ * difference that now a copy on all
+ * processes is generated. This
+ * initialization function is
+ * appropriate in case the data in the
+ * localized vector should be imported
+ * from a distributed vector that has
+ * been initialized with the same
+ * communicator. The variable
+ * <tt>fast</tt> determines whether the
+ * vector should be filled with zero
+ * (false) or left untouched (true).
+ */
+ void reinit (const IndexSet &input_map,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool fast = false);
+
+ /**
+ * Reinit function. Takes the
+ * information of a Vector and copies
+ * everything to the calling vector,
+ * now also allowing different maps.
+ */
+ void reinit (const VectorBase &V,
+ const bool fast = false,
+ const bool allow_different_maps = false);
+
++
+ /**
+ * Set all components of the
+ * vector to the given number @p
+ * s. Simply pass this down to
+ * the base class, but we still
+ * need to declare this function
+ * to make the example given in
+ * the discussion about making
+ * the constructor explicit work.
+ */
+ Vector &operator = (const TrilinosScalar s);
+
+ /**
+ * Sets the left hand argument to
+ * the (parallel) Trilinos
+ * Vector. Equivalent to the @p
+ * reinit function.
+ */
+ Vector &
+ operator = (const MPI::Vector &V);
- /**
- * Sets the left hand argument to
- * the deal.II vector.
- */
- template <typename Number>
- Vector &
- operator = (const ::dealii::Vector<Number> &V);
+ /**
+ * Sets the left hand argument to
+ * the deal.II vector.
+ */
+ template <typename Number>
+ Vector &
+ operator = (const ::dealii::Vector<Number> &V);
- /**
- * Copy operator. Copies both the
- * dimension and the content in
- * the right hand argument.
- */
- Vector &
- operator = (const Vector &V);
-
- /**
- * This function does nothing but is
- * there for compatibility with the
- * @p PETScWrappers::Vector class.
- *
- * For the PETSc vector wrapper class,
- * this function updates the ghost
- * values of the PETSc vector. This
- * is necessary after any modification
- * before reading ghost values.
- *
- * However, for the implementation of
- * this class, it is immaterial and thus
- * an empty function.
- */
- void update_ghost_values () const;
+ /**
+ * Copy operator. Copies both the
+ * dimension and the content in
+ * the right hand argument.
+ */
+ Vector &
+ operator = (const Vector &V);
+
+ /**
+ * This function does nothing but is
+ * there for compatibility with the
+ * @p PETScWrappers::Vector class.
+ *
+ * For the PETSc vector wrapper class,
+ * this function updates the ghost
+ * values of the PETSc vector. This
+ * is necessary after any modification
+ * before reading ghost values.
+ *
+ * However, for the implementation of
+ * this class, it is immaterial and thus
+ * an empty function.
+ */
+ void update_ghost_values () const;
};
<< arg2 << " through " << arg3
<< " are stored locally and can be accessed.");
-
private:
- /**
- * Trilinos doesn't allow to
- * mix additions to matrix
- * entries and overwriting them
- * (to make synchronisation of
- * parallel computations
- * simpler). The way we do it
- * is to, for each access
- * operation, store whether it
- * is an insertion or an
- * addition. If the previous
- * one was of different type,
- * then we first have to flush
- * the Trilinos buffers;
- * otherwise, we can simply go
- * on. Luckily, Trilinos has
- * an object for this which
- * does already all the
- * parallel communications in
- * such a case, so we simply
- * use their model, which
- * stores whether the last
- * operation was an addition or
- * an insertion.
- */
- Epetra_CombineMode last_action;
-
- /**
- * A boolean variable to hold
- * information on whether the
- * vector is compressed or not.
- */
- bool compressed;
+ /**
+ * Point to the vector we are
+ * referencing.
+ */
+ VectorBase &vector;
/**
- * Whether this vector has ghost elements. This is true
- * on all processors even if only one of them has any
- * ghost elements.
+ * Index of the referenced element
+ * of the vector.
*/
- bool has_ghosts;
-
- /**
- * An Epetra distibuted vector
- * type. Requires an existing
- * Epetra_Map for storing data.
- */
- std_cxx1x::shared_ptr<Epetra_FEVector> vector;
-
-
- /**
- * Make the reference class a
- * friend.
- */
- friend class internal::VectorReference;
- friend class Vector;
- friend class MPI::Vector;
+ const unsigned int index;
+
+ /**
+ * Make the vector class a
+ * friend, so that it can
+ * create objects of the
+ * present type.
+ */
+ friend class ::dealii::TrilinosWrappers::VectorBase;
+ };
+ }
+ /**
+ * @endcond
+ */
+
+
+ /**
+ * Base class for the two types of Trilinos vectors, the distributed
+ * memory vector MPI::Vector and a localized vector Vector. The latter
+ * is designed for use in either serial implementations or as a
+ * localized copy on each processor. The implementation of this class
+ * is based on the Trilinos vector class Epetra_FEVector, the (parallel)
+ * partitioning of which is governed by an Epetra_Map. This means that
+ * most of the vector functionality is generic and can be implemented
+ * in this base class, while
+ * the definition of the partition map (and hence, the constructor and
+ * reinit function) will have to be done in the derived classes. The
+ * Epetra_FEVector is precisely the kind of vector we deal with all the
+ * time - we probably get it from some assembly process, where also
+ * entries not locally owned might need to be written and hence need to be
+ * forwarded to the owner. The only requirement for this class to work
+ * is that Trilinos is installed with the same compiler as is used for
+ * compilation of deal.II.
+ *
+ * The interface of this class is modeled after the existing Vector
+ * class in deal.II. It has almost the same member functions, and is
+ * often exchangeable. However, since Trilinos only supports a single
+ * scalar type (double), it is not templated, and only works with that
+ * type.
+ *
+ * Note that Trilinos only guarantees that operations do what you expect
+ * if the function @p GlobalAssemble has been called after vector
+ * assembly in order to distribute the data. Therefore, you need to call
+ * Vector::compress() before you actually use the vectors.
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Vectors
+ * @author Martin Kronbichler, 2008
+ */
+ class VectorBase : public Subscriptor
+ {
+ public:
+ /**
+ * Declare some of the standard
+ * types used in all
+ * containers. These types
+ * parallel those in the
+ * <tt>C</tt> standard libraries
+ * <tt>vector<...></tt> class.
+ */
+ typedef TrilinosScalar value_type;
+ typedef TrilinosScalar real_type;
+ typedef std::size_t size_type;
+ typedef internal::VectorReference reference;
+ typedef const internal::VectorReference const_reference;
+
+ /**
+ * @name 1: Basic Object-handling
+ */
+ //@{
+
+ /**
+ * Default constructor that
+ * generates an empty (zero size)
+ * vector. The function
+ * <tt>reinit()</tt> will have to
+ * give the vector the correct
+ * size and distribution among
+ * processes in case of an MPI
+ * run.
+ */
+ VectorBase ();
+
+ /**
+ * Copy constructor. Sets the
+ * dimension to that of the given
+ * vector, and copies all the
+ * elements.
+ */
+ VectorBase (const VectorBase &v);
+
+ /**
+ * Destructor
+ */
+ virtual ~VectorBase ();
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
+
+ /**
+ * Reinit functionality, sets the
+ * dimension and possibly the
+ * parallel partitioning (Epetra_Map)
+ * of the calling vector to the
+ * settings of the input vector.
+ */
+ void reinit (const VectorBase &v,
+ const bool fast = false);
+
+ /**
+ * Compress the underlying
+ * representation of the Trilinos
+ * object, i.e. flush the buffers
+ * of the vector object if it has
+ * any. This function is
+ * necessary after writing into a
+ * vector element-by-element and
+ * before anything else can be
+ * done on it.
+ *
+ * The (defaulted) argument can
+ * be used to specify the
+ * compress mode
+ * (<code>Add</code> or
+ * <code>Insert</code>) in case
+ * the vector has not been
+ * written to since the last
+ * time this function was
+ * called. The argument is
+ * ignored if the vector has
+ * been added or written to
+ * since the last time
+ * compress() was called.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
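+ *
+ * A short sketch (the vector <tt>v</tt> is assumed to have been
+ * initialized before; the index used is a placeholder):
+ *
+ * @verbatim
+ * v(some_locally_relevant_index) = 1.;   // element-by-element write
+ * v.compress (::dealii::VectorOperation::insert);
+ * // from here on, collective operations on v are safe again
+ * @endverbatim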
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+ /**
+ * @deprecated
+ */
+ void compress (const Epetra_CombineMode last_action);
+
+ /**
+ * Returns the state of the
+ * vector, i.e., whether
+ * compress() has already been
+ * called after an operation
+ * requiring data exchange.
+ */
+ bool is_compressed () const;
+
+ /**
+ * Set all components of the
+ * vector to the given number @p
+ * s. Simply pass this down to
+ * the Trilinos Epetra object,
+ * but we still need to declare
+ * this function to make the
+ * example given in the
+ * discussion about making the
+ * constructor explicit work.
+ *
+ * Since the semantics of
+ * assigning a scalar to a vector
+ * are not immediately clear,
+ * this operator should really
+ * only be used if you want to
+ * set the entire vector to
+ * zero. This allows the
+ * intuitive notation
+ * <tt>v=0</tt>. Assigning other
+ * values is deprecated and may
+ * be disallowed in the future.
+ */
+ VectorBase &
+ operator = (const TrilinosScalar s);
+
+ /**
+ * Copy function. This function takes
+ * a VectorBase vector and copies all
+ * the elements. The target vector
+ * will have the same parallel
+ * distribution as the calling
+ * vector.
+ */
+ VectorBase &
+ operator = (const VectorBase &v);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II vector and
+ * copies it into a
+ * TrilinosWrapper vector. Note
+ * that since we do not provide
+ * any Epetra_Map that tells
+ * about the partitioning of the
+ * vector among the MPI
+ * processes, the size of the
+ * TrilinosWrapper vector has to
+ * be the same as the size of the
+ * input vector. In order to
+ * change the map, use the
+ * reinit(const Epetra_Map
+ * &input_map) function.
+ */
+ template <typename Number>
+ VectorBase &
+ operator = (const ::dealii::Vector<Number> &v);
+
+ /**
+ * Test for equality. This
+ * function assumes that the
+ * present vector and the one to
+ * compare with have the same
+ * size already, since comparing
+ * vectors of different sizes
+ * does not make much sense anyway.
+ */
+ bool operator == (const VectorBase &v) const;
+
+ /**
+ * Test for inequality. This
+ * function assumes that the
+ * present vector and the one to
+ * compare with have the same
+ * size already, since comparing
+ * vectors of different sizes
+ * does not make much sense anyway.
+ */
+ bool operator != (const VectorBase &v) const;
+
+ /**
+ * Return the global dimension of
+ * the vector.
+ */
+ unsigned int size () const;
+
+ /**
+ * Return the local dimension of
+ * the vector, i.e. the number of
+ * elements stored on the present
+ * MPI process. For sequential
+ * vectors, this number is the
+ * same as size(), but for
+ * parallel vectors it may be
+ * smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ *
+ * If the vector contains ghost
+ * elements, they are included in
+ * this number.
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which elements of
+ * this vector are stored
+ * locally. The first number is
+ * the index of the first element
+ * stored, the second the index
+ * of the one past the last one
+ * that is stored locally. If
+ * this is a sequential vector,
+ * then the result will be the
+ * pair (0,N), otherwise it will
+ * be a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
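+ *
+ * A sketch of looping over the locally stored range of a vector
+ * <tt>v</tt> (assumed to have no ghost elements):
+ *
+ * @verbatim
+ * const std::pair<unsigned int, unsigned int> range = v.local_range();
+ * for (unsigned int i=range.first; i<range.second; ++i)
+ *   v(i) = 2. * v(i);
+ * @endverbatim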
+ */
+ std::pair<unsigned int, unsigned int> local_range () const;
+
+ /**
+ * Return whether @p index is in
+ * the local range or not, see
+ * also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
+
+ /**
+ * Return whether the vector contains ghost
+ * elements. This answer is true if there
+ * are ghost elements on at least one
+ * process.
+ */
+ bool has_ghost_elements() const;
+
+ /**
+ * Return the scalar (inner)
+ * product of two vectors. The
+ * vectors must have the same
+ * size.
+ */
+ TrilinosScalar operator * (const VectorBase &vec) const;
+
+ /**
+ * Return square of the
+ * $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Mean value of the elements of
+ * this vector.
+ */
+ TrilinosScalar mean_value () const;
+
+ /**
+ * Compute the minimal value of
+ * the elements of this vector.
+ */
+ TrilinosScalar minimal_value () const;
+
+ /**
+ * $l_1$-norm of the vector. The
+ * sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * $l_2$-norm of the vector. The
+ * square root of the sum of the
+ * squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * $l_p$-norm of the vector. The
+ * <i>p</i>th root of the sum of
+ * the <i>p</i>th powers of the
+ * absolute values of the
+ * elements.
+ */
+ real_type lp_norm (const TrilinosScalar p) const;
+
+ /**
+ * Maximum absolute value of the
+ * elements.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return whether the vector
+ * contains only elements with
+ * value zero. This function is
+ * mainly for internal
+ * consistency checks and should
+ * seldom be used when not in
+ * debug mode since it takes quite
+ * some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector
+ * has no negative entries,
+ * i.e. all entries are zero or
+ * positive. This function is
+ * used, for example, to check
+ * whether refinement indicators
+ * are really all positive (or
+ * zero).
+ */
+ bool is_non_negative () const;
+ //@}
+
+
+ /**
+ * @name 2: Data-Access
+ */
+ //@{
+
+ /**
+ * Provide access to a given
+ * element, both read and write.
+ */
+ reference
+ operator () (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element. This is equivalent to
+ * the <code>el()</code> command.
+ */
+ TrilinosScalar
+ operator () (const unsigned int index) const;
+
+ /**
+ * Provide access to a given
+ * element, both read and write.
+ *
+ * Exactly the same as operator().
+ */
+ reference
+ operator [] (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element. This is equivalent to
+ * the <code>el()</code> command.
+ *
+ * Exactly the same as operator().
+ */
+ TrilinosScalar
+ operator [] (const unsigned int index) const;
+
+ /**
+ * Return the value of the vector
+ * entry <i>i</i>. Note that this
+ * function only works
+ * properly when we request
+ * data stored on the local
+ * processor. The function will
+ * throw an exception in case the
+ * element sits on another
+ * process.
+ */
+ TrilinosScalar el (const unsigned int index) const;
+
+ /**
+ * A collective set operation:
+ * instead of setting individual
+ * elements of a vector, this
+ * function allows to set a whole
+ * set of elements at once. The
+ * indices of the elements to be
+ * set are stated in the first
+ * argument, the corresponding
+ * values in the second.
+ */
+ void set (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values);
++ const std::vector<TrilinosScalar> &values);
+
+ /**
+ * This is a second collective
+ * set operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ void set (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<TrilinosScalar> &values);
+ //@}
+
+
+ /**
+ * @name 3: Modification of vectors
+ */
+ //@{
+
+ /**
+ * This collective set operation
+ * is of lower level and can
+ * handle anything else —
+ * the only thing you have to
+ * provide is an address where
+ * all the indices are stored and
+ * the number of elements to be
+ * set.
+ */
+ void set (const unsigned int n_elements,
+ const unsigned int *indices,
+ const TrilinosScalar *values);
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
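+ *
+ * A short sketch (the entries below are made up for illustration):
+ *
+ * @verbatim
+ * std::vector<unsigned int>   indices (2);
+ * std::vector<TrilinosScalar> values (2);
+ * indices[0] = 4;  values[0] = 1.0;
+ * indices[1] = 7;  values[1] = 2.5;
+ *
+ * v.add (indices, values);
+ * v.compress (::dealii::VectorOperation::add);
+ * @endverbatim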
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const std::vector<TrilinosScalar> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<TrilinosScalar> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
+ const TrilinosScalar *values);
+
+ /**
+ * Multiply the entire vector by
+ * a fixed factor.
+ */
+ VectorBase &operator *= (const TrilinosScalar factor);
+
+ /**
+ * Divide the entire vector by a
+ * fixed factor.
+ */
+ VectorBase &operator /= (const TrilinosScalar factor);
+
+ /**
+ * Add the given vector to the
+ * present one.
+ */
+ VectorBase &operator += (const VectorBase &V);
+
+ /**
+ * Subtract the given vector from
+ * the present one.
+ */
+ VectorBase &operator -= (const VectorBase &V);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is
+ * a scalar and not a vector.
+ */
+ void add (const TrilinosScalar s);
+
+ /**
+ * Simple vector addition, equal
+ * to the <tt>operator
+ * +=</tt>.
+ *
+ * Though, if the second argument
+ * <tt>allow_different_maps</tt>
+ * is set, then it is possible to
+ * add data from a different map.
+ */
+ void add (const VectorBase &V,
+ const bool allow_different_maps = false);
+
+ /**
+ * Simple addition of a multiple
+ * of a vector, i.e. <tt>*this =
+ * a*V</tt>.
+ */
+ void add (const TrilinosScalar a,
+ const VectorBase &V);
+
+ /**
+ * Multiple addition of scaled
+ * vectors, i.e. <tt>*this = a*V +
+ * b*W</tt>.
+ */
+ void add (const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W);
+
+ /**
+ * Scaling and simple vector
+ * addition, i.e. <tt>*this =
+ * s*(*this) + V</tt>.
+ */
+ void sadd (const TrilinosScalar s,
+ const VectorBase &V);
+
+ /**
+ * Scaling and simple addition,
+ * i.e. <tt>*this = s*(*this) +
+ * a*V</tt>.
+ */
+ void sadd (const TrilinosScalar s,
+ const TrilinosScalar a,
+ const VectorBase &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const TrilinosScalar s,
+ const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this) + a*V +
+ * b*W + c*X</tt>.
+ */
+ void sadd (const TrilinosScalar s,
+ const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W,
+ const TrilinosScalar c,
+ const VectorBase &X);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const VectorBase &scaling_factors);
+
+ /**
+ * Assignment <tt>*this =
+ * a*V</tt>.
+ */
+ void equ (const TrilinosScalar a,
+ const VectorBase &V);
+
+ /**
+ * Assignment <tt>*this = a*V +
+ * b*W</tt>.
+ */
+ void equ (const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W);
+
+ /**
+ * Compute the elementwise ratio
+ * of the two given vectors, that
+ * is let <tt>this[i] =
+ * a[i]/b[i]</tt>. This is useful
+ * for example if you want to
+ * compute the cellwise ratio of
+ * true to estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const VectorBase &a,
+ const VectorBase &b);
++
++
++ /**
++ * Empty function; added to allow
++ * PETSc and Trilinos Vectors to
++ * be used interchangeably
++ */
++ void update_ghost_values() const;
+ //@}
+
+
+ /**
+ * @name 4: Mixed stuff
+ */
+ //@{
+
+ /**
+ * Return a const reference to the
+ * underlying Trilinos
+ * Epetra_MultiVector class.
+ */
+ const Epetra_MultiVector &trilinos_vector () const;
+
+ /**
+ * Return a (modifiable) reference to
+ * the underlying Trilinos
+ * Epetra_FEVector class.
+ */
+ Epetra_FEVector &trilinos_vector ();
+
+ /**
+ * Return a const reference to the
+ * underlying Trilinos Epetra_Map
+ * that sets the parallel
+ * partitioning of the vector.
+ */
+ const Epetra_Map &vector_partitioner () const;
+
+ /**
+ * Output of vector in
+ * user-defined format in analogy
+ * to the dealii::Vector<number>
+ * class.
+ */
+ void print (const char *format = 0) const;
+
+ /**
+ * Print to a stream. @p
+ * precision denotes the desired
+ * precision with which values
+ * shall be printed, @p
+ * scientific whether scientific
+ * notation shall be used. If @p
+ * across is @p true then the
+ * vector is printed in a line,
+ * while if @p false then the
+ * elements are printed on a
+ * separate line each.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector @p
+ * v. One could do this operation
+ * with a temporary variable and
+ * copying over the data
+ * elements, but this function is
+ * significantly more efficient
+ * since it only swaps the
+ * pointers to the data of the
+ * two vectors and therefore does
+ * not need to allocate temporary
+ * storage and move data
+ * around. Note that the vectors
+ * need to be of the same size
+ * and be based on the same map.
+ *
+ * This function is analogous to
+ * the @p swap function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * <tt>swap(u,v)</tt> that simply
+ * calls <tt>u.swap(v)</tt>,
+ * again in analogy to standard
+ * functions.
+ */
+ void swap (VectorBase &v);
+
+ /**
+ * Estimate for the memory
+ * consumption in bytes.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
++
+ /**
+ * Exception
+ */
+ DeclException0 (ExcGhostsPresent);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcDifferentParallelPartitioning);
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ /**
+ * Exception
+ */
+ DeclException3 (ExcAccessToNonlocalElement,
+ int, int, int,
+ << "You tried to access element " << arg1
+ << " of a distributed vector, but only entries "
+ << arg2 << " through " << arg3
+ << " are stored locally and can be accessed.");
+
+
+ private:
+ /**
+ * Trilinos doesn't allow to
+ * mix additions to matrix
+ * entries and overwriting them
+ * (to make synchronisation of
+ * parallel computations
+ * simpler). The way we do it
+ * is to, for each access
+ * operation, store whether it
+ * is an insertion or an
+ * addition. If the previous
+ * one was of different type,
+ * then we first have to flush
+ * the Trilinos buffers;
+ * otherwise, we can simply go
+ * on. Luckily, Trilinos has
+ * an object for this which
+ * does already all the
+ * parallel communications in
+ * such a case, so we simply
+ * use their model, which
+ * stores whether the last
+ * operation was an addition or
+ * an insertion.
+ */
+ Epetra_CombineMode last_action;
+
+ /**
+ * A boolean variable to hold
+ * information on whether the
+ * vector is compressed or not.
+ */
+ bool compressed;
+
+ /**
+ * Whether this vector has ghost elements. This is true
+ * on all processors even if only one of them has any
+ * ghost elements.
+ */
+ bool has_ghosts;
+
+ /**
+ * An Epetra distributed vector
+ * type. Requires an existing
+ * Epetra_Map for storing data.
+ */
+ std_cxx1x::shared_ptr<Epetra_FEVector> vector;
+
+
+ /**
+ * Make the reference class a
+ * friend.
+ */
+ friend class internal::VectorReference;
+ friend class Vector;
+ friend class MPI::Vector;
};
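The access semantics described in the last_action comment above can be illustrated with a short sketch (assuming deal.II was configured with Trilinos; the Epetra_Map argument partitioning_map is hypothetical, and the explicit compress() between the two phases is the conservative choice that the last_action bookkeeping is meant to track):

    #include <deal.II/lac/trilinos_vector.h>
    #include <vector>

    void set_then_add (const Epetra_Map &partitioning_map)
    {
      TrilinosWrappers::MPI::Vector v (partitioning_map);

      std::vector<unsigned int>   indices (2);
      std::vector<TrilinosScalar> values  (2, 1.0);
      indices[0] = 0;  indices[1] = 1;

      v.set (indices, values);   // overwrite entries: an "insert"-type action
      v.compress ();             // exchange data; vector is now globally consistent
      v.add (indices, values);   // switch to accumulating into the same entries
      v.compress ();             // again make the result consistent across processes
    }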
inline
void
VectorBase::set (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values)
+ const std::vector<TrilinosScalar> &values)
{
- // if we have ghost values, do not allow
- // writing to this vector at all.
+ // if we have ghost values, do not allow
+ // writing to this vector at all.
Assert (!has_ghost_elements(), ExcGhostsPresent());
Assert (indices.size() == values.size(),
inline
void
VectorBase::add (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values)
+ const std::vector<TrilinosScalar> &values)
{
- // if we have ghost values, do not allow
- // writing to this vector at all.
+ // if we have ghost values, do not allow
+ // writing to this vector at all.
Assert (!has_ghost_elements(), ExcGhostsPresent());
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(),values.size()));
#ifdef DEAL_II_USE_TRILINOS
- /**
- * Another copy operator: copy
- * the values from a (sequential
- * or parallel, depending on the
- * underlying compiler) Trilinos
- * wrapper vector class. This
- * operator is only available if
- * Trilinos was detected during
- * configuration time.
- *
- * Note that due to the
- * communication model used in MPI,
- * this operation can only succeed
- * if all processes do it at the
- * same time. I.e., it is not
- * possible for only one process to
- * obtain a copy of a parallel
- * vector while the other jobs do
- * something else.
- */
- Vector<Number> &
- operator = (const TrilinosWrappers::MPI::Vector &v);
-
- /**
- * Another copy operator: copy the
- * values from a sequential
- * Trilinos wrapper vector
- * class. This operator is only
- * available if Trilinos was
- * detected during configuration
- * time.
- */
- Vector<Number> &
- operator = (const TrilinosWrappers::Vector &v);
+ /**
+ * Another copy operator: copy
+ * the values from a (sequential
+ * or parallel, depending on how
+ * Trilinos was built)
+ * Trilinos wrapper vector class. This
+ * operator is only available if
+ * Trilinos was detected during
+ * configuration time.
+ *
+ * Note that due to the
+ * communication model used in MPI,
+ * this operation can only succeed
+ * if all processes do it at the
+ * same time. I.e., it is not
+ * possible for only one process to
+ * obtain a copy of a parallel
+ * vector while the other processes
+ * do something else.
+ */
+ Vector<Number> &
+ operator = (const TrilinosWrappers::MPI::Vector &v);
+
+ /**
+ * Another copy operator: copy the
+ * values from a sequential
+ * Trilinos wrapper vector
+ * class. This operator is only
+ * available if Trilinos was
+ * detected during configuration
+ * time.
+ */
+ Vector<Number> &
+ operator = (const TrilinosWrappers::Vector &v);
#endif
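As an illustration of the two copy operators declared above, here is a minimal sketch (it assumes deal.II was built with Trilinos and that the argument vector has already been initialized elsewhere):

    #include <deal.II/lac/vector.h>
    #include <deal.II/lac/trilinos_vector.h>

    void import_to_local_copy (const TrilinosWrappers::MPI::Vector &distributed_vec)
    {
      // Every MPI process has to execute this assignment at the same time:
      // importing a distributed vector into a (replicated) deal.II Vector is
      // a collective operation.
      dealii::Vector<double> local_copy;
      local_copy = distributed_vec;
    }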
- /**
- * Test for equality. This function
- * assumes that the present vector
- * and the one to compare with have
- * the same size already, since
- * comparing vectors of different
- * sizes makes not much sense
- * anyway.
- */
- template <typename Number2>
- bool operator == (const Vector<Number2> &v) const;
-
- /**
- * Test for inequality. This function
- * assumes that the present vector and
- * the one to compare with have the same
- * size already, since comparing vectors
- * of different sizes makes not much
- * sense anyway.
- */
- template <typename Number2>
- bool operator != (const Vector<Number2> &v) const;
-
- /**
- * Return the scalar product of
- * two vectors. The return type
- * is the underlying type of
- * @p this vector, so the return
- * type and the accuracy with
- * which it the result is
- * computed depend on the order
- * of the arguments of this
- * vector.
- *
- * For complex vectors, the
- * scalar product is implemented
- * as $\left<v,w\right>=\sum_i
- * v_i \bar{w_i}$.
- */
- template <typename Number2>
- Number operator * (const Vector<Number2> &V) const;
-
- /**
- * Return square of the $l_2$-norm.
- */
- real_type norm_sqr () const;
-
- /**
- * Mean value of the elements of
- * this vector.
- */
- Number mean_value () const;
-
- /**
- * $l_1$-norm of the vector.
- * The sum of the absolute values.
- */
- real_type l1_norm () const;
-
- /**
- * $l_2$-norm of the vector. The
- * square root of the sum of the
- * squares of the elements.
- */
- real_type l2_norm () const;
-
- /**
- * $l_p$-norm of the vector. The
- * pth root of the sum of the pth
- * powers of the absolute values
- * of the elements.
- */
- real_type lp_norm (const real_type p) const;
-
- /**
- * Maximum absolute value of the
- * elements.
- */
- real_type linfty_norm () const;
-
- /**
- * Return dimension of the vector.
- */
- unsigned int size () const;
-
- /**
- * Return whether the vector contains only
- * elements with value zero. This function
- * is mainly for internal consistency
- * checks and should seldom be used when
- * not in debug mode since it uses quite
- * some time.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries are
- * zero or positive. This function is
- * used, for example, to check whether
- * refinement indicators are really all
- * positive (or zero).
- *
- * The function obviously only makes
- * sense if the template argument of this
- * class is a real type. If it is a
- * complex type, then an exception is
- * thrown.
- */
- bool is_non_negative () const;
-
- /**
- * Make the @p Vector class a bit like
- * the <tt>vector<></tt> class of the C++
- * standard library by returning
- * iterators to the start and end of the
- * elements of this vector.
- */
- iterator begin ();
-
- /**
- * Return constant iterator to the start of
- * the vectors.
- */
- const_iterator begin () const;
-
- /**
- * Return an iterator pointing to the
- * element past the end of the array.
- */
- iterator end ();
-
- /**
- * Return a constant iterator pointing to
- * the element past the end of the array.
- */
- const_iterator end () const;
- //@}
-
-
- /**
- * @name 2: Data-Access
- */
- //@{
- /**
- * Access the value of the @p ith
- * component.
- */
- Number operator() (const unsigned int i) const;
-
- /**
- * Access the @p ith component
- * as a writeable reference.
- */
- Number& operator() (const unsigned int i);
-
- /**
- * Access the value of the @p ith
- * component.
- *
- * Exactly the same as operator().
- */
- Number operator[] (const unsigned int i) const;
-
- /**
- * Access the @p ith component
- * as a writeable reference.
- *
- * Exactly the same as operator().
- */
- Number& operator[] (const unsigned int i);
- //@}
-
-
- /**
- * @name 3: Modification of vectors
- */
- //@{
-
- /**
- * Add the given vector to the present
- * one.
- */
- Vector<Number> & operator += (const Vector<Number> &V);
-
- /**
- * Subtract the given vector from the
- * present one.
- */
- Vector<Number> & operator -= (const Vector<Number> &V);
-
- /**
- * A collective add operation:
- * This funnction adds a whole
- * set of values stored in @p
- * values to the vector
- * components specified by @p
- * indices.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const std::vector<OtherNumber> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const Vector<OtherNumber> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const OtherNumber *values);
-
- /**
- * Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
- */
- void add (const Number s);
-
- /**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const Vector<Number> &V);
-
- /**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this += a*V</tt>.
- */
- void add (const Number a, const Vector<Number> &V);
-
- /**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this += a*V+b*W</tt>.
- */
- void add (const Number a, const Vector<Number> &V,
- const Number b, const Vector<Number> &W);
-
- /**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this)+V</tt>.
- */
- void sadd (const Number s,
- const Vector<Number> &V);
-
- /**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this)+a*V</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V);
-
- /**
- * Scaling and multiple addition.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W);
-
- /**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W,
- const Number c,
- const Vector<Number> &X);
-
- /**
- * Scale each element of the
- * vector by the given factor.
- *
- * This function is deprecated
- * and will be removed in a
- * future version. Use
- * <tt>operator *=</tt> and
- * <tt>operator /=</tt> instead.
- */
- void scale (const Number factor);
-
-
- /**
- * Scale each element of the
- * vector by a constant
- * value.
- */
- Vector<Number> & operator *= (const Number factor);
-
- /**
- * Scale each element of the
- * vector by the inverse of the
- * given value.
- */
- Vector<Number> & operator /= (const Number factor);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- void scale (const Vector<Number> &scaling_factors);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- template <typename Number2>
- void scale (const Vector<Number2> &scaling_factors);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- void equ (const Number a, const Vector<Number>& u);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- template <typename Number2>
- void equ (const Number a, const Vector<Number2>& u);
-
- /**
- * Assignment <tt>*this = a*u + b*v</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v);
-
- /**
- * Assignment <tt>*this = a*u + b*v + b*w</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v,
- const Number c, const Vector<Number>& w);
-
- /**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
- *
- * This vector is appropriately
- * scaled to hold the result.
- *
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
- */
- void ratio (const Vector<Number> &a,
- const Vector<Number> &b);
-
- /**
- * This function does nothing but is
- * there for compatibility with the
- * @p PETScWrappers::Vector class.
- *
- * For the PETSc vector wrapper class,
- * this function updates the ghost
- * values of the PETSc vector. This
- * is necessary after any modification
- * before reading ghost values.
- *
- * However, for the implementation of
- * this class, it is immaterial and thus
- * an empty function.
- */
- void update_ghost_values () const;
- //@}
-
-
- /**
- * @name 4: Mixed stuff
- */
- //@{
- /**
- * Output of vector in user-defined
- * format. For complex-valued vectors,
- * the format should include specifiers
- * for both the real and imaginary
- * parts.
- */
- void print (const char* format = 0) const;
-
- /**
- * Print to a
- * stream. @p precision denotes
- * the desired precision with
- * which values shall be printed,
- * @p scientific whether
- * scientific notation shall be
- * used. If @p across is
- * @p true then the vector is
- * printed in a line, while if
- * @p false then the elements
- * are printed on a separate line
- * each.
- */
- void print (std::ostream& out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Print to a
- * LogStream. <tt>width</tt> is
- * used as argument to the
- * std::setw manipulator, if
- * printing across. If @p
- * across is @p true then the
- * vector is printed in a line,
- * while if @p false then the
- * elements are printed on a
- * separate line each.
- */
- void print (LogStream& out,
- const unsigned int width = 6,
- const bool across = true) const;
-
- /**
- * Write the vector en bloc to a
- * file. This is done in a binary
- * mode, so the output is neither
- * readable by humans nor
- * (probably) by other computers
- * using a different operating
- * system or number format.
- */
- void block_write (std::ostream &out) const;
-
- /**
- * Read a vector en block from a
- * file. This is done using the
- * inverse operations to the
- * above function, so it is
- * reasonably fast because the
- * bitstream is not interpreted.
- *
- * The vector is resized if
- * necessary.
- *
- * A primitive form of error
- * checking is performed which
- * will recognize the bluntest
- * attempts to interpret some
- * data as a vector stored
- * bitwise to a file, but not
- * more.
- */
- void block_read (std::istream &in);
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
- //@}
-
- /**
- * Write the data of this object to
- * a stream for the purpose of serialization.
- */
- template <class Archive>
- void save (Archive & ar, const unsigned int version) const;
-
- /**
- * Read the data of this object
- * from a stream for the purpose of serialization.
- */
- template <class Archive>
- void load (Archive & ar, const unsigned int version);
-
- BOOST_SERIALIZATION_SPLIT_MEMBER()
-
- protected:
-
- /**
- * Dimension. Actual number of
- * components contained in the
- * vector. Get this number by
- * calling <tt>size()</tt>.
- */
- unsigned int vec_size;
-
- /**
- * Amount of memory actually
- * reserved for this vector. This
- * number may be greater than
- * @p vec_size if a @p reinit was
- * called with less memory
- * requirements than the vector
- * needed last time. At present
- * @p reinit does not free
- * memory when the number of
- * needed elements is reduced.
- */
- unsigned int max_vec_size;
-
- /**
- * Pointer to the array of
- * elements of this vector.
- */
- Number *val;
-
- /**
- * Make all other vector types
- * friends.
- */
- template <typename Number2> friend class Vector;
-
- /**
- * LAPACK matrices need access to
- * the data.
- */
- friend class LAPACKFullMatrix<Number>;
-
- /**
- * VectorView will access the
- * pointer.
- */
- friend class VectorView<Number>;
+ /**
+ * Test for equality. This function
+ * assumes that the present vector
+ * and the one to compare with have
+ * the same size already, since
+ * comparing vectors of different
+ * sizes does not make much sense
+ * anyway.
+ */
+ template <typename Number2>
+ bool operator == (const Vector<Number2> &v) const;
+
+ /**
+ * Test for inequality. This function
+ * assumes that the present vector and
+ * the one to compare with have the same
+ * size already, since comparing vectors
+ * of different sizes does not make
+ * much sense anyway.
+ */
+ template <typename Number2>
+ bool operator != (const Vector<Number2> &v) const;
+
+ /**
+ * Return the scalar product of
+ * two vectors. The return type
+ * is the underlying type of
+ * @p this vector, so the return
+ * type and the accuracy with
+ * which the result is
+ * computed depend on the order
+ * of the arguments of this
+ * vector.
+ *
+ * For complex vectors, the
+ * scalar product is implemented
+ * as $\left<v,w\right>=\sum_i
+ * v_i \bar{w_i}$.
+ */
+ template <typename Number2>
+ Number operator * (const Vector<Number2> &V) const;
+
+ /**
+ * Return square of the $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Mean value of the elements of
+ * this vector.
+ */
+ Number mean_value () const;
+
+ /**
+ * $l_1$-norm of the vector.
+ * The sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * $l_2$-norm of the vector. The
+ * square root of the sum of the
+ * squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * $l_p$-norm of the vector. The
+ * pth root of the sum of the pth
+ * powers of the absolute values
+ * of the elements.
+ */
+ real_type lp_norm (const real_type p) const;
+
+ /**
+ * Maximum absolute value of the
+ * elements.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return dimension of the vector.
+ */
+ unsigned int size () const;
+
+ /**
+ * Return whether the vector contains only
+ * elements with value zero. This function
+ * is mainly for internal consistency
+ * checks and should seldom be used when
+ * not in debug mode since it takes quite
+ * some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries are
+ * zero or positive. This function is
+ * used, for example, to check whether
+ * refinement indicators are really all
+ * positive (or zero).
+ *
+ * The function obviously only makes
+ * sense if the template argument of this
+ * class is a real type. If it is a
+ * complex type, then an exception is
+ * thrown.
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Make the @p Vector class a bit like
+ * the <tt>vector<></tt> class of the C++
+ * standard library by returning
+ * iterators to the start and end of the
+ * elements of this vector.
+ */
+ iterator begin ();
+
+ /**
+ * Return a constant iterator to the start
+ * of the vector.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Return an iterator pointing to the
+ * element past the end of the array.
+ */
+ iterator end ();
+
+ /**
+ * Return a constant iterator pointing to
+ * the element past the end of the array.
+ */
+ const_iterator end () const;
+ //@}
+
+
+ /**
+ * @name 2: Data-Access
+ */
+ //@{
+ /**
+ * Access the value of the @p ith
+ * component.
+ */
+ Number operator() (const unsigned int i) const;
+
+ /**
+ * Access the @p ith component
+ * as a writeable reference.
+ */
+ Number &operator() (const unsigned int i);
+
+ /**
+ * Access the value of the @p ith
+ * component.
+ *
+ * Exactly the same as operator().
+ */
+ Number operator[] (const unsigned int i) const;
+
+ /**
+ * Access the @p ith component
+ * as a writeable reference.
+ *
+ * Exactly the same as operator().
+ */
+ Number &operator[] (const unsigned int i);
+ //@}
+
+
+ /**
+ * @name 3: Modification of vectors
+ */
+ //@{
+
+ /**
+ * Add the given vector to the present
+ * one.
+ */
+ Vector<Number> &operator += (const Vector<Number> &V);
+
+ /**
+ * Subtract the given vector from the
+ * present one.
+ */
+ Vector<Number> &operator -= (const Vector<Number> &V);
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
- const std::vector<OtherNumber> &values);
++ const std::vector<OtherNumber> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
+ const Vector<OtherNumber> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ template <typename OtherNumber>
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
- const OtherNumber *values);
++ const OtherNumber *values);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is a
+ * scalar and not a vector.
+ */
+ void add (const Number s);
+
+ /**
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const Vector<Number> &V);
+
+ /**
+ * Simple addition of a multiple of a
+ * vector, i.e. <tt>*this += a*V</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V);
+
+ /**
+ * Multiple addition of scaled vectors,
+ * i.e. <tt>*this += a*V+b*W</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V,
+ const Number b, const Vector<Number> &W);
+
+ /**
+ * Scaling and simple vector addition,
+ * i.e.
+ * <tt>*this = s*(*this)+V</tt>.
+ */
+ void sadd (const Number s,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and simple addition, i.e.
+ * <tt>*this = s*(*this)+a*V</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W,
+ const Number c,
+ const Vector<Number> &X);
+
+ /**
+ * Scale each element of the
+ * vector by the given factor.
+ *
+ * This function is deprecated
+ * and will be removed in a
+ * future version. Use
+ * <tt>operator *=</tt> and
+ * <tt>operator /=</tt> instead.
+ */
+ void scale (const Number factor);
+
+
+ /**
+ * Scale each element of the
+ * vector by a constant
+ * value.
+ */
+ Vector<Number> &operator *= (const Number factor);
+
+ /**
+ * Scale each element of the
+ * vector by the inverse of the
+ * given value.
+ */
+ Vector<Number> &operator /= (const Number factor);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const Vector<Number> &scaling_factors);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ template <typename Number2>
+ void scale (const Vector<Number2> &scaling_factors);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ template <typename Number2>
+ void equ (const Number a, const Vector<Number2> &u);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v + c*w</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v,
+ const Number c, const Vector<Number> &w);
+
+ /**
+ * Compute the elementwise ratio of the
+ * two given vectors, that is let
+ * <tt>this[i] = a[i]/b[i]</tt>. This is
+ * useful for example if you want to
+ * compute the cellwise ratio of true to
+ * estimated error.
+ *
+ * This vector is appropriately
+ * resized to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const Vector<Number> &a,
+ const Vector<Number> &b);
+
+ /**
+ * This function does nothing but is
+ * there for compatibility with the
+ * @p PETScWrappers::Vector class.
+ *
+ * For the PETSc vector wrapper class,
+ * this function updates the ghost
+ * values of the PETSc vector. This
+ * is necessary after any modification
+ * before reading ghost values.
+ *
+ * However, for the implementation of
+ * this class, it is immaterial and thus
+ * an empty function.
+ */
+ void update_ghost_values () const;
+ //@}
+
+
+ /**
+ * @name 4: Mixed stuff
+ */
+ //@{
+ /**
+ * Output of vector in user-defined
+ * format. For complex-valued vectors,
+ * the format should include specifiers
+ * for both the real and imaginary
+ * parts.
+ */
+ void print (const char *format = 0) const;
+
+ /**
+ * Print to a
+ * stream. @p precision denotes
+ * the desired precision with
+ * which values shall be printed,
+ * @p scientific whether
+ * scientific notation shall be
+ * used. If @p across is
+ * @p true then the vector is
+ * printed in a line, while if
+ * @p false then the elements
+ * are printed on a separate line
+ * each.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Print to a
+ * LogStream. <tt>width</tt> is
+ * used as argument to the
+ * std::setw manipulator, if
+ * printing across. If @p
+ * across is @p true then the
+ * vector is printed in a line,
+ * while if @p false then the
+ * elements are printed on a
+ * separate line each.
+ */
+ void print (LogStream &out,
+ const unsigned int width = 6,
+ const bool across = true) const;
+
+ /**
+ * Write the vector en bloc to a
+ * file. This is done in a binary
+ * mode, so the output is neither
+ * readable by humans nor
+ * (probably) by other computers
+ * using a different operating
+ * system or number format.
+ */
+ void block_write (std::ostream &out) const;
+
+ /**
+ * Read a vector en bloc from a
+ * file. This is done using the
+ * inverse operations to the
+ * above function, so it is
+ * reasonably fast because the
+ * bitstream is not interpreted.
+ *
+ * The vector is resized if
+ * necessary.
+ *
+ * A primitive form of error
+ * checking is performed which
+ * will recognize the bluntest
+ * attempts to interpret some
+ * data as a vector stored
+ * bitwise to a file, but not
+ * more.
+ */
+ void block_read (std::istream &in);
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
+ /**
+ * Write the data of this object to
+ * a stream for the purpose of serialization.
+ */
+ template <class Archive>
+ void save (Archive &ar, const unsigned int version) const;
+
+ /**
+ * Read the data of this object
+ * from a stream for the purpose of serialization.
+ */
+ template <class Archive>
+ void load (Archive &ar, const unsigned int version);
+
+ BOOST_SERIALIZATION_SPLIT_MEMBER()
+
+ protected:
+
+ /**
+ * Dimension. Actual number of
+ * components contained in the
+ * vector. Get this number by
+ * calling <tt>size()</tt>.
+ */
+ unsigned int vec_size;
+
+ /**
+ * Amount of memory actually
+ * reserved for this vector. This
+ * number may be greater than
+ * @p vec_size if a @p reinit was
+ * called with less memory
+ * requirements than the vector
+ * needed last time. At present
+ * @p reinit does not free
+ * memory when the number of
+ * needed elements is reduced.
+ */
+ unsigned int max_vec_size;
+
+ /**
+ * Pointer to the array of
+ * elements of this vector.
+ */
+ Number *val;
+
+ /**
+ * Make all other vector types
+ * friends.
+ */
+ template <typename Number2> friend class Vector;
+
+ /**
+ * LAPACK matrices need access to
+ * the data.
+ */
+ friend class LAPACKFullMatrix<Number>;
+
+ /**
+ * VectorView will access the
+ * pointer.
+ */
+ friend class VectorView<Number>;
};
/*@}*/
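A compact sketch of the arithmetic and output interface documented above (sizes and values are arbitrary and purely illustrative):

    #include <deal.II/lac/vector.h>
    #include <fstream>

    void vector_interface_demo ()
    {
      dealii::Vector<double> u (10), v (10);
      u = 1.0;                        // set every entry to 1
      v = 2.0;

      u.add (0.5, v);                 // u += 0.5*v
      u.sadd (2.0, 1.0, v);           // u  = 2*u + v
      u.equ (3.0, v);                 // u  = 3*v

      const double dot = u * v;       // scalar product
      const double l2  = u.l2_norm(); // square root of the sum of squares
      (void)dot; (void)l2;

      std::ofstream out ("vector.dat", std::ios::binary);
      u.block_write (out);            // raw binary dump, re-read with block_read()
    }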
struct InnerProd
{
Number
- operator() (const Number*&X, const Number2*&Y, const Number &) const
- operator() (const Number *&X, const Number2 *&Y, const Number &) const
++ operator() (const Number *&X, const Number2 *&Y, const Number &) const
{
return *X++ * Number(numbers::NumberTraits<Number2>::conjugate(*Y++));
}
struct Norm2
{
RealType
- operator() (const Number*&X, const Number* &, const RealType &) const
- operator() (const Number *&X, const Number *&, const RealType &) const
++ operator() (const Number *&X, const Number *&, const RealType &) const
{
return numbers::NumberTraits<Number>::abs_square(*X++);
}
struct Norm1
{
RealType
- operator() (const Number*&X, const Number* &, const RealType &) const
- operator() (const Number *&X, const Number *&, const RealType &) const
++ operator() (const Number *&X, const Number *&, const RealType &) const
{
return numbers::NumberTraits<Number>::abs(*X++);
}
struct NormP
{
RealType
- operator() (const Number*&X, const Number* &, const RealType &p) const
- operator() (const Number *&X, const Number *&, const RealType &p) const
++ operator() (const Number *&X, const Number *&, const RealType &p) const
{
return std::pow(numbers::NumberTraits<Number>::abs(*X++), p);
}
struct MeanValue
{
Number
- operator() (const Number*&X, const Number* &, const Number &) const
- operator() (const Number *&X, const Number *&, const Number &) const
++ operator() (const Number *&X, const Number *&, const Number &) const
{
return *X++;
}
inline
void
FEEvaluationBase<dim,dofs_per_cell_,n_q_points_,n_components_,Number>
- ::distribute_local_to_global (std::vector<VectorType*> &dst,
-::distribute_local_to_global (std::vector<VectorType *> &dst,
++::distribute_local_to_global (std::vector<VectorType *> &dst,
const unsigned int first_index) const
{
AssertIndexRange (first_index, dst.size());
inline
void
FEEvaluationBase<dim,dofs_per_cell_,n_q_points_,n_components_,Number>
- ::set_dof_values (std::vector<VectorType*> &dst,
-::set_dof_values (std::vector<VectorType *> &dst,
++::set_dof_values (std::vector<VectorType *> &dst,
const unsigned int first_index) const
{
AssertIndexRange (first_index, dst.size());
namespace internal
{
- namespace MatrixFreeFunctions
- {
- /**
- * The class that stores all geometry-dependent data related with cell
- * interiors for use in the matrix-free class.
- *
- * @author Katharina Kormann and Martin Kronbichler, 2010, 2011
- */
- template <int dim, typename Number>
- struct MappingInfo
+ namespace MatrixFreeFunctions
{
- /**
- * Determines how many bits of an unsigned int
- * are used to distinguish the cell types
- * (Cartesian, with constant Jacobian, or
- * general)
- */
- static const std::size_t n_cell_type_bits = 2;
-
- /**
- * Determines how many types of different
- * cells can be detected at most. Corresponds
- * to the number of bits we reserved for it.
- */
- static const unsigned int n_cell_types = 1U<<n_cell_type_bits;
-
- /**
- * Empty constructor.
- */
- MappingInfo();
-
- /**
- * Computes the information in the given
- * cells. The cells are specified by the level
- * and the index within the level (as given by
- * CellIterator::level() and
- * CellIterator::index(), in order to allow
- * for different kinds of iterators,
- * e.g. standard DoFHandler, multigrid, etc.)
- * on a fixed Triangulation. In addition, a
- * mapping and several quadrature formulas are
- * given.
- */
- void initialize (const dealii::Triangulation<dim> &tria,
- const std::vector<std::pair<unsigned int,unsigned int> > &cells,
- const std::vector<unsigned int> &active_fe_index,
- const Mapping<dim> &mapping,
- const std::vector<dealii::hp::QCollection<1> > &quad,
- const UpdateFlags update_flags);
-
- /**
- * Helper function to determine which update
- * flags must be set in the internal functions
- * to initialize all data as requested by the
- * user.
- */
- UpdateFlags
- compute_update_flags (const UpdateFlags update_flags,
- const std::vector<dealii::hp::QCollection<1> > &quad) const;
-
- /**
- * Returns the type of a given cell as
- * detected during initialization.
- */
- CellType get_cell_type (const unsigned int cell_chunk_no) const;
-
- /**
- * Returns the type of a given cell as
- * detected during initialization.
- */
- unsigned int get_cell_data_index (const unsigned int cell_chunk_no) const;
-
- /**
- * Clears all data fields in this class.
- */
- void clear ();
-
- /**
- * Returns the memory consumption of this
- * class in bytes.
- */
- std::size_t memory_consumption() const;
-
- /**
- * Prints a detailed summary of memory
- * consumption in the different structures of
- * this class to the given output stream.
- */
- template <typename STREAM>
- void print_memory_consumption(STREAM &out,
- const SizeInfo &size_info) const;
-
- /**
- * Stores whether a cell is Cartesian, has
- * constant transform data (Jacobians) or is
- * general. cell_type % 4 gives this
- * information (0: Cartesian, 1: constant
- * Jacobian throughout cell, 2: general cell),
- * and cell_type / 4 gives the index in the
- * data field of where to find the information
- * in the fields Jacobian and JxW values
- * (except for quadrature points, for which
- * the index runs as usual).
- */
- std::vector<unsigned int> cell_type;
-
- /**
- * The first field stores the inverse Jacobian
- * for Cartesian cells: There, it is a
- * diagonal rank-2 tensor, so we actually just
- * store a rank-1 tensor. It is the same on
- * all cells, therefore we only store it once
- * per cell, and use similarities from one
- * cell to another, too (on structured meshes,
- * there are usually many cells with the same
- * Jacobian).
- *
- * The second field stores the Jacobian
- * determinant for Cartesian cells (without
- * the quadrature weight, which depends on the
- * quadrature point, whereas the determinant
- * is the same on each quadrature point).
- */
- AlignedVector<std::pair<Tensor<1,dim,VectorizedArray<Number> >,
- VectorizedArray<Number> > > cartesian_data;
-
- /**
- * The first field stores the Jacobian for
- * non-Cartesian cells where all the Jacobians
- * on the cell are the same (i.e., constant,
- * which comes from a linear transformation
- * from unit to real cell). Also use
- * similarities from one cell to another (on
- * structured meshes, there are usually many
- * cells with the same Jacobian).
- *
- * The second field stores the Jacobian
- * determinant for non-Cartesian cells with
- * constant Jacobian throughout the cell
- * (without the quadrature weight, which
- * depends on the quadrature point, whereas
- * the determinant is the same on each
- * quadrature point).
- */
- AlignedVector<std::pair<Tensor<2,dim,VectorizedArray<Number> >,
- VectorizedArray<Number> > > affine_data;
-
- /**
- * Definition of a structure that stores data
- * that depends on the quadrature formula (if
- * we have more than one quadrature formula on
- * a given problem, these fields will be
- * different)
- */
- struct MappingInfoDependent
+ /**
+ * The class that stores all geometry-dependent data related to cell
+ * interiors for use in the matrix-free class.
+ *
+ * @author Katharina Kormann and Martin Kronbichler, 2010, 2011
+ */
+ template <int dim, typename Number>
+ struct MappingInfo
{
- /**
- * This field stores the row starts for the
- * inverse Jacobian transformations,
- * quadrature weights and second derivatives.
- */
- std::vector<unsigned int> rowstart_jacobians;
-
- /**
- * This field stores the inverse Jacobian
- * transformation from unit to real cell,
- * which is needed for most gradient
- * transformations (corresponds to
- * FEValues::inverse_jacobian) for general
- * cells.
- */
- AlignedVector<Tensor<2,dim,VectorizedArray<Number> > > jacobians;
-
- /**
- * This field stores the Jacobian
- * determinant times the quadrature weights
- * (JxW in deal.II speak) for general cells.
- */
- AlignedVector<VectorizedArray<Number> > JxW_values;
-
- /**
- * Stores the diagonal part of the gradient of
- * the inverse Jacobian transformation. The
- * first index runs over the derivatives
- * $\partial^2/\partial x_i^2$, the second
- * over the space coordinate. Needed for
- * computing the Laplacian of FE functions on
- * the real cell. Uses a separate storage from
- * the off-diagonal part $\partial^2/\partial
- * x_i \partial x_j, i\neq j$ because that is
- * only needed for computing a full Hessian.
- */
- AlignedVector<Tensor<2,dim,VectorizedArray<Number> > > jacobians_grad_diag;
-
- /**
- * Stores the off-diagonal part of the
- * gradient of the inverse Jacobian
- * transformation. Because of symmetry, only
- * the upper diagonal part is needed. The
- * first index runs through the derivatives
- * row-wise, i.e., $\partial^2/\partial x_1
- * \partial x_2$ first, then
- * $\partial^2/\partial x_1 \partial x_3$, and
- * so on. The second index is the spatial
- * coordinate. Not filled currently.
- */
- AlignedVector<Tensor<1,(dim>1?dim*(dim-1)/2:1),
- Tensor<1,dim,VectorizedArray<Number> > > > jacobians_grad_upper;
-
- /**
- * Stores the row start for quadrature points
- * in real coordinates for both types of
- * cells. Note that Cartesian cells will have
- * shorter fields (length is @p n_q_points_1d)
- * than non-Cartesian cells (length is @p
- * n_q_points).
- */
- std::vector<unsigned int> rowstart_q_points;
-
- /**
- * Stores the quadrature points in real
- * coordinates for Cartesian cells (does not
- * need to store the full data on all points)
- */
- AlignedVector<Point<dim,VectorizedArray<Number> > > quadrature_points;
-
- /**
- * The dim-dimensional quadrature formula
- * underlying the problem (constructed from a
- * 1D tensor product quadrature formula).
- */
- dealii::hp::QCollection<dim> quadrature;
-
- /**
- * The (dim-1)-dimensional quadrature formula
- * corresponding to face evaluation
- * (constructed from a 1D tensor product
- * quadrature formula).
- */
- dealii::hp::QCollection<dim-1> face_quadrature;
-
- /**
- * The number of quadrature points for the
- * current quadrature formula.
- */
- std::vector<unsigned int> n_q_points;
-
- /**
- * The number of quadrature points for the
- * current quadrature formula when applied to
- * a face. Only set if the quadrature formula
- * is derived from a tensor product, since it
- * is not defined from the full quadrature
- * formula otherwise.
- */
- std::vector<unsigned int> n_q_points_face;
-
- /**
- * The quadrature weights (vectorized data
- * format) on the unit cell.
- */
- std::vector<AlignedVector<VectorizedArray<Number> > > quadrature_weights;
-
- /**
- * This variable stores the number of
- * quadrature points for all quadrature
- * indices in the underlying element for
- * easier access to data in the hp case.
- */
- std::vector<unsigned int> quad_index_conversion;
-
- /**
- * Returns the quadrature index for a given
- * number of quadrature points. If not in hp
- * mode or if the index is not found, this
- * function always returns index 0. Hence,
- * this function does not check whether the
- * given degree is actually present.
- */
- unsigned int
- quad_index_from_n_q_points (const unsigned int n_q_points) const;
-
-
- /**
- * Prints a detailed summary of memory
- * consumption in the different structures of
- * this class to the given output stream.
- */
+ /**
+ * Determines how many bits of an unsigned int
+ * are used to distinguish the cell types
+ * (Cartesian, with constant Jacobian, or
+ * general)
+ */
+ static const std::size_t n_cell_type_bits = 2;
+
+ /**
+ * Determines how many types of different
+ * cells can be detected at most. Corresponds
+ * to the number of bits we reserved for it.
+ */
+ static const unsigned int n_cell_types = 1U<<n_cell_type_bits;
+
+ /**
+ * Empty constructor.
+ */
+ MappingInfo();
+
+ /**
+ * Computes the information in the given
+ * cells. The cells are specified by the level
+ * and the index within the level (as given by
+ * CellIterator::level() and
+ * CellIterator::index(), in order to allow
+ * for different kinds of iterators,
+ * e.g. standard DoFHandler, multigrid, etc.)
+ * on a fixed Triangulation. In addition, a
+ * mapping and several quadrature formulas are
+ * given.
+ */
+ void initialize (const dealii::Triangulation<dim> &tria,
+ const std::vector<std::pair<unsigned int,unsigned int> > &cells,
+ const std::vector<unsigned int> &active_fe_index,
+ const Mapping<dim> &mapping,
- const std::vector<dealii::hp::QCollection<1> > &quad,
++ const std::vector<dealii::hp::QCollection<1> > &quad,
+ const UpdateFlags update_flags);
+
+ /**
+ * Helper function to determine which update
+ * flags must be set in the internal functions
+ * to initialize all data as requested by the
+ * user.
+ */
+ UpdateFlags
+ compute_update_flags (const UpdateFlags update_flags,
- const std::vector<dealii::hp::QCollection<1> > &quad) const;
++ const std::vector<dealii::hp::QCollection<1> > &quad) const;
+
+ /**
+ * Returns the type of a given cell as
+ * detected during initialization.
+ */
+ CellType get_cell_type (const unsigned int cell_chunk_no) const;
+
+ /**
+ * Returns the type of a given cell as
+ * detected during initialization.
+ */
+ unsigned int get_cell_data_index (const unsigned int cell_chunk_no) const;
+
+ /**
+ * Clears all data fields in this class.
+ */
+ void clear ();
+
+ /**
+ * Returns the memory consumption of this
+ * class in bytes.
+ */
+ std::size_t memory_consumption() const;
+
+ /**
+ * Prints a detailed summary of memory
+ * consumption in the different structures of
+ * this class to the given output stream.
+ */
template <typename STREAM>
void print_memory_consumption(STREAM &out,
const SizeInfo &size_info) const;
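The packing convention implied by n_cell_type_bits above can be sketched as follows (illustrative only; packed_value stands for one entry of the cell_type array and the variable names are hypothetical):

    // With n_cell_type_bits == 2 and therefore n_cell_types == 4, each entry
    // stores the cell category in its two lowest bits and an index into the
    // per-category data arrays in the remaining bits.
    const unsigned int packed_value = cell_type[cell_chunk_no];
    const unsigned int category     = packed_value % n_cell_types;  // 0: Cartesian, 1: constant Jacobian, 2: general
    const unsigned int data_index   = packed_value / n_cell_types;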
template <typename DH, typename Quad>
void MatrixFree<dim,Number>::
reinit(const Mapping<dim> &mapping,
- const std::vector<const DH *> &dof_handler,
- const std::vector<const ConstraintMatrix*> &constraint,
- const std::vector<const DH *> &dof_handler,
++ const std::vector<const DH *> &dof_handler,
+ const std::vector<const ConstraintMatrix *> &constraint,
const std::vector<Quad> &quad,
const MatrixFree<dim,Number>::AdditionalData additional_data)
{
template <typename VectorStruct>
inline
- void update_ghost_values_start (const std::vector<VectorStruct> &src)
+ void update_ghost_values_start (const std::vector<VectorStruct> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_start(src[comp], comp);
}
template <typename VectorStruct>
inline
- void update_ghost_values_start (const std::vector<VectorStruct*> &src)
- void update_ghost_values_start (const std::vector<VectorStruct *> &src)
++ void update_ghost_values_start (const std::vector<VectorStruct *> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_start(*src[comp], comp);
}
template <typename VectorStruct>
inline
- void update_ghost_values_finish (const std::vector<VectorStruct> &src)
+ void update_ghost_values_finish (const std::vector<VectorStruct> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_finish(src[comp]);
}
template <typename VectorStruct>
inline
- void update_ghost_values_finish (const std::vector<VectorStruct*> &src)
- void update_ghost_values_finish (const std::vector<VectorStruct *> &src)
++ void update_ghost_values_finish (const std::vector<VectorStruct *> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_finish(*src[comp]);
}
return NULL;
}
- tbb::empty_task* dummy;
+ tbb::empty_task *dummy;
private:
- const Worker &function;
+ const Worker &function;
const unsigned int partition;
const internal::MatrixFreeFunctions::TaskInfo &task_info;
};
MatrixFree<dim, Number>::cell_loop
(const std_cxx1x::function<void (const MatrixFree<dim,Number> &,
OutVector &,
- const InVector&,
+ const InVector &,
const std::pair<unsigned int,
- unsigned int> &)> &cell_operation,
+ unsigned int> &)> &cell_operation,
OutVector &dst,
- const InVector &src) const
+ const InVector &src) const
{
#if DEAL_II_USE_MT==1
template<class SOLVER, class VECTOR>
template<class MATRIX, class PRECOND>
MGCoarseGridLACIteration<SOLVER, VECTOR>
- ::MGCoarseGridLACIteration(SOLVER& s,
+ ::MGCoarseGridLACIteration(SOLVER &s,
- const MATRIX &m,
+ const MATRIX &m,
const PRECOND &p)
- :
- solver(&s, typeid(*this).name())
+ :
+ solver(&s, typeid(*this).name())
{
matrix = new PointerMatrix<MATRIX, VECTOR>(&m);
precondition = new PointerMatrix<PRECOND, VECTOR>(&p);
template<class MATRIX, class PRECOND>
void
MGCoarseGridLACIteration<SOLVER, VECTOR>
- ::initialize(SOLVER& s,
+ ::initialize(SOLVER &s,
- const MATRIX &m,
+ const MATRIX &m,
const PRECOND &p)
{
solver = &s;
void
make_boundary_list (const MGDoFHandler<dim,spacedim> &mg_dof,
const typename FunctionMap<dim>::type &function_map,
- std::vector<std::set<unsigned int> > &boundary_indices,
+ std::vector<std::set<unsigned int> > &boundary_indices,
const ComponentMask &component_mask = ComponentMask());
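A sketch of how the declaration above is typically used, as a fragment inside a dim-templated setup routine (mg_dof_handler and triangulation are assumed to be an initialized MGDoFHandler<dim> and its Triangulation, the enclosing namespace is assumed to be MGTools, and boundary indicator 0 is assumed to carry homogeneous Dirichlet data):

    ZeroFunction<dim>                    homogeneous_dirichlet;
    typename FunctionMap<dim>::type      dirichlet_boundary;
    dirichlet_boundary[0] = &homogeneous_dirichlet;

    // one set of boundary DoF indices per multigrid level
    std::vector<std::set<unsigned int> > boundary_indices (triangulation.n_levels());
    MGTools::make_boundary_list (mg_dof_handler,
                                 dirichlet_boundary,
                                 boundary_indices);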
- /**
- * The same function as above, but return
- * an IndexSet rather than a
- * std::set<unsigned int> on each level.
- */
+ /**
+ * The same function as above, but return
+ * an IndexSet rather than a
+ * std::set<unsigned int> on each level.
+ */
template <int dim, int spacedim>
void
make_boundary_list (const MGDoFHandler<dim,spacedim> &mg_dof,
template <int dim, int spacedim>
void
extract_inner_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::vector<bool> > &interface_dofs,
- std::vector<std::vector<bool> > &boundary_interface_dofs);
+ std::vector<std::vector<bool> > &interface_dofs,
+ std::vector<std::vector<bool> > &boundary_interface_dofs);
- /**
- * Does the same as the function above,
- * but fills only the interface_dofs.
- */
+ /**
+ * Does the same as the function above,
+ * but fills only the interface_dofs.
+ */
template <int dim, int spacedim>
void
extract_inner_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
template <int dim, int spacedim>
void
extract_non_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::set<unsigned int> > &non_interface_dofs);
- std::vector<std::set<unsigned int> > &non_interface_dofs);
++ std::vector<std::set<unsigned int> > &non_interface_dofs);
}
/* @} */
*/
class DerivativeApproximation
{
+ public:
+ /**
+ * This function is used to
+ * obtain an approximation of the
+ * gradient. Pass it the DoF
+ * handler object that describes
+ * the finite element field, a
+ * nodal value vector, and
+ * receive the cell-wise
+ * Euclidean norm of the
+ * approximated gradient.
+ *
+ * The last parameter denotes the
+ * solution component, for which the
+ * gradient is to be computed. It
+ * defaults to the first component. For
+ * scalar elements, this is the only
+ * valid choice; for vector-valued ones,
+ * any component between zero and the
+ * number of vector components can be
+ * given here.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_gradient (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * Calls the @p approximate_gradient
+ * function above with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_gradient (const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * This function is the analogue
+ * to the one above, computing
+ * finite difference
+ * approximations of the tensor
+ * of second derivatives. Pass it
+ * the DoF handler object that
+ * describes the finite element
+ * field, a nodal value vector,
+ * and receive the cell-wise
+ * spectral norm of the
+ * approximated tensor of second
+ * derivatives. The spectral norm
+ * is the matrix norm associated
+ * to the $l_2$ vector norm.
+ *
+ * The last parameter denotes the
+ * solution component, for which
+ * the gradient is to be
+ * computed. It defaults to the
+ * first component. For
+ * scalar elements, this is the only
+ * valid choice; for vector-valued ones,
+ * any component between zero and the
+ * number of vector components can be
+ * given here.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_second_derivative (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * Calls the @p approximate_second_derivative
+ * function above with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_second_derivative (const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * This function calculates the
+ * <tt>order</tt>-th order approximate
+ * derivative and returns the full tensor
+ * for a single cell.
+ *
+ * The last parameter denotes the
+ * solution component, for which
+ * the gradient is to be
+ * computed. It defaults to the
+ * first component. For
+ * scalar elements, this is the only
+ * valid choice; for vector-valued ones,
+ * any component between zero and the
+ * number of vector components can be
+ * given here.
+ */
+
+ template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
+ static void
+ approximate_derivative_tensor (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const typename DH<dim,spacedim>::active_cell_iterator &cell,
+ Tensor<order,dim> &derivative,
+ const unsigned int component = 0);
+
+ /**
+ * Same as above, with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
+
+ template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
+ static void
+ approximate_derivative_tensor (const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const typename DH<dim,spacedim>::active_cell_iterator &cell,
+ Tensor<order,dim> &derivative,
+ const unsigned int component = 0);
+
+ /**
+ * Return the norm of the derivative.
+ */
+ template <int dim, int order>
+ static double
+ derivative_norm(const Tensor<order,dim> &derivative);
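A minimal sketch of how the functions above are typically called, as a fragment inside a dim-templated solver class (dof_handler, solution, triangulation, and the active cell iterator cell are assumed to exist and to match each other; this mirrors the common use as a refinement indicator):

    Vector<float> gradient_indicator (triangulation.n_active_cells());
    DerivativeApproximation::approximate_gradient (dof_handler,
                                                   solution,
                                                   gradient_indicator);

    // For a single cell, the full derivative tensor can be requested instead:
    Tensor<2,dim> second_derivatives;
    DerivativeApproximation::approximate_derivative_tensor (dof_handler,
                                                            solution,
                                                            cell,
                                                            second_derivatives);
    const double spectral_norm =
      DerivativeApproximation::derivative_norm (second_derivatives);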
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidVectorLength,
+ int, int,
+ << "Vector has length " << arg1 << ", but should have "
+ << arg2);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInsufficientDirections);
+
+ private:
+
+ /**
+ * The following class is used to
+ * describe the data needed to
+ * compute the finite difference
+ * approximation to the gradient
+ * on a cell. See the general
+ * documentation of this class
+ * for more information on
+ * implementational details.
+ *
+ * @author Wolfgang Bangerth, 2000
+ */
+ template <int dim>
+ class Gradient
+ {
public:
- /**
- * This function is used to
- * obtain an approximation of the
- * gradient. Pass it the DoF
- * handler object that describes
- * the finite element field, a
- * nodal value vector, and
- * receive the cell-wise
- * Euclidian norm of the
- * approximated gradient.
- *
- * The last parameter denotes the
- * solution component, for which the
- * gradient is to be computed. It
- * defaults to the first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_gradient (const Mapping<dim,spacedim> &mapping,
+ /**
+ * Declare which data fields have
+ * to be updated for the function
+ * @p get_projected_derivative
+ * to work.
+ */
+ static const UpdateFlags update_flags;
+
+ /**
+ * Declare the data type which
+ * holds the derivative described
+ * by this class.
+ */
+ typedef Tensor<1,dim> Derivative;
+
+ /**
+ * Likewise declare the data type
+ * that holds the derivative
+ * projected to a certain
+ * direction.
+ */
+ typedef double ProjectedDerivative;
+
+ /**
+ * Given an FEValues object
+ * initialized to a cell, and a
+ * solution vector, extract the
+ * desired derivative at the
+ * first quadrature point (which
+ * is the only one, as we only
+ * evaluate the finite element
+ * field at the center of each
+ * cell).
+ */
+ template <class InputVector, int spacedim>
+ static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
++ get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
+ const InputVector &solution,
+ const unsigned int component);
+
+ /**
+ * Return the norm of the
+ * derivative object. Here, for
+ * the gradient, we choose the
+ * Euclidean norm of the gradient
+ * vector.
+ */
+ static double derivative_norm (const Derivative &d);
+
+ /**
+ * If for the present derivative
+ * order, symmetrization of the
+ * derivative tensor is
+ * necessary, then do so on the
+ * argument.
+ *
+ * For the first derivatives, no
+ * such thing is necessary, so
+ * this function is a no-op.
+ */
+ static void symmetrize (Derivative &derivative_tensor);
+ };
+
+
+
+ /**
+ * The following class is used to
+ * describe the data needed to
+ * compute the finite difference
+ * approximation to the second
+ * derivatives on a cell. See the
+ * general documentation of this
+ * class for more information on
+ * implementational details.
+ *
+ * @author Wolfgang Bangerth, 2000
+ */
+ template <int dim>
+ class SecondDerivative
+ {
+ public:
+ /**
+ * Declare which data fields have
+ * to be updated for the function
+ * @p get_projected_derivative
+ * to work.
+ */
+ static const UpdateFlags update_flags;
+
+ /**
+ * Declare the data type which
+ * holds the derivative described
+ * by this class.
+ */
+ typedef Tensor<2,dim> Derivative;
+
+ /**
+ * Likewise declare the data type
+ * that holds the derivative
+ * projected to a certain
+ * direction.
+ */
+ typedef Tensor<1,dim> ProjectedDerivative;
+
+ /**
+ * Given an FEValues object
+ * initialized to a cell, and a
+ * solution vector, extract the
+ * desired derivative at the
+ * first quadrature point (which
+ * is the only one, as we only
+ * evaluate the finite element
+ * field at the center of each
+ * cell).
+ */
+ template <class InputVector, int spacedim>
+ static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
++ get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
+ const InputVector &solution,
+ const unsigned int component);
+
+ /**
+ * Return the norm of the
+ * derivative object. Here, for
+ * the (symmetric) tensor of
+ * second derivatives, we choose
+ * the absolute value of the
+ * largest eigenvalue, which is
+ * the matrix norm associated to
+ * the $l_2$ norm of vectors. It
+ * is also the largest value of
+ * the curvature of the solution.
+ */
+ static double derivative_norm (const Derivative &d);
+
+ /**
+ * If for the present derivative
+ * order, symmetrization of the
+ * derivative tensor is
+ * necessary, then do so on the
+ * argument.
+ *
+ * For the second derivatives,
+ * each entry of the tensor is
+ * set to the mean of its value
+ * and the value of the transpose
+ * element.
+ *
+ * Note that this function
+ * actually modifies its
+ * argument.
+ */
+ static void symmetrize (Derivative &derivative_tensor);
+ };
+
+ template <int dim>
+ class ThirdDerivative
+ {
+ public:
+ /**
+ * Declare which data fields have
+ * to be updated for the function
+ * @p get_projected_derivative
+ * to work.
+ */
+ static const UpdateFlags update_flags;
+
+ /**
+ * Declare the data type which
+ * holds the derivative described
+ * by this class.
+ */
+ typedef Tensor<3,dim> Derivative;
+
+ /**
+ * Likewise declare the data type
+ * that holds the derivative
+ * projected to a certain
+ * direction.
+ */
+ typedef Tensor<2,dim> ProjectedDerivative;
+
+ /**
+ * Given an FEValues object
+ * initialized to a cell, and a
+ * solution vector, extract the
+ * desired derivative at the
+ * first quadrature point (which
+ * is the only one, as we only
+ * evaluate the finite element
+ * field at the center of each
+ * cell).
+ */
+ template <class InputVector, int spacedim>
+ static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
++ get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
+ const InputVector &solution,
+ const unsigned int component);
+
+ /**
+ * Return the norm of the
+ * derivative object. Here, for
+ * the (symmetric) tensor of
+ * third derivatives, we choose
+ * the absolute value of the
+ * largest eigenvalue, which is
+ * the matrix norm associated to
+ * the $l_2$ norm of vectors.
+ */
+ static double derivative_norm (const Derivative &d);
+
+ /**
+ * If for the present derivative
+ * order, symmetrization of the
+ * derivative tensor is
+ * necessary, then do so on the
+ * argument.
+ *
+ * For the third derivatives, each
+ * entry of the tensor is set to
+ * the mean of the entries obtained
+ * by permuting its indices.
+ *
+ * Note that this function
+ * actually modifies its
+ * argument.
+ */
+ static void symmetrize (Derivative &derivative_tensor);
+ };
+
+ template <int order, int dim>
+ class DerivativeSelector
+ {
+ public:
+ /**
+ * typedef to select the
+ * DerivativeDescription corresponding
+ * to the <tt>order</tt>th
+ * derivative. In this general template
+ * we set an invalid typedef to void;
+ * the real typedefs have to be
+ * provided by the specializations.
+ */
+ typedef void DerivDescr;
+
+ };
+
+ template <int dim>
+ class DerivativeSelector<1,dim>
+ {
+ public:
+
+ typedef Gradient<dim> DerivDescr;
+ };
+
+ template <int dim>
+ class DerivativeSelector<2,dim>
+ {
+ public:
+
+ typedef SecondDerivative<dim> DerivDescr;
+ };
+
+ template <int dim>
+ class DerivativeSelector<3,dim>
+ {
+ public:
+
+ typedef ThirdDerivative<dim> DerivDescr;
+ };
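+
+      // A minimal sketch of how the selector classes above are meant to be
+      // used (assuming template parameters <tt>order</tt> and <tt>dim</tt>
+      // are in scope): pick the matching description class and work with
+      // its typedefs and static functions generically, e.g.
+      //
+      //   typedef typename DerivativeSelector<order,dim>::DerivDescr
+      //     DerivativeDescription;
+      //   typename DerivativeDescription::Derivative derivative_tensor;
+      //   DerivativeDescription::symmetrize (derivative_tensor);
+      //   const double norm
+      //     = DerivativeDescription::derivative_norm (derivative_tensor);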
+
+
+
+
+ private:
+
+ /**
+ * Convenience typedef denoting
+ * the range of indices on which
+ * a certain thread shall
+ * operate.
+ */
+ typedef std::pair<unsigned int,unsigned int> IndexInterval;
+
+ /**
+ * Kind of the main function of
+ * this class. It is called by
+ * the public entry points to
+ * this class with the correct
+ * template first argument and
+ * then simply calls the
+ * @p approximate function,
+ * after setting up several
+ * threads and doing some
+ * administration that is
+ * independent of the actual
+ * derivative to be computed.
+ *
+ * The @p component argument
+ * denotes which component of the
+ * solution vector we are to work
+ * on.
+ */
+ template <class DerivativeDescription, int dim,
+ template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_derivative (const Mapping<dim,spacedim> &mapping,
const DH<dim,spacedim> &dof,
const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * Calls the @p interpolate
- * function, see above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_gradient (const DH<dim,spacedim> &dof,
- const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * This function is the analogue
- * to the one above, computing
- * finite difference
- * approximations of the tensor
- * of second derivatives. Pass it
- * the DoF handler object that
- * describes the finite element
- * field, a nodal value vector,
- * and receive the cell-wise
- * spectral norm of the
- * approximated tensor of second
- * derivatives. The spectral norm
- * is the matrix norm associated
- * to the $l_2$ vector norm.
- *
- * The last parameter denotes the
- * solution component, for which
- * the gradient is to be
- * computed. It defaults to the
- * first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_second_derivative (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * Calls the @p interpolate
- * function, see above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_second_derivative (const DH<dim,spacedim> &dof,
- const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * This function calculates the
- * <tt>order</tt>-th order approximate
- * derivative and returns the full tensor
- * for a single cell.
- *
- * The last parameter denotes the
- * solution component, for which
- * the gradient is to be
- * computed. It defaults to the
- * first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
- */
-
- template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
- static void
- approximate_derivative_tensor (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
- Tensor<order,dim> &derivative,
- const unsigned int component = 0);
-
- /**
- * Same as above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
-
- template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
- static void
- approximate_derivative_tensor (const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
- Tensor<order,dim> &derivative,
- const unsigned int component = 0);
-
- /**
- * Return the norm of the derivative.
- */
- template <int dim, int order>
- static double
- derivative_norm(const Tensor<order,dim> &derivative);
-
- /**
- * Exception
- */
- DeclException2 (ExcInvalidVectorLength,
- int, int,
- << "Vector has length " << arg1 << ", but should have "
- << arg2);
- /**
- * Exception
- */
- DeclException0 (ExcInsufficientDirections);
-
- private:
-
- /**
- * The following class is used to
- * describe the data needed to
- * compute the finite difference
- * approximation to the gradient
- * on a cell. See the general
- * documentation of this class
- * for more information on
- * implementational details.
- *
- * @author Wolfgang Bangerth, 2000
- */
- template <int dim>
- class Gradient
- {
- public:
- /**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
- */
- static const UpdateFlags update_flags;
-
- /**
- * Declare the data type which
- * holds the derivative described
- * by this class.
- */
- typedef Tensor<1,dim> Derivative;
-
- /**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
- */
- typedef double ProjectedDerivative;
-
- /**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
- */
- template <class InputVector, int spacedim>
- static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
- const InputVector &solution,
- const unsigned int component);
-
- /**
- * Return the norm of the
- * derivative object. Here, for
- * the gradient, we choose the
- * Euclidian norm of the gradient
- * vector.
- */
- static double derivative_norm (const Derivative &d);
-
- /**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
- *
- * For the first derivatives, no
- * such thing is necessary, so
- * this function is a no-op.
- */
- static void symmetrize (Derivative &derivative_tensor);
- };
-
-
-
- /**
- * The following class is used to
- * describe the data needed to
- * compute the finite difference
- * approximation to the second
- * derivatives on a cell. See the
- * general documentation of this
- * class for more information on
- * implementational details.
- *
- * @author Wolfgang Bangerth, 2000
- */
- template <int dim>
- class SecondDerivative
- {
- public:
- /**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
- */
- static const UpdateFlags update_flags;
-
- /**
- * Declare the data type which
- * holds the derivative described
- * by this class.
- */
- typedef Tensor<2,dim> Derivative;
-
- /**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
- */
- typedef Tensor<1,dim> ProjectedDerivative;
-
- /**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
- */
- template <class InputVector, int spacedim>
- static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
- const InputVector &solution,
- const unsigned int component);
-
- /**
- * Return the norm of the
- * derivative object. Here, for
- * the (symmetric) tensor of
- * second derivatives, we choose
- * the absolute value of the
- * largest eigenvalue, which is
- * the matrix norm associated to
- * the $l_2$ norm of vectors. It
- * is also the largest value of
- * the curvature of the solution.
- */
- static double derivative_norm (const Derivative &d);
-
- /**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
- *
- * For the second derivatives,
- * each entry of the tensor is
- * set to the mean of its value
- * and the value of the transpose
- * element.
- *
- * Note that this function
- * actually modifies its
- * argument.
- */
- static void symmetrize (Derivative &derivative_tensor);
- };
-
- template <int dim>
- class ThirdDerivative
- {
- public:
- /**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
- */
- static const UpdateFlags update_flags;
-
- /**
- * Declare the data type which
- * holds the derivative described
- * by this class.
- */
- typedef Tensor<3,dim> Derivative;
-
- /**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
- */
- typedef Tensor<2,dim> ProjectedDerivative;
-
- /**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
- */
- template <class InputVector, int spacedim>
- static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
- const InputVector &solution,
- const unsigned int component);
-
- /**
- * Return the norm of the
- * derivative object. Here, for
- * the (symmetric) tensor of
- * second derivatives, we choose
- * the absolute value of the
- * largest eigenvalue, which is
- * the matrix norm associated to
- * the $l_2$ norm of vectors. It
- * is also the largest value of
- * the curvature of the solution.
- */
- static double derivative_norm (const Derivative &d);
-
- /**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
- *
- * For the second derivatives,
- * each entry of the tensor is
- * set to the mean of its value
- * and the value of the transpose
- * element.
- *
- * Note that this function
- * actually modifies its
- * argument.
- */
- static void symmetrize (Derivative &derivative_tensor);
- };
-
- template <int order, int dim>
- class DerivativeSelector
- {
- public:
- /**
- * typedef to select the
- * DerivativeDescription corresponding
- * to the <tt>order</tt>th
- * derivative. In this general template
- * we set an unvalid typedef to void,
- * the real typedefs have to be
- * specialized.
- */
- typedef void DerivDescr;
-
- };
-
- template <int dim>
- class DerivativeSelector<1,dim>
- {
- public:
-
- typedef Gradient<dim> DerivDescr;
- };
-
- template <int dim>
- class DerivativeSelector<2,dim>
- {
- public:
-
- typedef SecondDerivative<dim> DerivDescr;
- };
-
- template <int dim>
- class DerivativeSelector<3,dim>
- {
- public:
-
- typedef ThirdDerivative<dim> DerivDescr;
- };
-
-
-
-
- private:
-
- /**
- * Convenience typedef denoting
- * the range of indices on which
- * a certain thread shall
- * operate.
- */
- typedef std::pair<unsigned int,unsigned int> IndexInterval;
-
- /**
- * Kind of the main function of
- * this class. It is called by
- * the public entry points to
- * this class with the correct
- * template first argument and
- * then simply calls the
- * @p approximate function,
- * after setting up several
- * threads and doing some
- * administration that is
- * independent of the actual
- * derivative to be computed.
- *
- * The @p component argument
- * denotes which component of the
- * solution vector we are to work
- * on.
- */
- template <class DerivativeDescription, int dim,
- template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_derivative (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const unsigned int component,
- Vector<float> &derivative_norm);
-
- /**
- * Compute the derivative
- * approximation on the cells in
- * the range given by the third
- * parameter.
- * Fill the @p derivative_norm vector with
- * the norm of the computed derivative
- * tensors on each cell.
- */
- template <class DerivativeDescription, int dim,
- template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const unsigned int component,
- const IndexInterval &index_interval,
- Vector<float> &derivative_norm);
-
- /**
- * Compute the derivative approximation on
- * one cell. This computes the full
- * derivative tensor.
- */
- template <class DerivativeDescription, int dim,
- template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_cell (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const unsigned int component,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
- typename DerivativeDescription::Derivative &derivative);
+ const unsigned int component,
+ Vector<float> &derivative_norm);
+
+ /**
+ * Compute the derivative
+ * approximation on the cells in
+ * the range given by the
+ * @p index_interval parameter.
+ * Fill the @p derivative_norm vector with
+ * the norm of the computed derivative
+ * tensors on each cell.
+ */
+ template <class DerivativeDescription, int dim,
+ template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const unsigned int component,
+ const IndexInterval &index_interval,
+ Vector<float> &derivative_norm);
+
+ /**
+ * Compute the derivative approximation on
+ * one cell. This computes the full
+ * derivative tensor.
+ */
+ template <class DerivativeDescription, int dim,
+ template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_cell (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const unsigned int component,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
++ const typename DH<dim,spacedim>::active_cell_iterator &cell,
+ typename DerivativeDescription::Derivative &derivative);
};
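// A minimal usage sketch of the approximate_gradient entry point declared
// above (the objects `triangulation', `dof_handler' and `solution' are
// assumed to be set up elsewhere): compute a cell-wise refinement indicator
// from the approximated gradient.
//
//   Vector<float> gradient_indicator (triangulation.n_active_cells());
//   DerivativeApproximation::approximate_gradient (dof_handler,
//                                                  solution,
//                                                  gradient_indicator);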
template <int spacedim>
class KellyErrorEstimator<1,spacedim>
{
- public:
- /**
- * Implementation of the error
- * estimator described above. You
- * may give a coefficient, but
- * there is a default value which
- * denotes the constant
- * coefficient with value
- * one. The coefficient function
- * may either be a scalar one, in
- * which case it is used for all
- * components of the finite
- * element, or a vector-valued
- * one with as many components as
- * there are in the finite
- * element; in the latter case,
- * each component is weighted by
- * the respective component in
- * the coefficient.
- *
- * You might give a list of components
- * you want to evaluate, in case the
- * finite element used by the DoFHandler
- * object is vector-valued. You then have
- * to set those entries to true in the
- * bit-vector @p component_mask for which
- * the respective component is to be used
- * in the error estimator. The default is
- * to use all components, which is done
- * by either providing a bit-vector with
- * all-set entries, or an empty
- * bit-vector. All the other parameters
- * are as in the general case used for 2d
- * and higher.
- *
- * The estimator supports multithreading
- * and splits the cells to
- * <tt>multithread_info.n_default_threads</tt>
- * (default) threads. The number of
- * threads to be used in multithreaded
- * mode can be set with the last
- * parameter of the error estimator.
- * Multithreading is not presently
- * implemented for 1d, but we retain the
- * respective parameter for compatibility
- * with the function signature in the
- * general case.
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Calls the @p estimate
- * function, see above, with
- * <tt>mapping=MappingQ1<1>()</tt>.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Same function as above, but
- * accepts more than one solution
- * vectors and returns one error
- * vector for each solution
- * vector. For the reason of
- * existence of this function,
- * see the general documentation
- * of this class.
- *
- * Since we do not want to force
- * the user of this function to
- * copy around their solution
- * vectors, the vector of
- * solution vectors takes
- * pointers to the solutions,
- * rather than being a vector of
- * vectors. This makes it simpler
- * to have the solution vectors
- * somewhere in memory, rather
- * than to have them collected
- * somewhere special. (Note that
- * it is not possible to
- * construct of vector of
- * references, so we had to use a
- * vector of pointers.)
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Calls the @p estimate
- * function, see above, with
- * <tt>mapping=MappingQ1<1>()</tt>.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Exception
- */
- DeclException0 (ExcInvalidBoundaryIndicator);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidComponentMask);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidCoefficient);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidBoundaryFunction);
- /**
- * Exception
- */
- DeclException2 (ExcIncompatibleNumberOfElements,
- int, int,
- << "The number of elements " << arg1 << " and " << arg2
- << " of the vectors do not match!");
- /**
- * Exception
- */
- DeclException0 (ExcInvalidSolutionVector);
- /**
- * Exception
- */
- DeclException0 (ExcNoSolutions);
+ public:
+ /**
+ * Implementation of the error
+ * estimator described above. You
+ * may give a coefficient, but
+ * there is a default value which
+ * denotes the constant
+ * coefficient with value
+ * one. The coefficient function
+ * may either be a scalar one, in
+ * which case it is used for all
+ * components of the finite
+ * element, or a vector-valued
+ * one with as many components as
+ * there are in the finite
+ * element; in the latter case,
+ * each component is weighted by
+ * the respective component in
+ * the coefficient.
+ *
+ * You might give a list of components
+ * you want to evaluate, in case the
+ * finite element used by the DoFHandler
+ * object is vector-valued. You then have
+ * to set those entries to true in the
+ * bit-vector @p component_mask for which
+ * the respective component is to be used
+ * in the error estimator. The default is
+ * to use all components, which is done
+ * by either providing a bit-vector with
+ * all-set entries, or an empty
+ * bit-vector. All the other parameters
+ * are as in the general case used for 2d
+ * and higher.
+ *
+ * The estimator supports multithreading
+ * and splits the cells among
+ * <tt>multithread_info.n_default_threads</tt>
+ * (by default) threads. The number of
+ * threads to be used in multithreaded
+ * mode can be set with the last
+ * parameter of the error estimator.
+ * Multithreading is not presently
+ * implemented for 1d, but we retain the
+ * respective parameter for compatibility
+ * with the function signature in the
+ * general case.
+ */
+ template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
++ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Calls the @p estimate
+ * function, see above, with
+ * <tt>mapping=MappingQ1<1>()</tt>.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Same function as above, but
+ * accepts more than one solution
+ * vector and returns one error
+ * vector for each solution
+ * vector. For the reason of
+ * existence of this function,
+ * see the general documentation
+ * of this class.
+ *
+ * Since we do not want to force
+ * the user of this function to
+ * copy around their solution
+ * vectors, the vector of
+ * solution vectors takes
+ * pointers to the solutions,
+ * rather than being a vector of
+ * vectors. This makes it simpler
+ * to have the solution vectors
+ * somewhere in memory, rather
+ * than to have them collected
+ * somewhere special. (Note that
+ * it is not possible to
+ * construct a vector of
+ * references, so we had to use a
+ * vector of pointers.)
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Calls the @p estimate
+ * function, see above, with
+ * <tt>mapping=MappingQ1<1>()</tt>.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidBoundaryIndicator);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidComponentMask);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidCoefficient);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidBoundaryFunction);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcIncompatibleNumberOfElements,
+ int, int,
+ << "The number of elements " << arg1 << " and " << arg2
+ << " of the vectors do not match!");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidSolutionVector);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNoSolutions);
};
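// A minimal usage sketch of the estimator (using the general <dim> variant
// rather than the 1d specialization above; `triangulation', `dof_handler'
// and `solution' are assumed to exist):
//
//   Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
//   KellyErrorEstimator<dim>::estimate (dof_handler,
//                                       QGauss<dim-1>(3),
//                                       typename FunctionMap<dim>::type(),
//                                       solution,
//                                       estimated_error_per_cell);
//
// The multi-vector variant takes pointers, as explained above; e.g. with two
// solution vectors `solution_1' and `solution_2' (names assumed):
//
//   std::vector<const Vector<double> *> solutions;
//   solutions.push_back (&solution_1);
//   solutions.push_back (&solution_2);
//   Vector<float> error_1 (triangulation.n_active_cells());
//   Vector<float> error_2 (triangulation.n_active_cells());
//   std::vector<Vector<float> *> errors;
//   errors.push_back (&error_1);
//   errors.push_back (&error_2);
//   KellyErrorEstimator<dim>::estimate (dof_handler,
//                                       QGauss<dim-1>(3),
//                                       typename FunctionMap<dim>::type(),
//                                       solutions,
//                                       errors);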
const bool eliminate_columns = true);
#ifdef DEAL_II_USE_PETSC
- /**
- * Apply dirichlet boundary conditions to
- * the system matrix and vectors as
- * described in the general
- * documentation. This function works on
- * the classes that are used to wrap
- * PETSc objects.
- *
- * Note that this function is not very
- * efficient: it needs to alternatingly
- * read and write into the matrix, a
- * situation that PETSc does not handle
- * too well. In addition, we only get rid
- * of rows corresponding to boundary
- * nodes, but the corresponding case of
- * deleting the respective columns
- * (i.e. if @p eliminate_columns is @p
- * true) is not presently implemented,
- * and probably will never because it is
- * too expensive without direct access to
- * the PETSc data structures. (This leads
- * to the situation where the action
- * indicates by the default value of the
- * last argument is actually not
- * implemented; that argument has
- * <code>true</code> as its default value
- * to stay consistent with the other
- * functions of same name in this class.)
- * A third reason against this function
- * is that it doesn't handle the case
- * where the matrix is distributed across
- * an MPI system.
- *
- * This function is used in
- * step-17 and
- * step-18.
- */
+ /**
+ * Apply Dirichlet boundary conditions to
+ * the system matrix and vectors as
+ * described in the general
+ * documentation. This function works on
+ * the classes that are used to wrap
+ * PETSc objects.
+ *
+ * Note that this function is not very
+ * efficient: it needs to alternatingly
+ * read and write into the matrix, a
+ * situation that PETSc does not handle
+ * too well. In addition, we only get rid
+ * of rows corresponding to boundary
+ * nodes, but the corresponding case of
+ * deleting the respective columns
+ * (i.e. if @p eliminate_columns is @p
+ * true) is not presently implemented,
+ * and probably never will be, because it is
+ * too expensive without direct access to
+ * the PETSc data structures. (This leads
+ * to the situation where the action
+ * indicated by the default value of the
+ * last argument is actually not
+ * implemented; that argument has
+ * <code>true</code> as its default value
+ * to stay consistent with the other
+ * functions of same name in this class.)
+ * A third reason against this function
+ * is that it doesn't handle the case
+ * where the matrix is distributed across
+ * an MPI system.
+ *
+ * This function is used in
+ * step-17 and
+ * step-18.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::SparseMatrix &matrix,
- PETScWrappers::Vector &solution,
- PETScWrappers::Vector &right_hand_side,
+ PETScWrappers::SparseMatrix &matrix,
+ PETScWrappers::Vector &solution,
+ PETScWrappers::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Same function, but for parallel PETSc
- * matrices.
- */
+ /**
+ * Same function, but for parallel PETSc
+ * matrices.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
- PETScWrappers::MPI::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
+ PETScWrappers::MPI::SparseMatrix &matrix,
+ PETScWrappers::MPI::Vector &solution,
+ PETScWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Same function, but for
- * parallel PETSc matrices. Note
- * that this function only
- * operates on the local range of
- * the parallel matrix, i.e. it
- * only eliminates rows
- * corresponding to degrees of
- * freedom for which the row is
- * stored on the present
- * processor. All other boundary
- * nodes are ignored, and it
- * doesn't matter whether they
- * are present in the first
- * argument to this function or
- * not. A consequence of this,
- * however, is that this function
- * has to be called from all
- * processors that participate in
- * sharing the contents of the
- * given matrices and vectors. It
- * is also implied that the local
- * range for all objects passed
- * to this function is the same.
- */
+ /**
+ * Same function, but for
+ * parallel PETSc matrices. Note
+ * that this function only
+ * operates on the local range of
+ * the parallel matrix, i.e. it
+ * only eliminates rows
+ * corresponding to degrees of
+ * freedom for which the row is
+ * stored on the present
+ * processor. All other boundary
+ * nodes are ignored, and it
+ * doesn't matter whether they
+ * are present in the first
+ * argument to this function or
+ * not. A consequence of this,
+ * however, is that this function
+ * has to be called from all
+ * processors that participate in
+ * sharing the contents of the
+ * given matrices and vectors. It
+ * is also implied that the local
+ * range for all objects passed
+ * to this function is the same.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
+ PETScWrappers::MPI::SparseMatrix &matrix,
PETScWrappers::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
+ PETScWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Same as above but for BlockSparseMatrix.
- */
+ /**
+ * Same as above but for BlockSparseMatrix.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::BlockSparseMatrix &matrix,
+ apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ PETScWrappers::MPI::BlockSparseMatrix &matrix,
PETScWrappers::MPI::BlockVector &solution,
PETScWrappers::MPI::BlockVector &right_hand_side,
const bool eliminate_columns = true);
#endif
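// A brief usage sketch of the PETSc variant above, along the lines of its
// use in step-17 (object names are assumed; passing `false' avoids the
// unimplemented column elimination discussed in the documentation):
//
//   std::map<unsigned int,double> boundary_values;
//   VectorTools::interpolate_boundary_values (dof_handler,
//                                             0,
//                                             ZeroFunction<dim>(),
//                                             boundary_values);
//   MatrixTools::apply_boundary_values (boundary_values,
//                                       system_matrix,
//                                       solution,
//                                       system_rhs,
//                                       false);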
#ifdef DEAL_II_USE_TRILINOS
- /**
- * Apply dirichlet boundary
- * conditions to the system matrix
- * and vectors as described in the
- * general documentation. This
- * function works on the classes
- * that are used to wrap Trilinos
- * objects.
- *
- * Note that this function is not
- * very efficient: it needs to
- * alternatingly read and write
- * into the matrix, a situation
- * that Trilinos does not handle
- * too well. In addition, we only
- * get rid of rows corresponding to
- * boundary nodes, but the
- * corresponding case of deleting
- * the respective columns (i.e. if
- * @p eliminate_columns is @p true)
- * is not presently implemented,
- * and probably will never because
- * it is too expensive without
- * direct access to the Trilinos
- * data structures. (This leads to
- * the situation where the action
- * indicates by the default value
- * of the last argument is actually
- * not implemented; that argument
- * has <code>true</code> as its
- * default value to stay consistent
- * with the other functions of same
- * name in this class.) A third
- * reason against this function is
- * that it doesn't handle the case
- * where the matrix is distributed
- * across an MPI system.
- */
+ /**
+ * Apply Dirichlet boundary
+ * conditions to the system matrix
+ * and vectors as described in the
+ * general documentation. This
+ * function works on the classes
+ * that are used to wrap Trilinos
+ * objects.
+ *
+ * Note that this function is not
+ * very efficient: it needs to
+ * alternatingly read and write
+ * into the matrix, a situation
+ * that Trilinos does not handle
+ * too well. In addition, we only
+ * get rid of rows corresponding to
+ * boundary nodes, but the
+ * corresponding case of deleting
+ * the respective columns (i.e. if
+ * @p eliminate_columns is @p true)
+ * is not presently implemented,
+ * and probably never will be, because
+ * it is too expensive without
+ * direct access to the Trilinos
+ * data structures. (This leads to
+ * the situation where the action
+ * indicated by the default value
+ * of the last argument is actually
+ * not implemented; that argument
+ * has <code>true</code> as its
+ * default value to stay consistent
+ * with the other functions of same
+ * name in this class.) A third
+ * reason against this function is
+ * that it doesn't handle the case
+ * where the matrix is distributed
+ * across an MPI system.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
+ TrilinosWrappers::SparseMatrix &matrix,
TrilinosWrappers::Vector &solution,
TrilinosWrappers::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * This function does the same as
- * the one above, except now
- * working on block structures.
- */
+ /**
+ * This function does the same as
+ * the one above, except now
+ * working on block structures.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
TrilinosWrappers::BlockVector &solution,
TrilinosWrappers::BlockVector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Apply dirichlet boundary
- * conditions to the system matrix
- * and vectors as described in the
- * general documentation. This
- * function works on the classes
- * that are used to wrap Trilinos
- * objects.
- *
- * Note that this function is not
- * very efficient: it needs to
- * alternatingly read and write
- * into the matrix, a situation
- * that Trilinos does not handle
- * too well. In addition, we only
- * get rid of rows corresponding to
- * boundary nodes, but the
- * corresponding case of deleting
- * the respective columns (i.e. if
- * @p eliminate_columns is @p true)
- * is not presently implemented,
- * and probably will never because
- * it is too expensive without
- * direct access to the Trilinos
- * data structures. (This leads to
- * the situation where the action
- * indicates by the default value
- * of the last argument is actually
- * not implemented; that argument
- * has <code>true</code> as its
- * default value to stay consistent
- * with the other functions of same
- * name in this class.) This
- * function does work on MPI vector
- * types.
- */
+ /**
+ * Apply Dirichlet boundary
+ * conditions to the system matrix
+ * and vectors as described in the
+ * general documentation. This
+ * function works on the classes
+ * that are used to wrap Trilinos
+ * objects.
+ *
+ * Note that this function is not
+ * very efficient: it needs to
+ * alternatingly read and write
+ * into the matrix, a situation
+ * that Trilinos does not handle
+ * too well. In addition, we only
+ * get rid of rows corresponding to
+ * boundary nodes, but the
+ * corresponding case of deleting
+ * the respective columns (i.e. if
+ * @p eliminate_columns is @p true)
+ * is not presently implemented,
+ * and probably never will be, because
+ * it is too expensive without
+ * direct access to the Trilinos
+ * data structures. (This leads to
+ * the situation where the action
+ * indicated by the default value
+ * of the last argument is actually
+ * not implemented; that argument
+ * has <code>true</code> as its
+ * default value to stay consistent
+ * with the other functions of same
+ * name in this class.) This
+ * function does work on MPI vector
+ * types.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
+ TrilinosWrappers::SparseMatrix &matrix,
TrilinosWrappers::MPI::Vector &solution,
TrilinosWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * This function does the same as
- * the one above, except now working
- * on block structures.
- */
+ /**
+ * This function does the same as
+ * the one above, except now working
+ * on block structures.
+ */
void
apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
TrilinosWrappers::MPI::BlockVector &solution,
TrilinosWrappers::MPI::BlockVector &right_hand_side,
const bool eliminate_columns = true);
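// The Trilinos variants above follow the same calling convention; a short
// sketch with assumed object names:
//
//   MatrixTools::apply_boundary_values (boundary_values,
//                                       trilinos_matrix,
//                                       distributed_solution,
//                                       distributed_rhs,
//                                       false);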
template <int dim, int spacedim>
void
project_boundary_values (const Mapping<dim, spacedim> &mapping,
- const DoFHandler<dim, spacedim>&dof,
+ const DoFHandler<dim, spacedim> &dof,
const typename FunctionMap<spacedim>::type &boundary_functions,
const Quadrature<dim-1> &q,
- std::map<unsigned int,double> &boundary_values,
+ std::map<unsigned int,double> &boundary_values,
std::vector<unsigned int> component_mapping)
{
//TODO:[?] In project_boundary_values, no condensation of sparsity
const unsigned int List::max_int_value
- = std::numeric_limits<unsigned int>::max();
+ = std::numeric_limits<unsigned int>::max();
- const char* List::description_init = "[List";
+ const char *List::description_init = "[List";
- List::List (const PatternBase &p,
+ List::List (const PatternBase &p,
const unsigned int min_elements,
const unsigned int max_elements)
- :
- pattern (p.clone()),
- min_elements (min_elements),
- max_elements (max_elements)
+ :
+ pattern (p.clone()),
+ min_elements (min_elements),
+ max_elements (max_elements)
{
Assert (min_elements <= max_elements,
ExcInvalidRange (min_elements, max_elements));
const unsigned int Map::max_int_value
- = std::numeric_limits<unsigned int>::max();
+ = std::numeric_limits<unsigned int>::max();
- const char* Map::description_init = "[Map";
+ const char *Map::description_init = "[Map";
- Map::Map (const PatternBase &p_key,
- const PatternBase &p_value,
+ Map::Map (const PatternBase &p_key,
+ const PatternBase &p_value,
const unsigned int min_elements,
const unsigned int max_elements)
- :
- key_pattern (p_key.clone()),
- value_pattern (p_value.clone()),
- min_elements (min_elements),
- max_elements (max_elements)
+ :
+ key_pattern (p_key.clone()),
+ value_pattern (p_value.clone()),
+ min_elements (min_elements),
+ max_elements (max_elements)
{
Assert (min_elements <= max_elements,
ExcInvalidRange (min_elements, max_elements));
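// A brief, hypothetical usage sketch for the List and Map patterns defined
// here (the parameter names are made up for illustration):
//
//   ParameterHandler prm;
//   prm.declare_entry ("Refinement steps", "2,3,4",
//                      Patterns::List (Patterns::Integer(0), 1, 10),
//                      "Between 1 and 10 nonnegative integers");
//   prm.declare_entry ("Material map", "1:iron,2:copper",
//                      Patterns::Map (Patterns::Integer(0), Patterns::Anything()),
//                      "Mapping from material ids to material names");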
Assert (subface_no < GeometryInfo<dim>::max_children_per_face,
ExcInternalError());
- // As the quadrature points created by
- // QProjector are on subfaces in their
- // "standard location" we have to use a
- // permutation of the equivalent subface
- // number in order to respect face
- // orientation, flip and rotation. The
- // information we need here is exactly the
- // same as the
- // GeometryInfo<3>::child_cell_on_face info
- // for the bottom face (face 4) of a hex, as
- // on this the RefineCase of the cell matches
- // that of the face and the subfaces are
- // numbered in the same way as the child
- // cells.
-
- // in 3d, we have to account for faces that
- // have non-standard face orientation, flip
- // and rotation. thus, we have to store
- // _eight_ data sets per face or subface
- // already for the isotropic
- // case. Additionally, we have three
- // different refinement cases, resulting in
- // <tt>4 + 2 + 2 = 8</tt> different subfaces
- // for each face.
+ // As the quadrature points created by
+ // QProjector are on subfaces in their
+ // "standard location" we have to use a
+ // permutation of the equivalent subface
+ // number in order to respect face
+ // orientation, flip and rotation. The
+ // information we need here is exactly the
+ // same as the
+ // GeometryInfo<3>::child_cell_on_face info
+ // for the bottom face (face 4) of a hex, as
+ // on this the RefineCase of the cell matches
+ // that of the face and the subfaces are
+ // numbered in the same way as the child
+ // cells.
+
+ // in 3d, we have to account for faces that
+ // have non-standard face orientation, flip
+ // and rotation. thus, we have to store
+ // _eight_ data sets per face or subface
+ // already for the isotropic
+ // case. Additionally, we have three
+ // different refinement cases, resulting in
+ // <tt>4 + 2 + 2 = 8</tt> different subfaces
+ // for each face.
const unsigned int total_subfaces_per_face=8;
- // set up a table with the according offsets
- // for non-standard orientation, first index:
- // face_orientation (standard true=1), second
- // index: face_flip (standard false=0), third
- // index: face_rotation (standard false=0)
- //
- // note, that normally we should use the
- // obvious offsets 0,1,2,3,4,5,6,7. However,
- // prior to the changes enabling flipped and
- // rotated faces, in many places of the
- // library the convention was used, that the
- // first dataset with offset 0 corresponds to
- // a face in standard orientation. therefore
- // we use the offsets 4,5,6,7,0,1,2,3 here to
- // stick to that (implicit) convention
+ // set up a table with the according offsets
+ // for non-standard orientation, first index:
+ // face_orientation (standard true=1), second
+ // index: face_flip (standard false=0), third
+ // index: face_rotation (standard false=0)
+ //
+ // note that normally we should use the
+ // obvious offsets 0,1,2,3,4,5,6,7. However,
+ // prior to the changes enabling flipped and
+ // rotated faces, in many places of the
+ // library the convention was used that the
+ // first dataset with offset 0 corresponds to
+ // a face in standard orientation. therefore
+ // we use the offsets 4,5,6,7,0,1,2,3 here to
+ // stick to that (implicit) convention
static const unsigned int orientation_offset[2][2][2]=
- {{
- // face_orientation=false; face_flip=false; face_rotation=false and true
- {4*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 5*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face},
- // face_orientation=false; face_flip=true; face_rotation=false and true
- {6*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 7*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face}},
- {
- // face_orientation=true; face_flip=false; face_rotation=false and true
- {0*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 1*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face},
- // face_orientation=true; face_flip=true; face_rotation=false and true
- {2*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 3*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face}}};
-
- // set up a table with the offsets for a
- // given refinement case respecting the
- // corresponding number of subfaces. the
- // index corresponds to (RefineCase::Type - 1)
-
- // note, that normally we should use the
- // obvious offsets 0,2,6. However, prior to
- // the implementation of anisotropic
- // refinement, in many places of the library
- // the convention was used, that the first
- // dataset with offset 0 corresponds to a
- // standard (isotropic) face
- // refinement. therefore we use the offsets
- // 6,4,0 here to stick to that (implicit)
- // convention
- static const unsigned int ref_case_offset[3]=
+ {
{
- 6, //cut_x
- 4, //cut_y
- 0 //cut_xy
- };
+ // face_orientation=false; face_flip=false; face_rotation=false and true
+ {
+ 4*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 5*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 5*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ },
+ // face_orientation=false; face_flip=true; face_rotation=false and true
+ {
+ 6*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 7*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 7*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ }
+ },
+ {
+ // face_orientation=true; face_flip=false; face_rotation=false and true
+ {
+ 0*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 1*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 1*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ },
+ // face_orientation=true; face_flip=true; face_rotation=false and true
+ {
+ 2*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 3*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 3*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ }
+ }
+ };
+
+ // set up a table with the offsets for a
+ // given refinement case respecting the
+ // corresponding number of subfaces. the
+ // index corresponds to (RefineCase::Type - 1)
+
+ // note that normally we should use the
+ // obvious offsets 0,2,6. However, prior to
+ // the implementation of anisotropic
+ // refinement, in many places of the library
+ // the convention was used that the first
+ // dataset with offset 0 corresponds to a
+ // standard (isotropic) face
+ // refinement. therefore we use the offsets
+ // 6,4,0 here to stick to that (implicit)
+ // convention
+ static const unsigned int ref_case_offset[3]=
+ {
+ 6, //cut_x
+ 4, //cut_y
+ 0 //cut_xy
+ };
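+
+      // As a small worked illustration of the two tables above (an example
+      // only): for a face in standard orientation (face_orientation=true,
+      // face_flip=false, face_rotation=false) that has been refined with
+      // cut_x, the table entries used are
+      //   orientation_offset[1][0][0] = 0   and   ref_case_offset[cut_x-1] = 6,
+      // so the corresponding data sets come after those of the isotropic
+      // (cut_xy) and the cut_y refinement cases, consistent with the
+      // conventions described above.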
- // for each subface of a given FaceRefineCase
- // there is a corresponding equivalent
- // subface number of one of the "standard"
- // RefineCases (cut_x, cut_y, cut_xy). Map
- // the given values to those equivalent
- // ones.
+ // for each subface of a given FaceRefineCase
+ // there is a corresponding equivalent
+ // subface number of one of the "standard"
+ // RefineCases (cut_x, cut_y, cut_xy). Map
+ // the given values to those equivalent
+ // ones.
- // first, define an invalid number
+ // first, define an invalid number
static const unsigned int e = deal_II_numbers::invalid_unsigned_int;
static const RefinementCase<dim-1>
}
else if (!p4est_has_children && !dealii_cell->has_children())
{
- //this active cell didn't change
+ //this active cell didn't change
typename internal::p4est::types<dim>::quadrant *q;
- q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
- sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
- );
- *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST;
-
- for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
- it != attached_data_pack_callbacks.end();
- ++it)
+ q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
+ sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), idx)
+ );
+ *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST;
+
+ for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
{
- void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
- void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
++ void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
((*it).second)(dealii_cell,
parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST,
ptr);
Assert(child0_idx != -1, ExcMessage("the first child should exist as an active quadrant!"));
typename internal::p4est::types<dim>::quadrant *q;
- q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
- sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), child0_idx)
- );
- *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE;
-
- for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
- it != attached_data_pack_callbacks.end();
- ++it)
+ q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
+ sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), child0_idx)
+ );
+ *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE;
+
+ for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
{
- void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
- void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
++ void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
((*it).second)(dealii_cell,
parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE,
}
else
{
- //it's children got coarsened into
- //this cell
+ //its children got coarsened into
+ //this cell
typename internal::p4est::types<dim>::quadrant *q;
- q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
- sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
- );
- *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN;
-
- for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
- it != attached_data_pack_callbacks.end();
- ++it)
+ q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
+ sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), idx)
+ );
+ *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN;
+
+ for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
{
- void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
+ void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
((*it).second)(dealii_cell,
parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN,
ptr);
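
All three branches above follow the same pattern: every registered pack callback is stored together with a byte offset into the quadrant's user_data buffer and is invoked with a pointer shifted by exactly that offset. The following stand-alone sketch only illustrates that offset bookkeeping; pack_callback_t, pack_a_double and pack_an_int are invented names, not deal.II or p4est symbols.

#include <cstddef>
#include <cstring>
#include <utility>
#include <vector>

// each callback is allowed to write its data at one fixed offset inside a
// single contiguous per-cell buffer
typedef void (*pack_callback_t) (void *position);

void pack_a_double (void *position)
{
  const double value = 3.14;
  std::memcpy (position, &value, sizeof value);
}

void pack_an_int (void *position)
{
  const int value = 42;
  std::memcpy (position, &value, sizeof value);
}

int main ()
{
  // register callbacks as (offset, callback) pairs, the same shape as the
  // attached_data_pack_callbacks list iterated over in the code above
  std::vector<std::pair<std::size_t, pack_callback_t> > callbacks;
  std::size_t buffer_size = 0;

  callbacks.push_back (std::make_pair (buffer_size, &pack_a_double));
  buffer_size += sizeof (double);
  callbacks.push_back (std::make_pair (buffer_size, &pack_an_int));
  buffer_size += sizeof (int);

  // one buffer per cell; p4est keeps this in quadrant->p.user_data
  std::vector<char> user_data (buffer_size);

  // invoke every callback with its own, offset-shifted pointer
  for (std::vector<std::pair<std::size_t, pack_callback_t> >::const_iterator
         it = callbacks.begin (); it != callbacks.end (); ++it)
    (it->second) (&user_data[0] + it->first);

  return 0;
}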
template <int dim>
- void downstream_dg (MGDoFHandler<dim>& dof,
+ void downstream_dg (MGDoFHandler<dim> &dof,
const unsigned int level,
- const Point<dim>& direction)
+ const Point<dim> &direction)
{
std::vector<unsigned int> renumbering(dof.n_dofs(level));
std::vector<unsigned int> reverse(dof.n_dofs(level));
template <int dim>
- void downstream (MGDoFHandler<dim>& dof,
+ void downstream (MGDoFHandler<dim> &dof,
const unsigned int level,
- const Point<dim>& direction,
+ const Point<dim> &direction,
const bool dof_wise_renumbering)
{
std::vector<unsigned int> renumbering(dof.n_dofs(level));
void
- make_hp_hanging_node_constraints (const dealii::hp::DoFHandler<1> & /*dof_handler*/,
- ConstraintMatrix & /*constraints*/)
+ make_hp_hanging_node_constraints (const dealii::hp::DoFHandler<1> &/*dof_handler*/,
+ ConstraintMatrix &/*constraints*/)
{
- // we may have to compute
- // constraints for
- // vertices. gotta think about
- // that a bit more
+ // we may have to compute
+ // constraints for
+ // vertices. gotta think about
+ // that a bit more
//TODO[WB]: think about what to do here...
}
void
- make_oldstyle_hanging_node_constraints (const dealii::hp::DoFHandler<1> & /*dof_handler*/,
- ConstraintMatrix & /*constraints*/,
+ make_oldstyle_hanging_node_constraints (const dealii::hp::DoFHandler<1> &/*dof_handler*/,
+ ConstraintMatrix &/*constraints*/,
dealii::internal::int2type<1>)
{
- // we may have to compute
- // constraints for
- // vertices. gotta think about
- // that a bit more
+ // we may have to compute
+ // constraints for
+ // vertices. gotta think about
+ // that a bit more
//TODO[WB]: think about what to do here...
}
// component's index
template <int dim, int spacedim>
void
- resolve_components (const FiniteElement<dim,spacedim>&fe,
+ resolve_components (const FiniteElement<dim,spacedim> &fe,
const std::vector<unsigned char> &dofs_by_component,
- const std::vector<unsigned int> &target_component,
+ const std::vector<unsigned int> &target_component,
const bool only_once,
std::vector<unsigned int> &dofs_per_component,
unsigned int &component)
template <int dim, int spacedim>
void
- resolve_components (const hp::FECollection<dim,spacedim>&fe_collection,
+ resolve_components (const hp::FECollection<dim,spacedim> &fe_collection,
const std::vector<unsigned char> &dofs_by_component,
- const std::vector<unsigned int> &target_component,
+ const std::vector<unsigned int> &target_component,
const bool only_once,
std::vector<unsigned int> &dofs_per_component,
unsigned int &component)
{
template <class DH>
void
- map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> & mapping,
+ map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
- const DH &dof_handler,
- std::map<unsigned int,Point<DH::space_dimension> > &support_points)
+ const DH &dof_handler,
+ std::map<unsigned int,Point<DH::space_dimension> > &support_points)
{
- const unsigned int dim = DH::dimension;
- const unsigned int spacedim = DH::space_dimension;
+ const unsigned int dim = DH::dimension;
+ const unsigned int spacedim = DH::space_dimension;
- hp::FECollection<dim, spacedim> fe_collection(dof_handler.get_fe());
- hp::QCollection<dim> q_coll_dummy;
+ hp::FECollection<dim, spacedim> fe_collection(dof_handler.get_fe());
+ hp::QCollection<dim> q_coll_dummy;
- for (unsigned int fe_index = 0; fe_index < fe_collection.size(); ++fe_index)
- {
- // check whether every fe in the collection
- // has support points
- Assert(fe_collection[fe_index].has_support_points(),
- typename FiniteElement<dim>::ExcFEHasNoSupportPoints());
- q_coll_dummy.push_back(
- Quadrature<dim> (
- fe_collection[fe_index].get_unit_support_points()));
- }
+ for (unsigned int fe_index = 0; fe_index < fe_collection.size(); ++fe_index)
+ {
+ // check whether every fe in the collection
+ // has support points
+ Assert(fe_collection[fe_index].has_support_points(),
+ typename FiniteElement<dim>::ExcFEHasNoSupportPoints());
+ q_coll_dummy.push_back(
+ Quadrature<dim> (
+ fe_collection[fe_index].get_unit_support_points()));
+ }
- // now loop over all cells and
- // enquire the support points on
- // each of these. we use dummy
- // quadrature formulas where the
- // quadrature points are located at
- // the unit support points to
- // enquire the location of the
- // support points in real space
- //
- // the weights of the quadrature
- // rule have been set to invalid values
- // by the used constructor.
- hp::FEValues<dim, spacedim> hp_fe_values(mapping, fe_collection,
- q_coll_dummy, update_quadrature_points);
- typename DH::active_cell_iterator cell =
- dof_handler.begin_active(), endc = dof_handler.end();
-
- std::vector<unsigned int> local_dof_indices;
- for (; cell != endc; ++cell)
- // only work on locally relevant cells
- if (cell->is_artificial() == false)
- {
- hp_fe_values.reinit(cell);
- const FEValues<dim, spacedim> &fe_values = hp_fe_values.get_present_fe_values();
+ // now loop over all cells and
+ // enquire the support points on
+ // each of these. we use dummy
+ // quadrature formulas where the
+ // quadrature points are located at
+ // the unit support points to
+ // enquire the location of the
+ // support points in real space
+ //
+ // the weights of the quadrature
+ // rule have been set to invalid values
+ // by the used constructor.
+ hp::FEValues<dim, spacedim> hp_fe_values(mapping, fe_collection,
+ q_coll_dummy, update_quadrature_points);
+ typename DH::active_cell_iterator cell =
+ dof_handler.begin_active(), endc = dof_handler.end();
+
+ std::vector<unsigned int> local_dof_indices;
+ for (; cell != endc; ++cell)
+ // only work on locally relevant cells
+ if (cell->is_artificial() == false)
+ {
+ hp_fe_values.reinit(cell);
+ const FEValues<dim, spacedim> &fe_values = hp_fe_values.get_present_fe_values();
- local_dof_indices.resize(cell->get_fe().dofs_per_cell);
- cell->get_dof_indices(local_dof_indices);
+ local_dof_indices.resize(cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices(local_dof_indices);
- const std::vector<Point<spacedim> > & points =
- fe_values.get_quadrature_points();
- for (unsigned int i = 0; i < cell->get_fe().dofs_per_cell; ++i)
- // insert the values into the map
- support_points[local_dof_indices[i]] = points[i];
- }
+ const std::vector<Point<spacedim> > &points =
+ fe_values.get_quadrature_points();
+ for (unsigned int i = 0; i < cell->get_fe().dofs_per_cell; ++i)
+ // insert the values into the map
+ support_points[local_dof_indices[i]] = points[i];
+ }
}
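
The routine above obtains the real-space location of every support point by evaluating a dummy quadrature rule, placed at the unit support points, through an FEValues object created with update_quadrature_points. Below is a hedged usage sketch of the public DoFTools::map_dofs_to_support_points interface that these internal helpers back; grid, element and mapping are chosen only for illustration.

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/mapping_q1.h>

#include <vector>

void collect_support_points ()
{
  const unsigned int dim = 2;

  dealii::Triangulation<dim> triangulation;
  dealii::GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (2);

  // FE_Q has support points, so the assertion in the implementation
  // above is satisfied
  dealii::FE_Q<dim>       fe (1);
  dealii::DoFHandler<dim> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  // one location per degree of freedom
  std::vector<dealii::Point<dim> > support_points (dof_handler.n_dofs ());
  dealii::DoFTools::map_dofs_to_support_points (dealii::MappingQ1<dim> (),
                                                dof_handler,
                                                support_points);
}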
template <class DH>
void
- map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> & mapping,
+ map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
- const DH &dof_handler,
- std::vector<Point<DH::space_dimension> > &support_points)
+ const DH &dof_handler,
+ std::vector<Point<DH::space_dimension> > &support_points)
- {
- // get the data in the form of the map as above
- std::map<unsigned int,Point<DH::space_dimension> > x_support_points;
- map_dofs_to_support_points(mapping, dof_handler, x_support_points);
+ {
+ // get the data in the form of the map as above
+ std::map<unsigned int,Point<DH::space_dimension> > x_support_points;
+ map_dofs_to_support_points(mapping, dof_handler, x_support_points);
- // now convert from the map to the linear vector. make sure every
- // entry really appeared in the map
- for (unsigned int i=0; i<dof_handler.n_dofs(); ++i)
- {
- Assert (x_support_points.find(i) != x_support_points.end(),
- ExcInternalError());
- support_points[i] = x_support_points[i];
- }
- }
+ // now convert from the map to the linear vector. make sure every
+ // entry really appeared in the map
+ for (unsigned int i=0; i<dof_handler.n_dofs(); ++i)
+ {
+ Assert (x_support_points.find(i) != x_support_points.end(),
+ ExcInternalError());
+ support_points[i] = x_support_points[i];
+ }
+ }
}
}
template <int dim, int spacedim>
void
FE_DGPNonparametric<dim,spacedim>::fill_fe_values (
- const Mapping<dim,spacedim>&,
- const typename Triangulation<dim,spacedim>::cell_iterator&,
- const Quadrature<dim>&,
- typename Mapping<dim,spacedim>::InternalDataBase&,
- typename Mapping<dim,spacedim>::InternalDataBase& fedata,
- FEValuesData<dim,spacedim>&data,
+ const Mapping<dim,spacedim> &,
+ const typename Triangulation<dim,spacedim>::cell_iterator &,
+ const Quadrature<dim> &,
+ typename Mapping<dim,spacedim>::InternalDataBase &,
+ typename Mapping<dim,spacedim>::InternalDataBase &fedata,
+ FEValuesData<dim,spacedim> &data,
- CellSimilarity::Similarity & /*cell_similarity*/) const
+ CellSimilarity::Similarity &/*cell_similarity*/) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
Assert (dynamic_cast<InternalData *> (&fedata) != 0,
ExcInternalError());
InternalData &fe_data = static_cast<InternalData &> (fedata);
template <int dim>
std::vector<std::pair<unsigned int, unsigned int> >
FE_Nothing<dim> ::
-hp_vertex_dof_identities (const FiniteElement<dim> & /*fe_other*/) const
+hp_vertex_dof_identities (const FiniteElement<dim> &/*fe_other*/) const
{
- // the FE_Nothing has no
- // degrees of freedom, so there
- // are no equivalencies to be
- // recorded
- return std::vector<std::pair<unsigned int, unsigned int> > ();
+ // the FE_Nothing has no
+ // degrees of freedom, so there
+ // are no equivalencies to be
+ // recorded
+ return std::vector<std::pair<unsigned int, unsigned int> > ();
}
template <int dim>
std::vector<std::pair<unsigned int, unsigned int> >
FE_Nothing<dim> ::
-hp_line_dof_identities (const FiniteElement<dim> & /*fe_other*/) const
+hp_line_dof_identities (const FiniteElement<dim> &/*fe_other*/) const
{
- // the FE_Nothing has no
- // degrees of freedom, so there
- // are no equivalencies to be
- // recorded
- return std::vector<std::pair<unsigned int, unsigned int> > ();
+ // the FE_Nothing has no
+ // degrees of freedom, so there
+ // are no equivalencies to be
+ // recorded
+ return std::vector<std::pair<unsigned int, unsigned int> > ();
}
template <int dim>
std::vector<std::pair<unsigned int, unsigned int> >
FE_Nothing<dim> ::
-hp_quad_dof_identities (const FiniteElement<dim> & /*fe_other*/) const
+hp_quad_dof_identities (const FiniteElement<dim> &/*fe_other*/) const
{
- // the FE_Nothing has no
- // degrees of freedom, so there
- // are no equivalencies to be
- // recorded
- return std::vector<std::pair<unsigned int, unsigned int> > ();
+ // the FE_Nothing has no
+ // degrees of freedom, so there
+ // are no equivalencies to be
+ // recorded
+ return std::vector<std::pair<unsigned int, unsigned int> > ();
}
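
Since FE_Nothing contributes no degrees of freedom, there is never anything to identify across an interface, which is why all three functions above return an empty vector. A minimal sketch of how FE_Nothing is typically combined with a real element in an hp::FECollection; the pairing with FE_Q(1) is only an example.

#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/hp/fe_collection.h>

template <int dim>
void build_collection (dealii::hp::FECollection<dim> &fe_collection)
{
  fe_collection.push_back (dealii::FE_Q<dim> (1));      // "real" element
  fe_collection.push_back (dealii::FE_Nothing<dim> ()); // zero DoFs: used to
                                                        // switch the field off
                                                        // on part of the domain
}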
template <int dim>
void
FE_Nothing<dim>::
-get_face_interpolation_matrix (const FiniteElement<dim> & /*source_fe*/,
+get_face_interpolation_matrix (const FiniteElement<dim> &/*source_fe*/,
FullMatrix<double> &interpolation_matrix) const
{
- // since this element has no face dofs, the
- // interpolation matrix is necessarily empty
+ // since this element has no face dofs, the
+ // interpolation matrix is necessarily empty
Assert (interpolation_matrix.m() == 0,
ExcDimensionMismatch (interpolation_matrix.m(),
FE_Nothing<dim>::
get_subface_interpolation_matrix (const FiniteElement<dim> & /*source_fe*/,
const unsigned int /*index*/,
- FullMatrix<double> &interpolation_matrix) const
+ FullMatrix<double> &interpolation_matrix) const
{
- // since this element has no face dofs, the
- // interpolation matrix is necessarily empty
+ // since this element has no face dofs, the
+ // interpolation matrix is necessarily empty
Assert (interpolation_matrix.m() == 0,
ExcDimensionMismatch (interpolation_matrix.m(),
}
template <>
-void FE_Q<1>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/)
+void FE_Q<1>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/)
{
- // no faces in 1d, so nothing to do
+ // no faces in 1d, so nothing to do
}
template <>
}
template <>
-void FE_Q<1,2>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/)
+void FE_Q<1,2>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/)
{
- // no faces in 1d, so nothing to do
+ // no faces in 1d, so nothing to do
}
template <>
}
template <>
-void FE_Q<1,3>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/)
+void FE_Q<1,3>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/)
{
- // no faces in 1d, so nothing to do
+ // no faces in 1d, so nothing to do
}
template <int dim, int spacedim>
template <int dim, int spacedim>
FESystem<dim,spacedim>::FESystem (
- const std::vector<const FiniteElement<dim,spacedim>*> &fes,
+ const std::vector<const FiniteElement<dim,spacedim>*> &fes,
const std::vector<unsigned int> &multiplicities)
- :
- FiniteElement<dim,spacedim> (multiply_dof_numbers(fes, multiplicities),
- compute_restriction_is_additive_flags (fes, multiplicities),
- compute_nonzero_components(fes, multiplicities)),
- base_elements(count_nonzeros(multiplicities))
+ :
+ FiniteElement<dim,spacedim> (multiply_dof_numbers(fes, multiplicities),
+ compute_restriction_is_additive_flags (fes, multiplicities),
+ compute_nonzero_components(fes, multiplicities)),
+ base_elements(count_nonzeros(multiplicities))
{
initialize(fes, multiplicities);
}
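
The constructor above assembles a combined element from a list of base elements and their multiplicities, allocating storage only for base elements whose multiplicity is nonzero. The short sketch below uses the two-element convenience constructor, which produces the same kind of FESystem; the concrete element choice is illustrative.

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>

template <int dim>
void build_system_element ()
{
  // dim copies of a quadratic element plus one linear element
  dealii::FESystem<dim> fe (dealii::FE_Q<dim> (2), dim,
                            dealii::FE_Q<dim> (1), 1);

  // fe.n_base_elements() == 2, fe.n_components() == dim + 1
}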
template <int dim, int spacedim>
Tensor<1,dim>
FESystem<dim,spacedim>::shape_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const
+ const Point<dim> &p,
+ const unsigned int component) const
{
Assert (i<this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert (component < this->n_components(),
template <int dim, int spacedim>
Tensor<2,dim>
FESystem<dim,spacedim>::shape_grad_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const
+ const Point<dim> &p,
+ const unsigned int component) const
{
Assert (i<this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert (component < this->n_components(),
typename Mapping<dim, spacedim>::InternalDataBase &mapping_data,
std::vector<Point<dim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,dim> > &boundary_forms,
+ std::vector<Tensor<1,dim> > &boundary_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0,
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
+ InternalData &data = static_cast<InternalData &> (mapping_data);
compute_fill (cell, face_no, invalid_face_number,
CellSimilarity::none,
typename Mapping<dim, spacedim>::InternalDataBase &mapping_data,
std::vector<Point<dim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,dim> > &boundary_forms,
+ std::vector<Tensor<1,dim> > &boundary_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0, ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0, ExcInternalError());
+ InternalData &data = static_cast<InternalData &> (mapping_data);
compute_fill (cell, face_no, sub_no, CellSimilarity::none,
data,
typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
std::vector<Point<spacedim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &exterior_forms,
+ std::vector<Tensor<1,spacedim> > &exterior_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0,
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
-
- // check whether this cell needs
- // the full mapping or can be
- // treated by a reduced Q1 mapping,
- // e.g. if the cell is entirely in
- // the interior of the domain. note
- // that it is not sufficient to ask
- // whether the present _face_ is in
- // the interior, as the mapping on
- // the face depends on the mapping
- // of the cell, which in turn
- // depends on the fact whether
- // _any_ of the faces of this cell
- // is at the boundary, not only the
- // present face
+ InternalData &data = static_cast<InternalData &> (mapping_data);
+
+ // check whether this cell needs
+ // the full mapping or can be
+ // treated by a reduced Q1 mapping,
+ // e.g. if the cell is entirely in
+ // the interior of the domain. note
+ // that it is not sufficient to ask
+ // whether the present _face_ is in
+ // the interior, as the mapping on
+ // the face depends on the mapping
+ // of the cell, which in turn
+ // depends on the fact whether
+ // _any_ of the faces of this cell
+ // is at the boundary, not only the
+ // present face
data.use_mapping_q1_on_current_cell=!(use_mapping_q_on_all_cells
|| cell->has_boundary_lines());
template<int dim, int spacedim>
void
MappingQ<dim,spacedim>::fill_fe_subface_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int sub_no,
- const Quadrature<dim-1> &q,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
- std::vector<Point<spacedim> > &quadrature_points,
- std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &exterior_forms,
- std::vector<Point<spacedim> > &normal_vectors) const
+ const unsigned int face_no,
+ const unsigned int sub_no,
+ const Quadrature<dim-1> &q,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
+ std::vector<Point<spacedim> > &quadrature_points,
+ std::vector<double> &JxW_values,
+ std::vector<Tensor<1,spacedim> > &exterior_forms,
+ std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0,
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
-
- // check whether this cell needs
- // the full mapping or can be
- // treated by a reduced Q1 mapping,
- // e.g. if the cell is entirely in
- // the interior of the domain. note
- // that it is not sufficient to ask
- // whether the present _face_ is in
- // the interior, as the mapping on
- // the face depends on the mapping
- // of the cell, which in turn
- // depends on the fact whether
- // _any_ of the faces of this cell
- // is at the boundary, not only the
- // present face
+ InternalData &data = static_cast<InternalData &> (mapping_data);
+
+ // check whether this cell needs
+ // the full mapping or can be
+ // treated by a reduced Q1 mapping,
+ // e.g. if the cell is entirely in
+ // the interior of the domain. note
+ // that it is not sufficient to ask
+ // whether the present _face_ is in
+ // the interior, as the mapping on
+ // the face depends on the mapping
+ // of the cell, which in turn
+ // depends on the fact whether
+ // _any_ of the faces of this cell
+ // is at the boundary, not only the
+ // present face
data.use_mapping_q1_on_current_cell=!(use_mapping_q_on_all_cells
|| cell->has_boundary_lines());
typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
std::vector<Point<spacedim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &boundary_forms,
+ std::vector<Tensor<1,spacedim> > &boundary_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // ensure that the following cast
- // is really correct:
+ // ensure that the following cast
+ // is really correct:
Assert (dynamic_cast<InternalData *>(&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&>(mapping_data);
+ InternalData &data = static_cast<InternalData &>(mapping_data);
const unsigned int n_q_points = q.size();
template <int dim, class EulerVectorType, int spacedim>
MappingQ1Eulerian<dim, EulerVectorType, spacedim>::
-MappingQ1Eulerian (const EulerVectorType &euler_transform_vectors,
+MappingQ1Eulerian (const EulerVectorType &euler_transform_vectors,
const DoFHandler<dim,spacedim> &shiftmap_dof_handler)
- :
- euler_transform_vectors(&euler_transform_vectors),
- shiftmap_dof_handler(&shiftmap_dof_handler)
+ :
+ euler_transform_vectors(&euler_transform_vectors),
+ shiftmap_dof_handler(&shiftmap_dof_handler)
{}
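
The constructor stores pointers to the displacement vector and to the DoFHandler describing the shift field, so that the mapping can later add the interpolated displacement to every vertex. Below is a hedged sketch of setting up such a mapping for a dim-component Q1 displacement field, following the argument order of the constructor above; the FESystem setup is the usual but here illustrative choice.

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/mapping_q1_eulerian.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>

template <int dim>
void use_eulerian_mapping (dealii::Triangulation<dim> &triangulation)
{
  // one displacement component per space direction
  dealii::FESystem<dim>   fe (dealii::FE_Q<dim> (1), dim);
  dealii::DoFHandler<dim> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  // all zero here; in practice this would hold a computed displacement
  dealii::Vector<double> displacement (dof_handler.n_dofs ());

  dealii::MappingQ1Eulerian<dim, dealii::Vector<double> >
  mapping (displacement, dof_handler);

  // 'mapping' can now be handed to FEValues, VectorTools, etc.
}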
void
GridGenerator::moebius (
- Triangulation<3>& tria,
+ Triangulation<3> &tria,
const unsigned int n_cells,
const unsigned int n_rotations,
const double R,
void
- GridGenerator::torus (Triangulation<2,3>& tria,
+GridGenerator::torus (Triangulation<2,3> &tria,
const double R,
const double r)
{
template<>
void
GridGenerator::parallelogram (
- Triangulation<2>& tria,
- const Tensor<2,2>& corners,
+ Triangulation<2> &tria,
+ const Tensor<2,2> &corners,
const bool colorize)
{
std::vector<Point<2> > vertices (GeometryInfo<2>::vertices_per_cell);
template <int dim, int spacedim>
-void GridIn<dim, spacedim>::debug_output_grid (const std::vector<CellData<dim> > & /*cells*/,
- const std::vector<Point<spacedim> > & /*vertices*/,
- std::ostream & /*out*/)
+void GridIn<dim, spacedim>::debug_output_grid (const std::vector<CellData<dim> > &/*cells*/,
+ const std::vector<Point<spacedim> > &/*vertices*/,
+ std::ostream &/*out*/)
{
Assert (false, ExcNotImplemented());
}
const unsigned int s1,
const unsigned int s2,
const unsigned int s3,
- const CellData<2> &cd)
+ const CellData<2> &cd)
- :
- original_cell_data (cd)
+ :
+ original_cell_data (cd)
{
v[0] = v0;
v[1] = v1;
<< arg1 << " and " << arg2 << " is multiply set.");
- /**
- * A class into which we put many of the functions that implement
- * functionality of the Triangulation class. The main reason for this
- * class is as follows: the majority of the functions in Triangulation
- * need to be implemented differently for dim==1, dim==2, and
- * dim==3. However, their implementation is largly independent of the
- * spacedim template parameter. So we would like to write things like
- *
- * template <int spacedim>
- * void Triangulation<1,spacedim>::create_triangulation (...) {...}
- *
- * Unfortunately, C++ doesn't allow this: member functions of class
- * templates have to be either not specialized at all, or fully
- * specialized. No partial specialization is allowed. One possible
- * solution would be to just duplicate the bodies of the functions and
- * have equally implemented functions
- *
- * template <>
- * void Triangulation<1,1>::create_triangulation (...) {...}
- *
- * template <>
- * void Triangulation<1,2>::create_triangulation (...) {...}
- *
- * but that is clearly an unsatisfactory solution. Rather, what we do
- * is introduce the current Implementation class in which we can write
- * these functions as member templates over spacedim, i.e. we can have
- *
- * template <int dim_, int spacedim_>
- * template <int spacedim>
- * void Triangulation<dim_,spacedim_>::Implementation::
- * create_triangulation (...,
- * Triangulation<1,spacedim> &tria ) {...}
- *
- * The outer template parameters are here unused, only the inner
- * ones are of real interest.
- *
- * One may ask why we put these functions into an class rather
- * than an anonymous namespace, for example?
- *
- * First, these implementation functions need to be friends of the
- * Triangulation class. It is simpler to make the entire class a friend
- * rather than listing all members of an implementation namespace as
- * friends of the Triangulation class (there is no such thing as a "friend
- * namespace XXX" directive).
- *
- * Ideally, we would make this class a member class of the
- * Triangulation<dim,spacedim> class, since then our implementation functions
- * have immediate access to the typedefs and static functions of the
- * surrounding Triangulation class. I.e., we do not have to write "typename
- * Triangulation<dim,spacedim>::active_cell_iterator" but can write
- * "active_cell_iterator" right away. This is, in fact, the way it was
- * implemented first, but we ran into a bug in gcc4.0:
- * @code
- * class Triangulation {
- * struct Implementation;
- * friend class TriaAccessor;
- * };
- *
- * class TriaAccessor {
- * struct Implementation;
- * friend class Triangulation;
- * };
- * @endcode
- *
- * Here, friendship (per C++ standard) is supposed to extend to all members of
- * the befriended class, including its 'Implementation' member class. But gcc4.0
- * gets this wrong: the members of Triangulation::Implementation are not friends
- * of TriaAccessor and the other way around. Ideally, one would fix this by
- * saying
- * @code
- * class Triangulation {
- * struct Implementation;
- * friend class TriaAccessor;
- * friend class TriaAccessor::Implementation; // **
- * };
- *
- * class TriaAccessor {
- * struct Implementation;
- * friend class Triangulation;
- * friend class Triangulation::Implementation;
- * };
- * @endcode
- * but that's not legal because in ** we don't know yet that TriaAccessor has
- * a member class Implementation and so we can't make it a friend. The only
- * way forward at this point was to make Implementation a class in the
- * internal namespace so that we can forward declare it and make it a friend
- * of the respective other outer class -- not quite what we wanted but the
- * only way I could see to make it work...
- */
+ /**
+ * A class into which we put many of the functions that implement
+ * functionality of the Triangulation class. The main reason for this
+ * class is as follows: the majority of the functions in Triangulation
+ * need to be implemented differently for dim==1, dim==2, and
+ * dim==3. However, their implementation is largely independent of the
+ * spacedim template parameter. So we would like to write things like
+ *
+ * template <int spacedim>
+ * void Triangulation<1,spacedim>::create_triangulation (...) {...}
+ *
+ * Unfortunately, C++ doesn't allow this: member functions of class
+ * templates have to be either not specialized at all, or fully
+ * specialized. No partial specialization is allowed. One possible
+ * solution would be to just duplicate the bodies of the functions and
+ * have equally implemented functions
+ *
+ * template <>
+ * void Triangulation<1,1>::create_triangulation (...) {...}
+ *
+ * template <>
+ * void Triangulation<1,2>::create_triangulation (...) {...}
+ *
+ * but that is clearly an unsatisfactory solution. Rather, what we do
+ * is introduce the current Implementation class in which we can write
+ * these functions as member templates over spacedim, i.e. we can have
+ *
+ * template <int dim_, int spacedim_>
+ * template <int spacedim>
+ * void Triangulation<dim_,spacedim_>::Implementation::
+ * create_triangulation (...,
+ * Triangulation<1,spacedim> &tria ) {...}
+ *
+ * The outer template parameters are here unused, only the inner
+ * ones are of real interest.
+ *
+ * One may ask why we put these functions into a class rather
+ * than an anonymous namespace, for example?
+ *
+ * First, these implementation functions need to be friends of the
+ * Triangulation class. It is simpler to make the entire class a friend
+ * rather than listing all members of an implementation namespace as
+ * friends of the Triangulation class (there is no such thing as a "friend
+ * namespace XXX" directive).
+ *
+ * Ideally, we would make this class a member class of the
+ * Triangulation<dim,spacedim> class, since then our implementation functions
+ * have immediate access to the typedefs and static functions of the
+ * surrounding Triangulation class. I.e., we do not have to write "typename
+ * Triangulation<dim,spacedim>::active_cell_iterator" but can write
+ * "active_cell_iterator" right away. This is, in fact, the way it was
+ * implemented first, but we ran into a bug in gcc4.0:
+ * @code
+ * class Triangulation {
+ * struct Implementation;
+ * friend class TriaAccessor;
+ * };
+ *
+ * class TriaAccessor {
+ * struct Implementation;
+ * friend class Triangulation;
+ * };
+ * @endcode
+ *
+ * Here, friendship (per C++ standard) is supposed to extend to all members of
+ * the befriended class, including its 'Implementation' member class. But gcc4.0
+ * gets this wrong: the members of Triangulation::Implementation are not friends
+ * of TriaAccessor and the other way around. Ideally, one would fix this by
+ * saying
+ * @code
+ * class Triangulation {
+ * struct Implementation;
+ * friend class TriaAccessor;
+ * friend class TriaAccessor::Implementation; // **
+ * };
+ *
+ * class TriaAccessor {
+ * struct Implementation;
+ * friend class Triangulation;
+ * friend class Triangulation::Implementation;
+ * };
+ * @endcode
+ * but that's not legal because in ** we don't know yet that TriaAccessor has
+ * a member class Implementation and so we can't make it a friend. The only
+ * way forward at this point was to make Implementation a class in the
+ * internal namespace so that we can forward declare it and make it a friend
+ * of the respective other outer class -- not quite what we wanted but the
+ * only way I could see to make it work...
+ */
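
A stand-alone illustration of the workaround described in the comment above may help; Mesh and MeshImplementation are invented names for this sketch and are not deal.II classes.

// member functions of a class template cannot be partially specialized,
// so the per-dimension bodies become member templates (over spacedim) of
// a befriended helper struct, and ordinary overload resolution on the
// first argument picks the right one
template <int dim, int spacedim> class Mesh;   // stand-in for Triangulation

struct MeshImplementation
{
  template <int spacedim>
  static void create (Mesh<1, spacedim> &) { /* 1d body, any spacedim */ }

  template <int spacedim>
  static void create (Mesh<2, spacedim> &) { /* 2d body, any spacedim */ }
};

template <int dim, int spacedim>
class Mesh
{
public:
  void create ()
  {
    // would be a partial specialization if written as a member of Mesh;
    // written as a dispatch to the helper it is just overload resolution
    MeshImplementation::create (*this);
  }

private:
  friend struct MeshImplementation;
};

int main ()
{
  Mesh<1, 3> line_in_3d;
  line_in_3d.create ();   // picks the Mesh<1,spacedim> overload
}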
struct Implementation
{
- /**
- * For a given Triangulation, update the
- * number cache for lines. For 1d, we have
- * to deal with the fact that lines have
- * levels, whereas for higher dimensions
- * they do not.
- *
- * The second argument indicates
- * for how many levels the
- * Triangulation has objects,
- * though the highest levels need
- * not contain active cells if they
- * have previously all been
- * coarsened away.
- */
- template <int dim, int spacedim>
- static
- void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
- const unsigned int level_objects,
- internal::Triangulation::NumberCache<1> &number_cache)
- {
- typedef
- typename Triangulation<dim,spacedim>::line_iterator line_iterator;
- typedef
- typename Triangulation<dim,spacedim>::active_line_iterator active_line_iterator;
-
- number_cache.n_levels = 0;
- if (level_objects > 0)
- // find the last level
- // on which there are
- // used cells
- for (unsigned int level=0; level<level_objects; ++level)
- if (triangulation.begin(level) !=
- triangulation.end(level))
- number_cache.n_levels = level+1;
-
- // no cells at all?
- Assert (number_cache.n_levels > 0, ExcInternalError());
-
- ///////////////////////////////////
- // update the number of lines
- // on the different levels in
- // the cache
- number_cache.n_lines_level.resize (number_cache.n_levels);
- number_cache.n_lines = 0;
-
- number_cache.n_active_lines_level.resize (number_cache.n_levels);
- number_cache.n_active_lines = 0;
-
- // for 1d, lines have levels so take
- // count the objects per level and
- // globally
- if (dim == 1)
- {
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count lines on this level
- number_cache.n_lines_level[level] = 0;
-
- line_iterator line = triangulation.begin_line (level),
- endc = (level == number_cache.n_levels-1 ?
- line_iterator(triangulation.end_line()) :
- triangulation.begin_line (level+1));
- for (; line!=endc; ++line)
- ++number_cache.n_lines_level[level];
-
- // update total number of lines
- number_cache.n_lines += number_cache.n_lines_level[level];
- }
-
- // do the update for the number of
- // active lines as well
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count lines on this level
- number_cache.n_active_lines_level[level] = 0;
-
- active_line_iterator line = triangulation.begin_active_line (level),
- endc = triangulation.end_line ();
- for (; (line!=endc) && (line->level() == static_cast<signed int>(level)); ++line)
- ++number_cache.n_active_lines_level[level];
-
- // update total number of lines
- number_cache.n_active_lines += number_cache.n_active_lines_level[level];
- }
- }
- else
- {
- // for dim>1, there are no
- // levels for lines
- {
- line_iterator line = triangulation.begin_line (),
- endc = triangulation.end_line();
- for (; line!=endc; ++line)
- ++number_cache.n_lines;
- }
-
- {
- active_line_iterator line = triangulation.begin_active_line (),
- endc = triangulation.end_line();
- for (; line!=endc; ++line)
- ++number_cache.n_active_lines;
- }
- }
- }
-
- /**
- * For a given Triangulation, update the
- * number cache for quads. For 2d, we have
- * to deal with the fact that quads have
- * levels, whereas for higher dimensions
- * they do not.
- *
- * The second argument indicates
- * for how many levels the
- * Triangulation has objects,
- * though the highest levels need
- * not contain active cells if they
- * have previously all been
- * coarsened away.
- *
- * At the beginning of the function, we call the
- * respective function to update the number
- * cache for lines.
- */
- template <int dim, int spacedim>
- static
- void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
- const unsigned int level_objects,
- internal::Triangulation::NumberCache<2> &number_cache)
- {
- // update lines and n_levels
- compute_number_cache (triangulation,
- level_objects,
- static_cast<internal::Triangulation::NumberCache<1>&>
- (number_cache));
-
- typedef
- typename Triangulation<dim,spacedim>::quad_iterator quad_iterator;
- typedef
- typename Triangulation<dim,spacedim>::active_quad_iterator active_quad_iterator;
-
- ///////////////////////////////////
- // update the number of quads
- // on the different levels in
- // the cache
- number_cache.n_quads_level.resize (number_cache.n_levels);
- number_cache.n_quads = 0;
-
- number_cache.n_active_quads_level.resize (number_cache.n_levels);
- number_cache.n_active_quads = 0;
-
- // for 2d, quads have levels so take
- // count the objects per level and
- // globally
- if (dim == 2)
- {
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count quads on this level
- number_cache.n_quads_level[level] = 0;
-
- quad_iterator quad = triangulation.begin_quad (level),
- endc = (level == number_cache.n_levels-1 ?
- quad_iterator(triangulation.end_quad()) :
- triangulation.begin_quad (level+1));
- for (; quad!=endc; ++quad)
- ++number_cache.n_quads_level[level];
-
- // update total number of quads
- number_cache.n_quads += number_cache.n_quads_level[level];
- }
-
- // do the update for the number of
- // active quads as well
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count quads on this level
- number_cache.n_active_quads_level[level] = 0;
-
- active_quad_iterator quad = triangulation.begin_active_quad (level),
- endc = triangulation.end_quad ();
- for (; (quad!=endc) && (quad->level() == static_cast<signed int>(level)); ++quad)
- ++number_cache.n_active_quads_level[level];
-
- // update total number of quads
- number_cache.n_active_quads += number_cache.n_active_quads_level[level];
- }
- }
- else
- {
- // for dim>2, there are no
- // levels for quads
- {
- quad_iterator quad = triangulation.begin_quad (),
- endc = triangulation.end_quad();
- for (; quad!=endc; ++quad)
- ++number_cache.n_quads;
- }
-
- {
- active_quad_iterator quad = triangulation.begin_active_quad (),
- endc = triangulation.end_quad();
- for (; quad!=endc; ++quad)
- ++number_cache.n_active_quads;
- }
- }
- }
-
- /**
- * For a given Triangulation, update the
- * number cache for hexes. For 3d, we have
- * to deal with the fact that hexes have
- * levels, whereas for higher dimensions
- * they do not.
- *
- * The second argument indicates
- * for how many levels the
- * Triangulation has objects,
- * though the highest levels need
- * not contain active cells if they
- * have previously all been
- * coarsened away.
- *
- * At the end of the function, we call the
- * respective function to update the number
- * cache for quads, which will in turn call
- * the respective function for lines.
- */
- template <int dim, int spacedim>
- static
- void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
- const unsigned int level_objects,
- internal::Triangulation::NumberCache<3> &number_cache)
- {
- // update quads, lines and n_levels
- compute_number_cache (triangulation,
- level_objects,
- static_cast<internal::Triangulation::NumberCache<2>&>
- (number_cache));
-
- typedef
- typename Triangulation<dim,spacedim>::hex_iterator hex_iterator;
- typedef
- typename Triangulation<dim,spacedim>::active_hex_iterator active_hex_iterator;
-
- ///////////////////////////////////
- // update the number of hexes
- // on the different levels in
- // the cache
- number_cache.n_hexes_level.resize (number_cache.n_levels);
- number_cache.n_hexes = 0;
-
- number_cache.n_active_hexes_level.resize (number_cache.n_levels);
- number_cache.n_active_hexes = 0;
-
- // for 3d, hexes have levels so take
- // count the objects per level and
- // globally
- if (dim == 3)
- {
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count hexes on this level
- number_cache.n_hexes_level[level] = 0;
-
- hex_iterator hex = triangulation.begin_hex (level),
- endc = (level == number_cache.n_levels-1 ?
- hex_iterator(triangulation.end_hex()) :
- triangulation.begin_hex (level+1));
- for (; hex!=endc; ++hex)
- ++number_cache.n_hexes_level[level];
-
- // update total number of hexes
- number_cache.n_hexes += number_cache.n_hexes_level[level];
- }
-
- // do the update for the number of
- // active hexes as well
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count hexes on this level
- number_cache.n_active_hexes_level[level] = 0;
-
- active_hex_iterator hex = triangulation.begin_active_hex (level),
- endc = triangulation.end_hex ();
- for (; (hex!=endc) && (hex->level() == static_cast<signed int>(level)); ++hex)
- ++number_cache.n_active_hexes_level[level];
-
- // update total number of hexes
- number_cache.n_active_hexes += number_cache.n_active_hexes_level[level];
- }
- }
- else
- {
- // for dim>3, there are no
- // levels for hexs
- {
- hex_iterator hex = triangulation.begin_hex (),
- endc = triangulation.end_hex();
- for (; hex!=endc; ++hex)
- ++number_cache.n_hexes;
- }
-
- {
- active_hex_iterator hex = triangulation.begin_active_hex (),
- endc = triangulation.end_hex();
- for (; hex!=endc; ++hex)
- ++number_cache.n_active_hexes;
- }
- }
- }
-
-
- /**
- * Create a triangulation from
- * given data. This function does
- * this work for 1-dimensional
- * triangulations independently
- * of the actual space dimension.
- */
- template <int spacedim>
- static
- void
- create_triangulation (const std::vector<Point<spacedim> > &v,
- const std::vector<CellData<1> > &cells,
- const SubCellData &/*subcelldata*/,
- Triangulation<1,spacedim> &triangulation)
+ /**
+ * For a given Triangulation, update the
+ * number cache for lines. For 1d, we have
+ * to deal with the fact that lines have
+ * levels, whereas for higher dimensions
+ * they do not.
+ *
+ * The second argument indicates
+ * for how many levels the
+ * Triangulation has objects,
+ * though the highest levels need
+ * not contain active cells if they
+ * have previously all been
+ * coarsened away.
+ */
+ template <int dim, int spacedim>
+ static
+ void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
+ const unsigned int level_objects,
+ internal::Triangulation::NumberCache<1> &number_cache)
+ {
+ typedef
+ typename Triangulation<dim,spacedim>::line_iterator line_iterator;
+ typedef
+ typename Triangulation<dim,spacedim>::active_line_iterator active_line_iterator;
+
+ number_cache.n_levels = 0;
+ if (level_objects > 0)
+ // find the last level
+ // on which there are
+ // used cells
+ for (unsigned int level=0; level<level_objects; ++level)
+ if (triangulation.begin(level) !=
+ triangulation.end(level))
+ number_cache.n_levels = level+1;
+
+ // no cells at all?
+ Assert (number_cache.n_levels > 0, ExcInternalError());
+
+ ///////////////////////////////////
+ // update the number of lines
+ // on the different levels in
+ // the cache
+ number_cache.n_lines_level.resize (number_cache.n_levels);
+ number_cache.n_lines = 0;
+
+ number_cache.n_active_lines_level.resize (number_cache.n_levels);
+ number_cache.n_active_lines = 0;
+
+ // for 1d, lines have levels, so
+ // count the objects per level and
+ // globally
+ if (dim == 1)
{
- AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
- AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
-
- // note: since no boundary
- // information can be given in one
- // dimension, the @p{subcelldata}
- // field is ignored. (only used for
- // error checking, which is a good
- // idea in any case)
- const unsigned int dim=1;
-
- // copy vertices
- triangulation.vertices = v;
- triangulation.vertices_used = std::vector<bool> (v.size(), true);
-
- // store the indices of the lines
- // which are adjacent to a given
- // vertex
- std::vector<std::vector<int> > lines_at_vertex (v.size());
-
- // reserve enough space
- triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
- triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
- triangulation.levels[0]->cells.reserve_space (0,cells.size());
-
- // make up cells
- typename Triangulation<dim,spacedim>::raw_line_iterator
- next_free_line = triangulation.begin_raw_line ();
- for (unsigned int cell=0; cell<cells.size(); ++cell)
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
{
- while (next_free_line->used())
- ++next_free_line;
-
- next_free_line->set (internal::Triangulation
- ::TriaObject<1> (cells[cell].vertices[0],
- cells[cell].vertices[1]));
- next_free_line->set_used_flag ();
- next_free_line->set_material_id (cells[cell].material_id);
- next_free_line->clear_user_data ();
- next_free_line->set_subdomain_id (0);
-
- // note that this cell is
- // adjacent to these vertices
- lines_at_vertex[cells[cell].vertices[0]].push_back (cell);
- lines_at_vertex[cells[cell].vertices[1]].push_back (cell);
+ // count lines on this level
+ number_cache.n_lines_level[level] = 0;
+
+ line_iterator line = triangulation.begin_line (level),
+ endc = (level == number_cache.n_levels-1 ?
+ line_iterator(triangulation.end_line()) :
+ triangulation.begin_line (level+1));
+ for (; line!=endc; ++line)
+ ++number_cache.n_lines_level[level];
+
+ // update total number of lines
+ number_cache.n_lines += number_cache.n_lines_level[level];
}
+ // do the update for the number of
+ // active lines as well
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count lines on this level
+ number_cache.n_active_lines_level[level] = 0;
+
+ active_line_iterator line = triangulation.begin_active_line (level),
+ endc = triangulation.end_line ();
+ for (; (line!=endc) && (line->level() == static_cast<signed int>(level)); ++line)
+ ++number_cache.n_active_lines_level[level];
- // some security tests
+ // update total number of lines
+ number_cache.n_active_lines += number_cache.n_active_lines_level[level];
+ }
+ }
+ else
+ {
+ // for dim>1, there are no
+ // levels for lines
{
- unsigned int boundary_nodes = 0;
- for (unsigned int i=0; i<lines_at_vertex.size(); ++i)
- switch (lines_at_vertex[i].size())
- {
- case 1:
- // this vertex has only
- // one adjacent line
- ++boundary_nodes;
- break;
- case 2:
- break;
- default:
- // a node must have one
- // or two adjacent
- // lines
- AssertThrow (false, ExcInternalError());
- }
+ line_iterator line = triangulation.begin_line (),
+ endc = triangulation.end_line();
+ for (; line!=endc; ++line)
+ ++number_cache.n_lines;
+ }
- // assert there are no more
- // than two boundary
- // nodes. note that if the
- // space dimension is
- // bigger than 1, then we
- // can have fewer than 2
- // nodes (for example a
- // ring of cells -- no end
- // points at all)
- AssertThrow (((spacedim == 1) && (boundary_nodes == 2))
- ||
- (spacedim > 1),
- ExcMessage("The Triangulation has too many end points"));
+ {
+ active_line_iterator line = triangulation.begin_active_line (),
+ endc = triangulation.end_line();
+ for (; line!=endc; ++line)
+ ++number_cache.n_active_lines;
}
+ }
+ }
+ /**
+ * For a given Triangulation, update the
+ * number cache for quads. For 2d, we have
+ * to deal with the fact that quads have
+ * levels, whereas for higher dimensions
+ * they do not.
+ *
+ * The second argument indicates
+ * for how many levels the
+ * Triangulation has objects,
+ * though the highest levels need
+ * not contain active cells if they
+ * have previously all been
+ * coarsened away.
+ *
+ * At the beginning of the function, we call the
+ * respective function to update the number
+ * cache for lines.
+ */
+ template <int dim, int spacedim>
+ static
+ void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
+ const unsigned int level_objects,
+ internal::Triangulation::NumberCache<2> &number_cache)
+ {
+ // update lines and n_levels
+ compute_number_cache (triangulation,
+ level_objects,
+ static_cast<internal::Triangulation::NumberCache<1>&>
+ (number_cache));
+
+ typedef
+ typename Triangulation<dim,spacedim>::quad_iterator quad_iterator;
+ typedef
+ typename Triangulation<dim,spacedim>::active_quad_iterator active_quad_iterator;
+
+ ///////////////////////////////////
+ // update the number of quads
+ // on the different levels in
+ // the cache
+ number_cache.n_quads_level.resize (number_cache.n_levels);
+ number_cache.n_quads = 0;
+
+ number_cache.n_active_quads_level.resize (number_cache.n_levels);
+ number_cache.n_active_quads = 0;
+
+ // for 2d, quads have levels, so
+ // count the objects per level and
+ // globally
+ if (dim == 2)
+ {
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count quads on this level
+ number_cache.n_quads_level[level] = 0;
+
+ quad_iterator quad = triangulation.begin_quad (level),
+ endc = (level == number_cache.n_levels-1 ?
+ quad_iterator(triangulation.end_quad()) :
+ triangulation.begin_quad (level+1));
+ for (; quad!=endc; ++quad)
+ ++number_cache.n_quads_level[level];
+
+ // update total number of quads
+ number_cache.n_quads += number_cache.n_quads_level[level];
+ }
+ // do the update for the number of
+ // active quads as well
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count quads on this level
+ number_cache.n_active_quads_level[level] = 0;
- // update neighborship info
- typename Triangulation<dim,spacedim>::active_line_iterator
- line = triangulation.begin_active_line ();
- // for all lines
- for (; line!=triangulation.end(); ++line)
- // for each of the two vertices
- for (unsigned int vertex=0; vertex<GeometryInfo<dim>::vertices_per_cell; ++vertex)
- // if first cell adjacent to
- // this vertex is the present
- // one, then the neighbor is
- // the second adjacent cell and
- // vice versa
- if (lines_at_vertex[line->vertex_index(vertex)][0] == line->index())
- if (lines_at_vertex[line->vertex_index(vertex)].size() == 2)
- {
- const typename Triangulation<dim,spacedim>::cell_iterator
- neighbor (&triangulation,
- 0, // level
- lines_at_vertex[line->vertex_index(vertex)][1]);
- line->set_neighbor (vertex, neighbor);
- }
- else
- // no second adjacent cell
- // entered -> cell at
- // boundary
- line->set_neighbor (vertex, triangulation.end());
- else
- // present line is not first
- // adjacent one -> first
- // adjacent one is neighbor
- {
- const typename Triangulation<dim,spacedim>::cell_iterator
- neighbor (&triangulation,
- 0, // level
- lines_at_vertex[line->vertex_index(vertex)][0]);
- line->set_neighbor (vertex, neighbor);
- }
+ active_quad_iterator quad = triangulation.begin_active_quad (level),
+ endc = triangulation.end_quad ();
+ for (; (quad!=endc) && (quad->level() == static_cast<signed int>(level)); ++quad)
+ ++number_cache.n_active_quads_level[level];
- // finally set the
- // vertex_to_boundary_id_map_1d
- // map
- triangulation.vertex_to_boundary_id_map_1d->clear();
- for (typename Triangulation<dim,spacedim>::active_cell_iterator
- cell = triangulation.begin_active();
- cell != triangulation.end(); ++cell)
- for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
- if (cell->at_boundary(f))
- (*triangulation
- .vertex_to_boundary_id_map_1d)[cell->face(f)->vertex_index()]
- = f;
+ // update total number of quads
+ number_cache.n_active_quads += number_cache.n_active_quads_level[level];
+ }
}
+ else
+ {
+ // for dim>2, there are no
+ // levels for quads
+ {
+ quad_iterator quad = triangulation.begin_quad (),
+ endc = triangulation.end_quad();
+ for (; quad!=endc; ++quad)
+ ++number_cache.n_quads;
+ }
+ {
+ active_quad_iterator quad = triangulation.begin_active_quad (),
+ endc = triangulation.end_quad();
+ for (; quad!=endc; ++quad)
+ ++number_cache.n_active_quads;
+ }
+ }
+ }
- /**
- * Create a triangulation from
- * given data. This function does
- * this work for 2-dimensional
- * triangulations independently
- * of the actual space dimension.
- */
- template <int spacedim>
- static
- void
- create_triangulation (const std::vector<Point<spacedim> > &v,
- const std::vector<CellData<2> > &cells,
- const SubCellData &subcelldata,
- Triangulation<2,spacedim> &triangulation)
+ /**
+ * For a given Triangulation, update the
+ * number cache for hexes. For 3d, we have
+ * to deal with the fact that hexes have
+ * levels, whereas for higher dimensions
+ * they do not.
+ *
+ * The second argument indicates
+ * for how many levels the
+ * Triangulation has objects,
+ * though the highest levels need
+ * not contain active cells if they
+ * have previously all been
+ * coarsened away.
+ *
+ * At the beginning of the function, we call the
+ * respective function to update the number
+ * cache for quads, which will in turn call
+ * the respective function for lines.
+ */
+ template <int dim, int spacedim>
+ static
+ void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
+ const unsigned int level_objects,
+ internal::Triangulation::NumberCache<3> &number_cache)
+ {
+ // update quads, lines and n_levels
+ compute_number_cache (triangulation,
+ level_objects,
+ static_cast<internal::Triangulation::NumberCache<2>&>
+ (number_cache));
+
+ typedef
+ typename Triangulation<dim,spacedim>::hex_iterator hex_iterator;
+ typedef
+ typename Triangulation<dim,spacedim>::active_hex_iterator active_hex_iterator;
+
+ ///////////////////////////////////
+ // update the number of hexes
+ // on the different levels in
+ // the cache
+ number_cache.n_hexes_level.resize (number_cache.n_levels);
+ number_cache.n_hexes = 0;
+
+ number_cache.n_active_hexes_level.resize (number_cache.n_levels);
+ number_cache.n_active_hexes = 0;
+
+ // for 3d, hexes have levels, so
+ // count the objects per level and
+ // globally
+ if (dim == 3)
{
- AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
- AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
-
- const unsigned int dim=2;
-
- // copy vertices
- triangulation.vertices = v;
- triangulation.vertices_used = std::vector<bool> (v.size(), true);
-
- // make up a list of the needed
- // lines each line is a pair of
- // vertices. The list is kept
- // sorted and it is guaranteed that
- // each line is inserted only once.
- // While the key of such an entry
- // is the pair of vertices, the
- // thing it points to is an
- // iterator pointing to the line
- // object itself. In the first run,
- // these iterators are all invalid
- // ones, but they are filled
- // afterwards
- std::map<std::pair<int,int>,
- typename Triangulation<dim,spacedim>::line_iterator> needed_lines;
- for (unsigned int cell=0; cell<cells.size(); ++cell)
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
{
- for (unsigned int vertex=0; vertex<4; ++vertex)
- AssertThrow (cells[cell].vertices[vertex] < triangulation.vertices.size(),
- ExcInvalidVertexIndex (cell, cells[cell].vertices[vertex],
- triangulation.vertices.size()));
-
- for (unsigned int line=0; line<GeometryInfo<dim>::faces_per_cell; ++line)
- {
- // given a line vertex number
- // (0,1) on a specific line we
- // get the cell vertex number
- // (0-4) through the
- // line_to_cell_vertices
- // function
- std::pair<int,int> line_vertices(
- cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
- cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)]);
-
- // assert that the line was
- // not already inserted in
- // reverse order. This
- // happens in spite of the
- // vertex rotation above,
- // if the sense of the cell
- // was incorrect.
- //
- // Here is what usually
- // happened when this
- // exception is thrown:
- // consider these two cells
- // and the vertices
- // 3---4---5
- // | | |
- // 0---1---2
- // If in the input vector
- // the two cells are given
- // with vertices <0 1 4 3>
- // and <4 1 2 5>, in the
- // first cell the middle
- // line would have
- // direction 1->4, while in
- // the second it would be
- // 4->1. This will cause
- // the exception.
- AssertThrow (needed_lines.find(std::make_pair(line_vertices.second,
- line_vertices.first))
- ==
- needed_lines.end(),
- ExcGridHasInvalidCell(cell));
-
- // insert line, with
- // invalid iterator if line
- // already exists, then
- // nothing bad happens here
- needed_lines[line_vertices] = triangulation.end_line();
- }
+ // count hexes on this level
+ number_cache.n_hexes_level[level] = 0;
+
+ hex_iterator hex = triangulation.begin_hex (level),
+ endc = (level == number_cache.n_levels-1 ?
+ hex_iterator(triangulation.end_hex()) :
+ triangulation.begin_hex (level+1));
+ for (; hex!=endc; ++hex)
+ ++number_cache.n_hexes_level[level];
+
+ // update total number of hexes
+ number_cache.n_hexes += number_cache.n_hexes_level[level];
}
+ // do the update for the number of
+ // active hexes as well
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count hexes on this level
+ number_cache.n_active_hexes_level[level] = 0;
+
+ active_hex_iterator hex = triangulation.begin_active_hex (level),
+ endc = triangulation.end_hex ();
+ for (; (hex!=endc) && (hex->level() == static_cast<signed int>(level)); ++hex)
+ ++number_cache.n_active_hexes_level[level];
- // check that every vertex has at
- // least two adjacent lines
+ // update total number of hexes
+ number_cache.n_active_hexes += number_cache.n_active_hexes_level[level];
+ }
+ }
+ else
+ {
+ // for dim>3, there are no
+ // levels for hexes
{
- std::vector<unsigned short int> vertex_touch_count (v.size(), 0);
- typename std::map<std::pair<int,int>,
- typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
- for (i=needed_lines.begin(); i!=needed_lines.end(); i++)
- {
- // touch the vertices of
- // this line
- ++vertex_touch_count[i->first.first];
- ++vertex_touch_count[i->first.second];
- }
+ hex_iterator hex = triangulation.begin_hex (),
+ endc = triangulation.end_hex();
+ for (; hex!=endc; ++hex)
+ ++number_cache.n_hexes;
+ }
- // assert minimum touch count
- // is at least two. if not so,
- // then clean triangulation and
- // exit with an exception
- AssertThrow (* (std::min_element(vertex_touch_count.begin(),
- vertex_touch_count.end())) >= 2,
- ExcGridHasInvalidVertices());
+ {
+ active_hex_iterator hex = triangulation.begin_active_hex (),
+ endc = triangulation.end_hex();
+ for (; hex!=endc; ++hex)
+ ++number_cache.n_active_hexes;
}
+ }
+ }
- // reserve enough space
- triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
- triangulation.faces = new internal::Triangulation::TriaFaces<dim>;
- triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
- triangulation.faces->lines.reserve_space (0,needed_lines.size());
- triangulation.levels[0]->cells.reserve_space (0,cells.size());
- // make up lines
- {
- typename Triangulation<dim,spacedim>::raw_line_iterator
- line = triangulation.begin_raw_line();
- typename std::map<std::pair<int,int>,
- typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
- for (i = needed_lines.begin();
- line!=triangulation.end_line(); ++line, ++i)
+ /**
+ * Create a triangulation from
+ * given data. This function does
+ * this work for 1-dimensional
+ * triangulations independently
+ * of the actual space dimension.
+ */
+ template <int spacedim>
+ static
+ void
+ create_triangulation (const std::vector<Point<spacedim> > &v,
+ const std::vector<CellData<1> > &cells,
- const SubCellData & /*subcelldata*/,
++ const SubCellData &/*subcelldata*/,
+ Triangulation<1,spacedim> &triangulation)
+ {
+ AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
+ AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
+
+ // note: since no boundary
+ // information can be given in one
+ // dimension, the @p{subcelldata}
+ // field is ignored. (only used for
+ // error checking, which is a good
+ // idea in any case)
+ const unsigned int dim=1;
+
+ // copy vertices
+ triangulation.vertices = v;
+ triangulation.vertices_used = std::vector<bool> (v.size(), true);
+
+ // store the indices of the lines
+ // which are adjacent to a given
+ // vertex
+ std::vector<std::vector<int> > lines_at_vertex (v.size());
+
+ // reserve enough space
+ triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
+ triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
+ triangulation.levels[0]->cells.reserve_space (0,cells.size());
+
+ // make up cells
+ typename Triangulation<dim,spacedim>::raw_line_iterator
+ next_free_line = triangulation.begin_raw_line ();
+ for (unsigned int cell=0; cell<cells.size(); ++cell)
+ {
+ while (next_free_line->used())
+ ++next_free_line;
+
+ next_free_line->set (internal::Triangulation
+ ::TriaObject<1> (cells[cell].vertices[0],
+ cells[cell].vertices[1]));
+ next_free_line->set_used_flag ();
+ next_free_line->set_material_id (cells[cell].material_id);
+ next_free_line->clear_user_data ();
+ next_free_line->set_subdomain_id (0);
+
+ // note that this cell is
+ // adjacent to these vertices
+ lines_at_vertex[cells[cell].vertices[0]].push_back (cell);
+ lines_at_vertex[cells[cell].vertices[1]].push_back (cell);
+ }
+
+
+ // some security tests
+ {
+ unsigned int boundary_nodes = 0;
+ for (unsigned int i=0; i<lines_at_vertex.size(); ++i)
+ switch (lines_at_vertex[i].size())
+ {
+ case 1:
+ // this vertex has only
+ // one adjacent line
+ ++boundary_nodes;
+ break;
+ case 2:
+ break;
+ default:
+ // a node must have one
+ // or two adjacent
+ // lines
+ AssertThrow (false, ExcInternalError());
+ }
+
+ // assert there are no more
+ // than two boundary
+ // nodes. note that if the
+ // space dimension is
+ // bigger than 1, then we
+ // can have fewer than 2
+ // nodes (for example a
+ // ring of cells -- no end
+ // points at all)
+ AssertThrow (((spacedim == 1) && (boundary_nodes == 2))
+ ||
+ (spacedim > 1),
+ ExcMessage("The Triangulation has too many end points"));
+ }
+
+
+
+ // update neighborship info
+ typename Triangulation<dim,spacedim>::active_line_iterator
+ line = triangulation.begin_active_line ();
+ // for all lines
+ for (; line!=triangulation.end(); ++line)
+ // for each of the two vertices
+ for (unsigned int vertex=0; vertex<GeometryInfo<dim>::vertices_per_cell; ++vertex)
+ // if first cell adjacent to
+ // this vertex is the present
+ // one, then the neighbor is
+ // the second adjacent cell and
+ // vice versa
+ if (lines_at_vertex[line->vertex_index(vertex)][0] == line->index())
+ if (lines_at_vertex[line->vertex_index(vertex)].size() == 2)
{
- line->set (internal::Triangulation::TriaObject<1>(i->first.first,
- i->first.second));
- line->set_used_flag ();
- line->clear_user_flag ();
- line->clear_user_data ();
- i->second = line;
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ neighbor (&triangulation,
+ 0, // level
+ lines_at_vertex[line->vertex_index(vertex)][1]);
+ line->set_neighbor (vertex, neighbor);
}
+ else
+ // no second adjacent cell
+ // entered -> cell at
+ // boundary
+ line->set_neighbor (vertex, triangulation.end());
+ else
+ // present line is not first
+ // adjacent one -> first
+ // adjacent one is neighbor
+ {
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ neighbor (&triangulation,
+ 0, // level
+ lines_at_vertex[line->vertex_index(vertex)][0]);
+ line->set_neighbor (vertex, neighbor);
+ }
+
+ // finally set the
+ // vertex_to_boundary_id_map_1d
+ // map
+ triangulation.vertex_to_boundary_id_map_1d->clear();
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+ if (cell->at_boundary(f))
+ (*triangulation
+ .vertex_to_boundary_id_map_1d)[cell->face(f)->vertex_index()]
+ = f;
+ }
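// Illustration (not part of the patch): a minimal, hypothetical driver that
// reaches the 1d branch above through the public
// Triangulation::create_triangulation() interface.  It builds a ring of
// three line cells embedded in 2d space; because spacedim > 1, the
// "too many end points" check above accepts zero boundary nodes.  All
// names below are made up for this sketch.
#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <vector>

void make_ring_of_lines ()
{
  using namespace dealii;

  std::vector<Point<2> > vertices;
  vertices.push_back (Point<2> ( 1.0,  0.0));
  vertices.push_back (Point<2> (-0.5,  0.866));
  vertices.push_back (Point<2> (-0.5, -0.866));

  std::vector<CellData<1> > cells (3);
  for (unsigned int c=0; c<3; ++c)
    {
      // each cell connects vertex c with the next one, closing the ring
      cells[c].vertices[0] = c;
      cells[c].vertices[1] = (c+1) % 3;
      cells[c].material_id = 0;
    }

  Triangulation<1,2> ring;
  ring.create_triangulation (vertices, cells, SubCellData());
}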
+
+
+ /**
+ * Create a triangulation from
+ * given data. This function does
+ * this work for 2-dimensional
+ * triangulations independently
+ * of the actual space dimension.
+ */
+ template <int spacedim>
+ static
+ void
+ create_triangulation (const std::vector<Point<spacedim> > &v,
+ const std::vector<CellData<2> > &cells,
+ const SubCellData &subcelldata,
+ Triangulation<2,spacedim> &triangulation)
+ {
+ AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
+ AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
+
+ const unsigned int dim=2;
+
+ // copy vertices
+ triangulation.vertices = v;
+ triangulation.vertices_used = std::vector<bool> (v.size(), true);
+
+ // make up a list of the needed
+ // lines; each line is a pair of
+ // vertices. The list is kept
+ // sorted and it is guaranteed
+ // that each line is inserted
+ // only once. While the key of
+ // such an entry is the pair of
+ // vertices, the value it maps
+ // to is an iterator pointing to
+ // the line object itself. In
+ // the first run, these
+ // iterators are all invalid;
+ // they are filled in afterwards
+ std::map<std::pair<int,int>,
+ typename Triangulation<dim,spacedim>::line_iterator> needed_lines;
+ for (unsigned int cell=0; cell<cells.size(); ++cell)
+ {
+ for (unsigned int vertex=0; vertex<4; ++vertex)
+ AssertThrow (cells[cell].vertices[vertex] < triangulation.vertices.size(),
+ ExcInvalidVertexIndex (cell, cells[cell].vertices[vertex],
+ triangulation.vertices.size()));
+
+ for (unsigned int line=0; line<GeometryInfo<dim>::faces_per_cell; ++line)
+ {
+ // given a line vertex number
+ // (0 or 1) on a specific line,
+ // we get the cell vertex number
+ // (0-3) through the
+ // line_to_cell_vertices
+ // function
+ std::pair<int,int> line_vertices(
+ cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
+ cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)]);
+
+ // assert that the line was
+ // not already inserted in
+ // reverse order. This
+ // happens in spite of the
+ // vertex rotation above,
+ // if the sense of the cell
+ // was incorrect.
+ //
+ // Here is what usually
+ // happens when this
+ // exception is thrown:
+ // consider these two cells
+ // and the vertices
+ // 3---4---5
+ // | | |
+ // 0---1---2
+ // If in the input vector
+ // the two cells are given
+ // with vertices <0 1 4 3>
+ // and <4 1 2 5>, in the
+ // first cell the middle
+ // line would have
+ // direction 1->4, while in
+ // the second it would be
+ // 4->1. This will cause
+ // the exception.
+ AssertThrow (needed_lines.find(std::make_pair(line_vertices.second,
+ line_vertices.first))
+ ==
+ needed_lines.end(),
+ ExcGridHasInvalidCell(cell));
+
+ // insert line, with
+ // invalid iterator if line
+ // already exists, then
+ // nothing bad happens here
+ needed_lines[line_vertices] = triangulation.end_line();
+ }
+ }
+
+
+ // check that every vertex has at
+ // least two adjacent lines
+ {
+ std::vector<unsigned short int> vertex_touch_count (v.size(), 0);
+ typename std::map<std::pair<int,int>,
+ typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
+ for (i=needed_lines.begin(); i!=needed_lines.end(); i++)
+ {
+ // touch the vertices of
+ // this line
+ ++vertex_touch_count[i->first.first];
+ ++vertex_touch_count[i->first.second];
}
+ // assert that the minimum
+ // touch count is at least two;
+ // if not, the grid is invalid
+ // and we exit with an exception
+ AssertThrow (* (std::min_element(vertex_touch_count.begin(),
+ vertex_touch_count.end())) >= 2,
+ ExcGridHasInvalidVertices());
+ }
- // store for each line index
- // the adjacent cells
- std::map<int,std::vector<typename Triangulation<dim,spacedim>::cell_iterator> >
- adjacent_cells;
+ // reserve enough space
+ triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
+ triangulation.faces = new internal::Triangulation::TriaFaces<dim>;
+ triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
+ triangulation.faces->lines.reserve_space (0,needed_lines.size());
+ triangulation.levels[0]->cells.reserve_space (0,cells.size());
- // finally make up cells
+ // make up lines
+ {
+ typename Triangulation<dim,spacedim>::raw_line_iterator
+ line = triangulation.begin_raw_line();
+ typename std::map<std::pair<int,int>,
+ typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
+ for (i = needed_lines.begin();
+ line!=triangulation.end_line(); ++line, ++i)
{
- typename Triangulation<dim,spacedim>::raw_cell_iterator
- cell = triangulation.begin_raw_quad();
- for (unsigned int c=0; c<cells.size(); ++c, ++cell)
- {
- typename Triangulation<dim,spacedim>::line_iterator
- lines[GeometryInfo<dim>::lines_per_cell];
- for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
- lines[line]=needed_lines[std::make_pair(
- cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
- cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)])];
-
- cell->set (internal::Triangulation::TriaObject<2> (lines[0]->index(),
- lines[1]->index(),
- lines[2]->index(),
- lines[3]->index()));
-
- cell->set_used_flag ();
- cell->set_material_id (cells[c].material_id);
- cell->clear_user_data ();
- cell->set_subdomain_id (0);
-
- // note that this cell is
- // adjacent to the four
- // lines
- for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
- adjacent_cells[lines[line]->index()].push_back (cell);
- }
+ line->set (internal::Triangulation::TriaObject<1>(i->first.first,
+ i->first.second));
+ line->set_used_flag ();
+ line->clear_user_flag ();
+ line->clear_user_data ();
+ i->second = line;
}
+ }
- for (typename Triangulation<dim,spacedim>::line_iterator
- line=triangulation.begin_line();
- line!=triangulation.end_line(); ++line)
- {
- const unsigned int n_adj_cells = adjacent_cells[line->index()].size();
- // assert that every line has
- // one or two adjacent cells
- AssertThrow ((n_adj_cells >= 1) &&
- (n_adj_cells <= 2),
- ExcInternalError());
+ // store for each line index
+ // the adjacent cells
+ std::map<int,std::vector<typename Triangulation<dim,spacedim>::cell_iterator> >
+ adjacent_cells;
- // if only one cell: line is at
- // boundary -> give it the
- // boundary indicator zero by
- // default
- if (n_adj_cells == 1)
- line->set_boundary_indicator (0);
- else
- // interior line -> numbers::internal_face_boundary_id
- line->set_boundary_indicator (numbers::internal_face_boundary_id);
- }
+ // finally make up cells
+ {
+ typename Triangulation<dim,spacedim>::raw_cell_iterator
+ cell = triangulation.begin_raw_quad();
+ for (unsigned int c=0; c<cells.size(); ++c, ++cell)
+ {
+ typename Triangulation<dim,spacedim>::line_iterator
+ lines[GeometryInfo<dim>::lines_per_cell];
+ for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
+ lines[line]=needed_lines[std::make_pair(
+ cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
+ cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)])];
+
+ cell->set (internal::Triangulation::TriaObject<2> (lines[0]->index(),
+ lines[1]->index(),
+ lines[2]->index(),
+ lines[3]->index()));
+
+ cell->set_used_flag ();
+ cell->set_material_id (cells[c].material_id);
+ cell->clear_user_data ();
+ cell->set_subdomain_id (0);
+
+ // note that this cell is
+ // adjacent to the four
+ // lines
+ for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
+ adjacent_cells[lines[line]->index()].push_back (cell);
+ }
+ }
+
+
+ for (typename Triangulation<dim,spacedim>::line_iterator
+ line=triangulation.begin_line();
+ line!=triangulation.end_line(); ++line)
+ {
+ const unsigned int n_adj_cells = adjacent_cells[line->index()].size();
+ // assert that every line has
+ // one or two adjacent cells
+ AssertThrow ((n_adj_cells >= 1) &&
+ (n_adj_cells <= 2),
+ ExcInternalError());
+
+ // if only one cell: line is at
+ // boundary -> give it the
+ // boundary indicator zero by
+ // default
+ if (n_adj_cells == 1)
+ line->set_boundary_indicator (0);
+ else
+ // interior line -> numbers::internal_face_boundary_id
+ line->set_boundary_indicator (numbers::internal_face_boundary_id);
+ }
- // set boundary indicators where
- // given
- std::vector<CellData<1> >::const_iterator boundary_line
- = subcelldata.boundary_lines.begin();
- std::vector<CellData<1> >::const_iterator end_boundary_line
- = subcelldata.boundary_lines.end();
- for (; boundary_line!=end_boundary_line; ++boundary_line)
+ // set boundary indicators where
+ // given
+ std::vector<CellData<1> >::const_iterator boundary_line
+ = subcelldata.boundary_lines.begin();
+ std::vector<CellData<1> >::const_iterator end_boundary_line
+ = subcelldata.boundary_lines.end();
+ for (; boundary_line!=end_boundary_line; ++boundary_line)
+ {
+ typename Triangulation<dim,spacedim>::line_iterator line;
+ std::pair<int,int> line_vertices(std::make_pair(boundary_line->vertices[0],
+ boundary_line->vertices[1]));
+ if (needed_lines.find(line_vertices) != needed_lines.end())
+ // line found in this
+ // direction
+ line = needed_lines[line_vertices];
+ else
{
- typename Triangulation<dim,spacedim>::line_iterator line;
- std::pair<int,int> line_vertices(std::make_pair(boundary_line->vertices[0],
- boundary_line->vertices[1]));
+ // look whether it exists
+ // in reverse direction
+ std::swap (line_vertices.first, line_vertices.second);
if (needed_lines.find(line_vertices) != needed_lines.end())
- // line found in this
- // direction
line = needed_lines[line_vertices];
else
- {
- // look whether it exists
- // in reverse direction
- std::swap (line_vertices.first, line_vertices.second);
- if (needed_lines.find(line_vertices) != needed_lines.end())
- line = needed_lines[line_vertices];
- else
- // line does not exist
- AssertThrow (false, ExcLineInexistant(line_vertices.first,
- line_vertices.second));
- }
+ // line does not exist
+ AssertThrow (false, ExcLineInexistant(line_vertices.first,
+ line_vertices.second));
+ }
- // assert that we only set
- // boundary info once
- AssertThrow (! (line->boundary_indicator() != 0 &&
- line->boundary_indicator() != numbers::internal_face_boundary_id),
- ExcMultiplySetLineInfoOfLine(line_vertices.first,
- line_vertices.second));
+ // assert that we only set
+ // boundary info once
+ AssertThrow (! (line->boundary_indicator() != 0 &&
+ line->boundary_indicator() != numbers::internal_face_boundary_id),
+ ExcMultiplySetLineInfoOfLine(line_vertices.first,
+ line_vertices.second));
- // Assert that only exterior lines
- // are given a boundary indicator
- AssertThrow (! (line->boundary_indicator() == numbers::internal_face_boundary_id),
- ExcInteriorLineCantBeBoundary());
+ // Assert that only exterior lines
+ // are given a boundary indicator
+ AssertThrow (! (line->boundary_indicator() == numbers::internal_face_boundary_id),
+ ExcInteriorLineCantBeBoundary());
- line->set_boundary_indicator (boundary_line->boundary_id);
- }
+ line->set_boundary_indicator (boundary_line->boundary_id);
+ }
- // finally update neighborship info
- for (typename Triangulation<dim,spacedim>::cell_iterator
- cell=triangulation.begin(); cell!=triangulation.end(); ++cell)
- for (unsigned int side=0; side<4; ++side)
- if (adjacent_cells[cell->line(side)->index()][0] == cell)
- // first adjacent cell is
- // this one
- {
- if (adjacent_cells[cell->line(side)->index()].size() == 2)
- // there is another
- // adjacent cell
- cell->set_neighbor (side,
- adjacent_cells[cell->line(side)->index()][1]);
- }
- // first adjacent cell is not this
- // one, -> it must be the neighbor
- // we are looking for
- else
+ // finally update neighborship info
+ for (typename Triangulation<dim,spacedim>::cell_iterator
+ cell=triangulation.begin(); cell!=triangulation.end(); ++cell)
+ for (unsigned int side=0; side<4; ++side)
+ if (adjacent_cells[cell->line(side)->index()][0] == cell)
+ // first adjacent cell is
+ // this one
+ {
+ if (adjacent_cells[cell->line(side)->index()].size() == 2)
+ // there is another
+ // adjacent cell
cell->set_neighbor (side,
- adjacent_cells[cell->line(side)->index()][0]);
- }
+ adjacent_cells[cell->line(side)->index()][1]);
+ }
+ // first adjacent cell is not this
+ // one, -> it must be the neighbor
+ // we are looking for
+ else
+ cell->set_neighbor (side,
+ adjacent_cells[cell->line(side)->index()][0]);
+ }
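// Illustration (not part of the patch): a hypothetical caller of the 2d
// branch above, showing how SubCellData::boundary_lines end up as boundary
// indicators.  The boundary line is deliberately given with its vertices in
// reverse order; the lookup above tries both orientations before giving up
// with ExcLineInexistant.
#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <vector>

void make_marked_square ()
{
  using namespace dealii;

  std::vector<Point<2> > vertices;
  vertices.push_back (Point<2> (0., 0.));   // vertex 0
  vertices.push_back (Point<2> (1., 0.));   // vertex 1
  vertices.push_back (Point<2> (0., 1.));   // vertex 2
  vertices.push_back (Point<2> (1., 1.));   // vertex 3

  std::vector<CellData<2> > cells (1);
  cells[0].vertices[0] = 0;                 // deal.II's lexicographic
  cells[0].vertices[1] = 1;                 // vertex ordering for quads
  cells[0].vertices[2] = 2;
  cells[0].vertices[3] = 3;
  cells[0].material_id = 0;

  SubCellData subcelldata;
  CellData<1> bottom_edge;
  bottom_edge.vertices[0] = 1;              // reversed on purpose: 1 -> 0
  bottom_edge.vertices[1] = 0;
  bottom_edge.boundary_id = 1;              // becomes the boundary indicator
  subcelldata.boundary_lines.push_back (bottom_edge);

  Triangulation<2> triangulation;
  triangulation.create_triangulation (vertices, cells, subcelldata);
}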
- /**
- * Invent an object which compares two internal::Triangulation::TriaObject<2>
- * against each other. This comparison is needed in order to establish a map
- * of TriaObject<2> to iterators in the Triangulation<3,3>::create_triangulation
- * function.
- *
- * Since this comparison is not canonical, we do not include it into the
- * general internal::Triangulation::TriaObject<2> class.
- */
- struct QuadComparator
+ /**
+ * Invent an object which compares two internal::Triangulation::TriaObject<2>
+ * against each other. This comparison is needed in order to establish a map
+ * of TriaObject<2> to iterators in the Triangulation<3,3>::create_triangulation
+ * function.
+ *
+ * Since this comparison is not canonical, we do not include it into the
+ * general internal::Triangulation::TriaObject<2> class.
+ */
+ struct QuadComparator
+ {
+ inline bool operator () (const internal::Triangulation::TriaObject<2> &q1,
+ const internal::Triangulation::TriaObject<2> &q2) const
{
- inline bool operator () (const internal::Triangulation::TriaObject<2> &q1,
- const internal::Triangulation::TriaObject<2> &q2) const
- {
- // here is room to
- // optimize the repeated
- // equality test of the
- // previous lines; the
- // compiler will probably
- // take care of most of
- // it anyway
- if ((q1.face(0) < q2.face(0)) ||
- ((q1.face(0) == q2.face(0)) &&
- (q1.face(1) < q2.face(1))) ||
- ((q1.face(0) == q2.face(0)) &&
- (q1.face(1) == q2.face(1)) &&
- (q1.face(2) < q2.face(2))) ||
- ((q1.face(0) == q2.face(0)) &&
- (q1.face(1) == q2.face(1)) &&
- (q1.face(2) == q2.face(2)) &&
- (q1.face(3) < q2.face(3))))
- return true;
- else
- return false;
- }
- };
+ // here is room to
+ // optimize the repeated
+ // equality test of the
+ // previous lines; the
+ // compiler will probably
+ // take care of most of
+ // it anyway
+ if ((q1.face(0) < q2.face(0)) ||
+ ((q1.face(0) == q2.face(0)) &&
+ (q1.face(1) < q2.face(1))) ||
+ ((q1.face(0) == q2.face(0)) &&
+ (q1.face(1) == q2.face(1)) &&
+ (q1.face(2) < q2.face(2))) ||
+ ((q1.face(0) == q2.face(0)) &&
+ (q1.face(1) == q2.face(1)) &&
+ (q1.face(2) == q2.face(2)) &&
+ (q1.face(3) < q2.face(3))))
+ return true;
+ else
+ return false;
+ }
+ };
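// A self-contained analogue in plain standard C++ (deliberately not using
// deal.II types) of what QuadComparator enables: a quad, described by its
// four bounding-line indices, used as the key of a std::map through a
// lexicographic strict weak ordering.  QuadComparator spells this ordering
// out by hand for TriaObject<2>; std::array gets it for free.
#include <array>
#include <map>

int quad_key_sketch ()
{
  typedef std::array<int,4> QuadKey;        // the four line indices

  std::map<QuadKey, int> needed_quads;      // int stands in for an iterator

  const QuadKey quad = {{ 0, 1, 2, 3 }};
  needed_quads[quad] = 42;                  // insert once ...

  // ... and find it again by the same four line indices
  return (needed_quads.find (quad) != needed_quads.end()) ? 0 : 1;
}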
template <int dim, int spacedim>
FEFaceValues<dim,spacedim>::FEFaceValues (const hp::MappingCollection<dim,spacedim> &mapping,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (mapping,
- fe_collection,
- q_collection,
- update_flags)
- const hp::FECollection<dim,spacedim> &fe_collection,
++ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (mapping,
+ fe_collection,
+ q_collection,
+ update_flags)
{}
template <int dim, int spacedim>
- FEFaceValues<dim,spacedim>::FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ FEFaceValues<dim,spacedim>::FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (fe_collection,
- q_collection,
- update_flags)
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (fe_collection,
+ q_collection,
+ update_flags)
{}
template <int dim, int spacedim>
FESubfaceValues<dim,spacedim>::FESubfaceValues (const hp::MappingCollection<dim,spacedim> &mapping,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (mapping,
- fe_collection,
- q_collection,
- update_flags)
- const hp::FECollection<dim,spacedim> &fe_collection,
++ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (mapping,
+ fe_collection,
+ q_collection,
+ update_flags)
{}
template <int dim, int spacedim>
- FESubfaceValues<dim,spacedim>::FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ FESubfaceValues<dim,spacedim>::FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (fe_collection,
- q_collection,
- update_flags)
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (fe_collection,
+ q_collection,
+ update_flags)
{}
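// Rough usage sketch (not part of the patch) for the hp face-values
// constructors above: build the collections, construct an hp::FEFaceValues
// from them, and extract the matching FEFaceValues for the current cell
// after reinit().  The cell/face loop is only indicated in comments because
// it needs a surrounding hp::DoFHandler.
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/hp/fe_values.h>

void hp_face_values_sketch ()
{
  using namespace dealii;

  hp::FECollection<2>  fe_collection;
  hp::QCollection<1>   face_quadratures;
  fe_collection.push_back (FE_Q<2> (1));
  face_quadratures.push_back (QGauss<1> (2));

  hp::FEFaceValues<2> hp_fe_face_values (fe_collection,
                                         face_quadratures,
                                         update_values | update_JxW_values);

  // within a loop over the active cells and faces of an hp::DoFHandler<2>:
  //   hp_fe_face_values.reinit (cell, face_no);
  //   const FEFaceValues<2> &fe_face_values =
  //     hp_fe_face_values.get_present_fe_values ();
}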
Vector::Vector (const MPI_Comm &communicator,
- const VectorBase &v,
+ const VectorBase &v,
const unsigned int local_size)
- :
- communicator (communicator)
+ :
+ communicator (communicator)
{
Vector::create_vector (v.size(), local_size);
- SolverBase::SolverBase (SolverControl &cn,
+ SolverBase::SolverBase (SolverControl &cn,
const MPI_Comm &mpi_communicator)
- :
- solver_control (cn),
- mpi_communicator (mpi_communicator)
+ :
+ solver_control (cn),
+ mpi_communicator (mpi_communicator)
{}
template
void SparseDirectMA27::solve (const SparseMatrix<double> &matrix,
- Vector<double> &rhs_and_solution);
+ Vector<double> &rhs_and_solution);
template
-void SparseDirectMA27::solve (const SparseMatrix<float> &matrix,
+void SparseDirectMA27::solve (const SparseMatrix<float> &matrix,
Vector<double> &rhs_and_solution);
- SolverBase::SolverBase (SolverControl &cn)
+ SolverBase::SolverBase (SolverControl &cn)
- :
- solver_name (gmres),
- solver_control (cn)
+ :
+ solver_name (gmres),
+ solver_control (cn)
{}
- SolverDirect::SolverDirect (SolverControl &cn,
+ SolverDirect::SolverDirect (SolverControl &cn,
const AdditionalData &data)
- :
- solver_control (cn),
- additional_data (data.output_solver_details)
+ :
+ solver_control (cn),
+ additional_data (data.output_solver_details)
{}
- SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
+ SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
const unsigned int n_max_entries_per_row)
- :
- column_space_map (new Epetra_Map (input_map)),
- matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
- int(n_max_entries_per_row), false)),
- last_action (Zero),
- compressed (false)
+ :
+ column_space_map (new Epetra_Map (input_map)),
+ matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
+ int(n_max_entries_per_row), false)),
+ last_action (Zero),
+ compressed (false)
{}
- SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
+ SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
const unsigned int n_max_entries_per_row)
- :
- column_space_map (new Epetra_Map (input_col_map)),
- matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
- int(n_max_entries_per_row), false)),
- last_action (Zero),
- compressed (false)
+ :
+ column_space_map (new Epetra_Map (input_col_map)),
+ matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
+ int(n_max_entries_per_row), false)),
+ last_action (Zero),
+ compressed (false)
{}
void
SparseMatrix::reinit (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const SparsityType &sparsity_pattern,
+ const SparsityType &sparsity_pattern,
const bool exchange_data)
{
- // release memory before reallocation
+ // release memory before reallocation
temp_vector.clear();
matrix.reset();
reinit (v, false, true);
}
- :
- VectorBase()
+ Vector::Vector (const MPI_Comm &communicator,
+ const IndexSet &local,
+ const IndexSet &ghost)
- IndexSet parallel_partitioning = local;
- parallel_partitioning.add_indices(ghost);
- reinit(parallel_partitioning, communicator);
++ :
++ VectorBase()
+ {
++ IndexSet parallel_partitioning = local;
++ parallel_partitioning.add_indices(ghost);
++ reinit(parallel_partitioning, communicator);
+ }
+
Vector::~Vector ()
}
- IndexSet parallel_partitioning = local;
- parallel_partitioning.add_indices(ghost);
- reinit(parallel_partitioning, communicator);
+ void Vector::reinit(const MPI_Comm &communicator, const IndexSet &local, const IndexSet &ghost)
+ {
++ IndexSet parallel_partitioning = local;
++ parallel_partitioning.add_indices(ghost);
++ reinit(parallel_partitioning, communicator);
+ }
+
Vector &
Vector::operator = (const Vector &v)
std::size_t
VectorBase::memory_consumption () const
{
- //TODO[TH]: No accurate memory
- //consumption for Trilinos vectors
- //yet. This is a rough approximation with
- //one index and the value per local
- //entry.
+ //TODO[TH]: No accurate memory
+ //consumption for Trilinos vectors
+ //yet. This is a rough approximation with
+ //one index and the value per local
+ //entry.
return sizeof(*this)
- + this->local_size()*( sizeof(double)+sizeof(int) );
+ + this->local_size()*( sizeof(double)+sizeof(int) );
}
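// Back-of-the-envelope form of the estimate above (hedged: sizes are
// platform dependent): per locally owned entry it counts one value plus one
// index, i.e. typically 8 + 4 = 12 bytes, on top of the size of the object
// itself.
#include <cstddef>

std::size_t estimated_vector_memory (const std::size_t object_size,
                                     const std::size_t local_size)
{
  return object_size + local_size * (sizeof(double) + sizeof(int));
}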
+ void
+ VectorBase::update_ghost_values() const
+ {
+ }
+
} /* end of namespace TrilinosWrappers */
template <>
void MGDoFHandler<2>::renumber_dofs (const unsigned int level,
- const std::vector<unsigned int> &new_numbers) {
- const std::vector<unsigned int> &new_numbers)
++ const std::vector<unsigned int> &new_numbers)
+ {
Assert (new_numbers.size() == n_dofs(level),
DoFHandler<2>::ExcRenumberingIncomplete());
template <>
void MGDoFHandler<3>::renumber_dofs (const unsigned int level,
- const std::vector<unsigned int> &new_numbers) {
- const std::vector<unsigned int> &new_numbers)
++ const std::vector<unsigned int> &new_numbers)
+ {
Assert (new_numbers.size() == n_dofs(level),
DoFHandler<3>::ExcRenumberingIncomplete());
}
- template <int dim, int spacedim>
- void
- extract_non_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::set<unsigned int> > &non_interface_dofs)
- {
- Assert (non_interface_dofs.size() == mg_dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (non_interface_dofs.size(),
- mg_dof_handler.get_tria().n_levels()));
+ template <int dim, int spacedim>
+ void
+ extract_non_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::set<unsigned int> > &non_interface_dofs)
++ std::vector<std::set<unsigned int> > &non_interface_dofs)
+ {
+ Assert (non_interface_dofs.size() == mg_dof_handler.get_tria().n_levels(),
+ ExcDimensionMismatch (non_interface_dofs.size(),
+ mg_dof_handler.get_tria().n_levels()));
- const FiniteElement<dim,spacedim> &fe = mg_dof_handler.get_fe();
+ const FiniteElement<dim,spacedim> &fe = mg_dof_handler.get_fe();
- const unsigned int dofs_per_cell = fe.dofs_per_cell;
- const unsigned int dofs_per_face = fe.dofs_per_face;
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int dofs_per_face = fe.dofs_per_face;
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
- std::vector<bool> cell_dofs(dofs_per_cell, false);
- std::vector<bool> cell_dofs_interface(dofs_per_cell, false);
+ std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<bool> cell_dofs(dofs_per_cell, false);
+ std::vector<bool> cell_dofs_interface(dofs_per_cell, false);
- typename MGDoFHandler<dim>::cell_iterator cell = mg_dof_handler.begin(),
- endc = mg_dof_handler.end();
+ typename MGDoFHandler<dim>::cell_iterator cell = mg_dof_handler.begin(),
+ endc = mg_dof_handler.end();
- for (; cell!=endc; ++cell)
- {
- std::fill (cell_dofs.begin(), cell_dofs.end(), false);
- std::fill (cell_dofs_interface.begin(), cell_dofs_interface.end(), false);
+ for (; cell!=endc; ++cell)
+ {
+ std::fill (cell_dofs.begin(), cell_dofs.end(), false);
+ std::fill (cell_dofs_interface.begin(), cell_dofs_interface.end(), false);
- for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
- {
- const typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_nr);
- if (!face->at_boundary())
- {
- //interior face
- const typename MGDoFHandler<dim>::cell_iterator
+ for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
+ {
+ const typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_nr);
+ if (!face->at_boundary())
+ {
+ //interior face
+ const typename MGDoFHandler<dim>::cell_iterator
neighbor = cell->neighbor(face_nr);
- if ((neighbor->level() < cell->level()))
- {
- for (unsigned int j=0; j<dofs_per_face; ++j)
- cell_dofs_interface[fe.face_to_cell_index(j,face_nr)] = true;
- }
- else
+ if ((neighbor->level() < cell->level()))
+ {
+ for (unsigned int j=0; j<dofs_per_face; ++j)
+ cell_dofs_interface[fe.face_to_cell_index(j,face_nr)] = true;
+ }
+ else
+ {
+ for (unsigned int j=0; j<dofs_per_face; ++j)
+ cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
+ }
+ }
+ else
{
+ //boundary face
for (unsigned int j=0; j<dofs_per_face; ++j)
- cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
+ cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
}
- }
- else
- {
- //boundary face
- for (unsigned int j=0; j<dofs_per_face; ++j)
- cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
}
- }
- const unsigned int level = cell->level();
- cell->get_mg_dof_indices (local_dof_indices);
+ const unsigned int level = cell->level();
+ cell->get_mg_dof_indices (local_dof_indices);
- for(unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
if (cell_dofs[i] && !cell_dofs_interface[i])
non_interface_dofs[level].insert(local_dof_indices[i]);
- }
- }
+ }
+ }
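// Minimal usage sketch (hypothetical surrounding code, assuming the same
// headers and namespace as the function above): as the Assert requires, the
// caller sizes the output vector to the number of triangulation levels
// before calling extract_non_interface_dofs().  Afterwards,
// non_interface_dofs[l] holds the level-l dofs that lie on cell faces but
// not on the refinement interface to coarser cells.
template <int dim>
void collect_non_interface_dofs (const MGDoFHandler<dim> &mg_dof_handler)
{
  std::vector<std::set<unsigned int> >
    non_interface_dofs (mg_dof_handler.get_tria().n_levels());

  extract_non_interface_dofs (mg_dof_handler, non_interface_dofs);
}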
template <int dim, int spacedim>
template <int spacedim>
template <typename InputVector, class DH>
void KellyErrorEstimator<1,spacedim>::
-estimate (const Mapping<1,spacedim> & /*mapping*/,
- const DH & /*dof_handler*/,
+estimate (const Mapping<1,spacedim> &/*mapping*/,
+ const DH &/*dof_handler*/,
- const hp::QCollection<0> &,
+ const hp::QCollection<0> &,
- const typename FunctionMap<spacedim>::type & /*neumann_bc*/,
- const std::vector<const InputVector *> & /*solutions*/,
- std::vector<Vector<float>*> & /*errors*/,
- const ComponentMask & /*component_mask_*/,
- const Function<spacedim> * /*coefficient*/,
+ const typename FunctionMap<spacedim>::type &/*neumann_bc*/,
+ const std::vector<const InputVector *> &/*solutions*/,
+ std::vector<Vector<float>*> &/*errors*/,
+ const ComponentMask &/*component_mask_*/,
+ const Function<spacedim> */*coefficient*/,
const unsigned int,
const types::subdomain_id /*subdomain_id*/,
const types::material_id /*material_id*/)
template <int dim, int spacedim>
void
- create_boundary_mass_matrix (const Mapping<dim, spacedim> &mapping,
+ create_boundary_mass_matrix (const Mapping<dim, spacedim> &mapping,
const DoFHandler<dim,spacedim> &dof,
const Quadrature<dim-1> &q,
- SparseMatrix<double> &matrix,
- const typename FunctionMap<spacedim>::type &boundary_functions,
+ SparseMatrix<double> &matrix,
+ const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
std::vector<unsigned int> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
+ const Function<spacedim> *const coefficient,
std::vector<unsigned int> component_mapping)
{
- // what would that be in 1d? the
- // identity matrix on the boundary
- // dofs?
+ // what would that be in 1d? the
+ // identity matrix on the boundary
+ // dofs?
if (dim == 1)
{
Assert (false, ExcNotImplemented());