template <int dim>
class EvaluationBase
{
public:
  virtual ~EvaluationBase ();

  void set_refinement_cycle (const unsigned int refinement_cycle);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const = 0;

protected:
  unsigned int refinement_cycle;
};
template <int dim>
class PointValueEvaluation : public EvaluationBase<dim>
{
public:
  PointValueEvaluation (const Point<dim> &evaluation_point,
                        TableHandler     &results_table);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const;

  DeclException1 (ExcEvaluationPointNotFound,
                  Point<dim>,
                  << "The evaluation point " << arg1
                  << " was not found among the vertices of the present grid.");

private:
  const Point<dim> evaluation_point;
  TableHandler    &results_table;
};
template <int dim>
void
PointValueEvaluation<dim>::
operator () (const DoFHandler<dim> &dof_handler,
             const Vector<double>  &solution) const
{
// First allocate a variable that will hold the point value. Initialize it
// with a value that is clearly bogus, so that if we fail to set it to a
// reasonable value we will notice at once. This may not be necessary in a
// function as small as this one, since we can easily see all possible
// paths of execution here, but it proved to be helpful for more complex
// cases, and so we employ this strategy here as well.
double point_value = 1e20;
// Then loop over all cells and all their vertices, and check whether a
// vertex matches the evaluation point. If this is the case, then extract
// the point value, set a flag that we have found the point of interest,
// and exit the loop.
typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
bool evaluation_point_found = false;
for (; (cell!=endc) && !evaluation_point_found; ++cell)
for (unsigned int vertex=0;
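     // (The excerpt is cut off here. A minimal completion sketch -- not
     //  necessarily the tutorial's exact code; in particular the table
     //  column names below are assumptions of this sketch:)
     vertex < GeometryInfo<dim>::vertices_per_cell;
     ++vertex)
    if (cell->vertex(vertex) == evaluation_point)
      {
        // a vertex-based Lagrange element carries the point value in the
        // degree of freedom attached to this vertex
        point_value = solution(cell->vertex_dof_index(vertex, 0));
        evaluation_point_found = true;
        break;
      }

AssertThrow (evaluation_point_found,
             ExcEvaluationPointNotFound(evaluation_point));

results_table.add_value ("DoFs", dof_handler.n_dofs());
results_table.add_value ("u(x_0)", point_value);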
template <int dim>
class SolutionOutput : public EvaluationBase<dim>
{
public:
  SolutionOutput (const std::string                         &output_name_base,
                  const typename DataOut<dim>::OutputFormat  output_format);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const;

private:
  const std::string                         output_name_base;
  const typename DataOut<dim>::OutputFormat output_format;
};
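// For illustration, a minimal sketch of what an implementation of
// SolutionOutput<dim>::operator() could look like using the usual DataOut
// workflow. The file-naming scheme and the use of default_suffix() are
// assumptions of this sketch (and <fstream>/<sstream> are assumed to be
// included), not necessarily the tutorial's exact code:
template <int dim>
void
SolutionOutput<dim>::operator () (const DoFHandler<dim> &dof_handler,
                                  const Vector<double>  &solution) const
{
  DataOut<dim> data_out;
  data_out.attach_dof_handler (dof_handler);
  data_out.add_data_vector (solution, "solution");
  data_out.build_patches ();

  // compose a file name from the base name, the refinement cycle stored
  // in the base class, and the suffix matching the requested format
  std::ostringstream filename;
  filename << output_name_base << "-" << this->refinement_cycle
           << data_out.default_suffix (output_format);

  std::ofstream out (filename.str().c_str());
  data_out.write (out, output_format);
}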
template <int dim>
class EvaluationBase
{
public:
  virtual ~EvaluationBase ();

  void set_refinement_cycle (const unsigned int refinement_cycle);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const = 0;

protected:
  unsigned int refinement_cycle;
};
template <int dim>
class PointValueEvaluation : public EvaluationBase<dim>
{
public:
  PointValueEvaluation (const Point<dim> &evaluation_point);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const;

  DeclException1 (ExcEvaluationPointNotFound,
                  Point<dim>,
                  << "The evaluation point " << arg1
                  << " was not found among the vertices of the present grid.");

private:
  const Point<dim> evaluation_point;
};
template <int dim>
class PointXDerivativeEvaluation : public EvaluationBase<dim>
{
public:
  PointXDerivativeEvaluation (const Point<dim> &evaluation_point);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const;

  DeclException1 (ExcEvaluationPointNotFound,
                  Point<dim>,
                  << "The evaluation point " << arg1
                  << " was not found among the vertices of the present grid.");

private:
  const Point<dim> evaluation_point;
};
template <int dim>
void
PointXDerivativeEvaluation<dim>::
operator () (const DoFHandler<dim> &dof_handler,
             const Vector<double>  &solution) const
{
// This time initialize the return value with something useful, since we
// will have to add up a number of contributions and take the mean value
// afterwards...
double point_derivative = 0;
// ...then have some objects of which the meaning will become clear
// below...
QTrapez<dim> vertex_quadrature;
FEValues<dim> fe_values (dof_handler.get_fe(),
vertex_quadrature,
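                         // (The excerpt is cut off here. A hedged sketch of
                         //  how the evaluation typically continues; the
                         //  update flags and the averaging loop below are
                         //  assumptions of this sketch, not a verbatim copy
                         //  of the tutorial:)
                         update_gradients | update_quadrature_points);
std::vector<Tensor<1,dim> > solution_gradients (vertex_quadrature.size());

unsigned int evaluation_point_hits = 0;

typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
for (; cell!=endc; ++cell)
  for (unsigned int vertex=0;
       vertex < GeometryInfo<dim>::vertices_per_cell;
       ++vertex)
    if (cell->vertex(vertex) == evaluation_point)
      {
        // average the x-derivative over all cells adjacent to the
        // evaluation point, evaluated at the matching quadrature point
        fe_values.reinit (cell);
        fe_values.get_function_gradients (solution, solution_gradients);

        for (unsigned int q=0; q<solution_gradients.size(); ++q)
          if (fe_values.quadrature_point(q) == evaluation_point)
            {
              point_derivative += solution_gradients[q][0];
              ++evaluation_point_hits;
              break;
            }
        break;
      }

AssertThrow (evaluation_point_hits > 0,
             ExcEvaluationPointNotFound(evaluation_point));

point_derivative /= evaluation_point_hits;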
template <int dim>
class GridOutput : public EvaluationBase<dim>
{
public:
  GridOutput (const std::string &output_name_base);

  virtual void operator () (const DoFHandler<dim> &dof_handler,
                            const Vector<double>  &solution) const;

private:
  const std::string output_name_base;
};
template <int dim>
inline
void
BodyForce<dim>::vector_value (const Point<dim> &/*p*/,
                              Vector<double>   &values) const
{
Assert (values.size() == dim,
        ExcDimensionMismatch (values.size(), dim));
const double g = 9.81;
const double rho = 7700;
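// (The excerpt is cut off here. Given the gravitational acceleration g and
//  the density rho defined above, a natural completion -- a sketch, not
//  necessarily the tutorial's exact code -- is a body force acting
//  downward in the last coordinate direction:)
values = 0;
values(dim-1) = -rho * g;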
template <int dim>
void
IncrementalBoundaryValues<dim>::
vector_value (const Point<dim> &/*p*/,
              Vector<double>   &values) const
{
Assert (values.size() == dim,
        ExcDimensionMismatch (values.size(), dim));
values = 0;
values(2) = -present_timestep * velocity;
};
// And then we also have to define these respective functions, of course.
// Given our discussion in the introduction of what the solution should
// look like, the following computations should be straightforward:
template <int dim>
double RightHandSide<dim>::value (const Point<dim> &/*p*/,
const unsigned int /*component*/) const
{
return 0;
template <int dim>
class NeutronDiffusionProblem
{
public:
  class Parameters
  {
  public:
    Parameters ();

    static void declare_parameters (ParameterHandler &prm);
    void get_parameters (ParameterHandler &prm);

    unsigned int n_groups;
    unsigned int n_refinement_cycles;

    unsigned int fe_degree;

    double convergence_tolerance;
  };



  NeutronDiffusionProblem (const Parameters &parameters);
  ~NeutronDiffusionProblem ();

  void run ();

private:
  // @sect5{Private member functions}

  // There are not that many member functions in this class since most of
  // the functionality has been moved into the <code>EnergyGroup</code>
  // class and is simply called from the <code>run()</code> member function
  // of this class. The ones that remain have self-explanatory names:
  void initialize_problem();

  void refine_grid ();

  double get_total_fission_source () const;


  // @sect5{Private member variables}

  // Next, we have a few member variables. In particular, these are (i) a
  // reference to the parameter object (owned by the main function of this
  // program, and passed to the constructor of this class), (ii) an object
  // describing the material parameters for the number of energy groups
  // requested in the input file, and (iii) the finite element to be used
  // by all energy groups:
  const Parameters  &parameters;
  const MaterialData material_data;
  FE_Q<dim>          fe;

  // Furthermore, we have (iv) the value of the computed eigenvalue at the
  // present iteration. This is, in fact, the only part of the solution
  // that is shared between all energy groups -- all other parts of the
  // solution, such as neutron fluxes, are particular to one or the other
  // energy group, and are therefore stored in objects that describe a
  // single energy group:
  double k_eff;

  // Finally, (v), we have an array of pointers to the energy group
  // objects. The length of this array is, of course, equal to the number
  // of energy groups specified in the parameter file.
  std::vector<EnergyGroup<dim>*> energy_groups;
};
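// For illustration, a hedged sketch of how the nested Parameters class
// above typically interacts with ParameterHandler. The entry names,
// defaults and patterns here are assumptions of this sketch, not
// necessarily those used by the actual program:
template <int dim>
void
NeutronDiffusionProblem<dim>::Parameters::
declare_parameters (ParameterHandler &prm)
{
  prm.declare_entry ("Number of energy groups", "2",
                     Patterns::Integer (),
                     "The number of energy groups considered");
  prm.declare_entry ("Refinement cycles", "5",
                     Patterns::Integer (),
                     "Number of refinement cycles to be performed");
  prm.declare_entry ("Finite element degree", "2",
                     Patterns::Integer (),
                     "Polynomial degree of the finite element to be used");
  prm.declare_entry ("Power iteration tolerance", "1e-12",
                     Patterns::Double (),
                     "Iterations are stopped when the change in k_eff "
                     "falls below this tolerance");
}

template <int dim>
void
NeutronDiffusionProblem<dim>::Parameters::
get_parameters (ParameterHandler &prm)
{
  n_groups              = prm.get_integer ("Number of energy groups");
  n_refinement_cycles   = prm.get_integer ("Refinement cycles");
  fe_degree             = prm.get_integer ("Finite element degree");
  convergence_tolerance = prm.get_double  ("Power iteration tolerance");
}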
// The constructor takes the ParameterHandler object and stores it in a
// reference. It also initializes the DoFHandler and the finite element
// system, which consists of two copies of the scalar Q1 field, one for $v$
// and one for $w$:
template <int dim>
UltrasoundProblem<dim>::UltrasoundProblem (ParameterHandler &param)
  :
  prm(param),
  dof_handler(triangulation),
  fe(FE_Q<dim>(1), 2)
{}
template <int dim>
double
TemperatureRightHandSide<dim>::value (const Point<dim>  &p,
                                      const unsigned int component) const
{
Assert (component == 0,
- ExcMessage ("Invalid operation for a scalar function."));
+ ExcMessage ("Invalid operation for a scalar function."));
Assert ((dim==2) || (dim==3), ExcNotImplemented());
template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::
BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
                         const InverseMatrix<TrilinosWrappers::SparseMatrix,
                                             PreconditionerMp>     &Mpinv,
                         const PreconditionerA                     &Apreconditioner)
  :
  stokes_matrix    (&S),
  m_inverse        (&Mpinv),
  a_preconditioner (Apreconditioner),
  tmp              (stokes_matrix->block(1,1).m())
{}
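// To make clear why the constructor above stores these four objects, here
// is a hedged sketch of the kind of vmult() member function such a block
// Schur preconditioner typically provides. The exact update sequence is an
// assumption of this sketch, and tmp is assumed to be a mutable member
// vector so that it can be used inside a const function:
void vmult (TrilinosWrappers::BlockVector       &dst,
            const TrilinosWrappers::BlockVector &src) const
{
  // approximately invert the top-left velocity block
  a_preconditioner.vmult (dst.block(0), src.block(0));

  // form the Schur-complement right hand side: residual() computes
  // tmp = src.block(1) - B * dst.block(0), which we then negate ...
  stokes_matrix->block(1,0).residual (tmp, dst.block(0), src.block(1));
  tmp *= -1;

  // ... and apply the (approximate) inverse pressure mass matrix to it
  m_inverse->vmult (dst.block(1), tmp);
}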
template <int dim>
class BoussinesqFlowProblem
{
public:
  BoussinesqFlowProblem ();
  void run ();

private:
  void setup_dofs ();
  void assemble_stokes_preconditioner ();
  void build_stokes_preconditioner ();
  void assemble_stokes_system ();
  void assemble_temperature_system (const double maximal_velocity);
  void assemble_temperature_matrix ();
  double get_maximal_velocity () const;
  std::pair<double,double> get_extrapolated_temperature_range () const;
  void solve ();
  void output_results () const;
  void refine_mesh (const unsigned int max_grid_level);

  double
  compute_viscosity (const std::vector<double>         &old_temperature,
                     const std::vector<double>         &old_old_temperature,
                     const std::vector<Tensor<1,dim> > &old_temperature_grads,
                     const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
                     const std::vector<double>         &old_temperature_laplacians,
                     const std::vector<double>         &old_old_temperature_laplacians,
                     const std::vector<Tensor<1,dim> > &old_velocity_values,
                     const std::vector<Tensor<1,dim> > &old_old_velocity_values,
                     const std::vector<double>         &gamma_values,
                     const double                       global_u_infty,
                     const double                       global_T_variation,
                     const double                       cell_diameter) const;


  Triangulation<dim>                  triangulation;
  double                              global_Omega_diameter;

  const unsigned int                  stokes_degree;
  FESystem<dim>                       stokes_fe;
  DoFHandler<dim>                     stokes_dof_handler;
  ConstraintMatrix                    stokes_constraints;

  std::vector<unsigned int>           stokes_block_sizes;
  TrilinosWrappers::BlockSparseMatrix stokes_matrix;
  TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;

  TrilinosWrappers::BlockVector       stokes_solution;
  TrilinosWrappers::BlockVector       old_stokes_solution;
  TrilinosWrappers::BlockVector       stokes_rhs;


  const unsigned int                  temperature_degree;
  FE_Q<dim>                           temperature_fe;
  DoFHandler<dim>                     temperature_dof_handler;
  ConstraintMatrix                    temperature_constraints;

  TrilinosWrappers::SparseMatrix      temperature_mass_matrix;
  TrilinosWrappers::SparseMatrix      temperature_stiffness_matrix;
  TrilinosWrappers::SparseMatrix      temperature_matrix;

  TrilinosWrappers::Vector            temperature_solution;
  TrilinosWrappers::Vector            old_temperature_solution;
  TrilinosWrappers::Vector            old_old_temperature_solution;
  TrilinosWrappers::Vector            temperature_rhs;


  double                              time_step;
  double                              old_time_step;
  unsigned int                        timestep_number;

  std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
  std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC>  Mp_preconditioner;

  bool                                rebuild_stokes_matrix;
  bool                                rebuild_temperature_matrices;
  bool                                rebuild_stokes_preconditioner;
};
template <int dim>
double
BoussinesqFlowProblem<dim>::
compute_viscosity (const std::vector<double>         &old_temperature,
                   const std::vector<double>         &old_old_temperature,
                   const std::vector<Tensor<1,dim> > &old_temperature_grads,
                   const std::vector<Tensor<1,dim> > &old_old_temperature_grads,
                   const std::vector<double>         &old_temperature_laplacians,
                   const std::vector<double>         &old_old_temperature_laplacians,
                   const std::vector<Tensor<1,dim> > &old_velocity_values,
                   const std::vector<Tensor<1,dim> > &old_old_velocity_values,
                   const std::vector<double>         &gamma_values,
                   const double                       global_u_infty,
                   const double                       global_T_variation,
                   const double                       cell_diameter) const
{
const double beta = 0.015 * dim;
const double alpha = 1;
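// (The excerpt is cut off here. What follows is a hedged sketch of how the
//  remainder of this residual-based artificial viscosity is typically
//  computed; the diffusion coefficient EquationData::kappa and the constant
//  c_R are assumptions of this sketch, not verbatim tutorial code:)
if (global_u_infty == 0)
  return 5e-3 * cell_diameter;

const unsigned int n_q_points = old_temperature.size();

double max_residual = 0;
double max_velocity = 0;

for (unsigned int q=0; q<n_q_points; ++q)
  {
    const Tensor<1,dim> u = (old_velocity_values[q] +
                             old_old_velocity_values[q]) / 2;

    // approximate the strong residual of the temperature equation at this
    // quadrature point, averaging the two previous time levels
    const double dT_dt    = (old_temperature[q] - old_old_temperature[q])
                            / old_time_step;
    const double u_grad_T = u * (old_temperature_grads[q] +
                                 old_old_temperature_grads[q]) / 2;
    const double kappa_Delta_T = EquationData::kappa
                                 * (old_temperature_laplacians[q] +
                                    old_old_temperature_laplacians[q]) / 2;

    const double residual
      = std::abs (dT_dt + u_grad_T - kappa_Delta_T - gamma_values[q]);

    max_residual = std::max (residual,        max_residual);
    max_velocity = std::max (std::sqrt (u*u), max_velocity);
  }

// first-order viscosity, limited by the residual-based estimate
const double c_R = 0.11;
const double global_scaling = c_R * global_u_infty * global_T_variation *
                              std::pow (global_Omega_diameter, alpha - 2.);

return (beta *
        max_velocity *
        std::min (cell_diameter,
                  std::pow (cell_diameter, alpha) *
                  max_residual / global_scaling));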
template <int dim>
double
TemperatureInitialValues<dim>::value (const Point<dim> &p,
                                      const unsigned int) const
{
const double r = p.norm();
const double h = R1-R0;
template <class PreconditionerA, class PreconditionerMp>
class BlockSchurPreconditioner : public Subscriptor
{
public:
  BlockSchurPreconditioner (const TrilinosWrappers::BlockSparseMatrix &S,
                            const TrilinosWrappers::BlockSparseMatrix &Spre,
                            const PreconditionerMp                    &Mppreconditioner,
                            const PreconditionerA                     &Apreconditioner,
                            const bool                                 do_solve_A)
    :
    stokes_matrix                (&S),
    stokes_preconditioner_matrix (&Spre),
    mp_preconditioner            (Mppreconditioner),
    a_preconditioner             (Apreconditioner),
    do_solve_A                   (do_solve_A)
  {}

  void vmult (TrilinosWrappers::MPI::BlockVector       &dst,
              const TrilinosWrappers::MPI::BlockVector &src) const
  {
    TrilinosWrappers::MPI::Vector utmp(src.block(0));

    {
      SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());

      SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);

      solver.solve(stokes_preconditioner_matrix->block(1,1),
                   dst.block(1), src.block(1),
                   mp_preconditioner);

      dst.block(1) *= -1.0;
    }

    {
      stokes_matrix->block(0,1).vmult(utmp, dst.block(1));
      utmp *= -1.0;
      utmp.add(src.block(0));
    }

    if (do_solve_A == true)
      {
        SolverControl solver_control(5000, utmp.l2_norm()*1e-2);
        TrilinosWrappers::SolverCG solver(solver_control);
        solver.solve(stokes_matrix->block(0,0), dst.block(0), utmp,
                     a_preconditioner);
      }
    else
      a_preconditioner.vmult (dst.block(0), utmp);
  }

private:
  const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_matrix;
  const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_preconditioner_matrix;
  const PreconditionerMp &mp_preconditioner;
  const PreconditionerA  &a_preconditioner;
  const bool              do_solve_A;
};
}
template <int dim>
class BoussinesqFlowProblem
{
public:
  struct Parameters;
  BoussinesqFlowProblem (Parameters &parameters);
  void run ();

private:
  void setup_dofs ();
  void assemble_stokes_preconditioner ();
  void build_stokes_preconditioner ();
  void assemble_stokes_system ();
  void assemble_temperature_matrix ();
  void assemble_temperature_system (const double maximal_velocity);
  void project_temperature_field ();
  double get_maximal_velocity () const;
  double get_cfl_number () const;
  double get_entropy_variation (const double average_temperature) const;
  std::pair<double,double> get_extrapolated_temperature_range () const;
  void solve ();
  void output_results ();
  void refine_mesh (const unsigned int max_grid_level);

  double
  compute_viscosity (const std::vector<double>                  &old_temperature,
                     const std::vector<double>                  &old_old_temperature,
                     const std::vector<Tensor<1,dim> >          &old_temperature_grads,
                     const std::vector<Tensor<1,dim> >          &old_old_temperature_grads,
                     const std::vector<double>                  &old_temperature_laplacians,
                     const std::vector<double>                  &old_old_temperature_laplacians,
                     const std::vector<Tensor<1,dim> >          &old_velocity_values,
                     const std::vector<Tensor<1,dim> >          &old_old_velocity_values,
                     const std::vector<SymmetricTensor<2,dim> > &old_strain_rates,
                     const std::vector<SymmetricTensor<2,dim> > &old_old_strain_rates,
                     const double                                global_u_infty,
                     const double                                global_T_variation,
                     const double                                average_temperature,
                     const double                                global_entropy_variation,
                     const double                                cell_diameter) const;

public:

  // The first significant new component is the definition of a struct for
  // the parameters according to the discussion in the introduction. This
  // structure is initialized by reading from a parameter file during
  // construction of this object.
  struct Parameters
  {
    Parameters (const std::string &parameter_filename);

    static void declare_parameters (ParameterHandler &prm);
    void parse_parameters (ParameterHandler &prm);

    double end_time;

    unsigned int initial_global_refinement;
    unsigned int initial_adaptive_refinement;

    bool         generate_graphical_output;
    unsigned int graphical_output_interval;

    unsigned int adaptive_refinement_interval;

    double stabilization_alpha;
    double stabilization_c_R;
    double stabilization_beta;

    unsigned int stokes_velocity_degree;
    bool         use_locally_conservative_discretization;

    unsigned int temperature_degree;
  };

private:
  Parameters &parameters;

  // The <code>pcout</code> (for <i>%parallel <code>std::cout</code></i>)
  // object is used to simplify writing output: each MPI process can use
  // this to generate output as usual, but since each of these processes
  // will (hopefully) produce the same output it will just be replicated
  // many times over; with the ConditionalOStream class, only the output
  // generated by one MPI process will actually be printed to screen,
  // whereas the output by all the other processes will simply be
  // forgotten.
  ConditionalOStream pcout;

  // The following member variables will then again be similar to those in
  // step-31 (and to other tutorial programs). As mentioned in the
  // introduction, we fully distribute computations, so we will have to use
  // the parallel::distributed::Triangulation class (see step-40) but the
  // remainder of these variables is rather standard with two exceptions:
  //
  // - The <code>mapping</code> variable is used to denote a higher-order
  //   polynomial mapping. As mentioned in the introduction, we use this
  //   mapping when forming integrals through quadrature for all cells that
  //   are adjacent to either the inner or outer boundaries of our domain
  //   where the boundary is curved.
  //
  // - In a bit of naming confusion, you will notice below that some of the
  //   variables from namespace TrilinosWrappers are taken from namespace
  //   TrilinosWrappers::MPI (such as the right hand side vectors) whereas
  //   others are not (such as the various matrices). For the matrices, we
  //   happen to use the same class names for %parallel and sequential data
  //   structures, i.e., all matrices will actually be considered %parallel
  //   below. On the other hand, for vectors, only those from namespace
  //   TrilinosWrappers::MPI are actually distributed. In particular, we
  //   will frequently have to query velocities and temperatures at
  //   arbitrary quadrature points; consequently, rather than importing
  //   ghost information of a vector whenever we need access to degrees of
  //   freedom that are relevant locally but owned by another processor, we
  //   solve linear systems in %parallel but then immediately initialize a
  //   vector including ghost entries of the solution for further
  //   processing. The various <code>*_solution</code> vectors are
  //   therefore filled immediately after solving their respective linear
  //   system in %parallel and will always contain values for all
  //   @ref GlossLocallyRelevantDof "locally relevant degrees of freedom";
  //   the fully distributed vectors that we obtain from the solution
  //   process and that only ever contain the
  //   @ref GlossLocallyOwnedDof "locally owned degrees of freedom" are
  //   destroyed immediately after the solution process and after we have
  //   copied the relevant values into the member variable vectors.
  parallel::distributed::Triangulation<dim> triangulation;
  double                                    global_Omega_diameter;

  const MappingQ<dim>                       mapping;

  const FESystem<dim>                       stokes_fe;
  DoFHandler<dim>                           stokes_dof_handler;
  ConstraintMatrix                          stokes_constraints;

  TrilinosWrappers::BlockSparseMatrix       stokes_matrix;
  TrilinosWrappers::BlockSparseMatrix       stokes_preconditioner_matrix;

  TrilinosWrappers::MPI::BlockVector        stokes_solution;
  TrilinosWrappers::MPI::BlockVector        old_stokes_solution;
  TrilinosWrappers::MPI::BlockVector        stokes_rhs;


  FE_Q<dim>                                 temperature_fe;
  DoFHandler<dim>                           temperature_dof_handler;
  ConstraintMatrix                          temperature_constraints;

  TrilinosWrappers::SparseMatrix            temperature_mass_matrix;
  TrilinosWrappers::SparseMatrix            temperature_stiffness_matrix;
  TrilinosWrappers::SparseMatrix            temperature_matrix;

  TrilinosWrappers::MPI::Vector             temperature_solution;
  TrilinosWrappers::MPI::Vector             old_temperature_solution;
  TrilinosWrappers::MPI::Vector             old_old_temperature_solution;
  TrilinosWrappers::MPI::Vector             temperature_rhs;


  double                                    time_step;
  double                                    old_time_step;
  unsigned int                              timestep_number;

  std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionAMG>    Amg_preconditioner;
  std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
  std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;

  bool rebuild_stokes_matrix;
  bool rebuild_stokes_preconditioner;
  bool rebuild_temperature_matrices;
  bool rebuild_temperature_preconditioner;

  // The next member variable, <code>computing_timer</code>, is used to
  // conveniently account for compute time spent in certain "sections" of
  // the code that are repeatedly entered. For example, we will enter (and
  // leave) sections for Stokes matrix assembly and would like to
  // accumulate the run time spent in this section over all time
  // steps. Every so many time steps as well as at the end of the program
  // (through the destructor of the TimerOutput class) we will then produce
  // a nice summary of the times spent in the different sections into which
  // we categorize the run-time of this program.
  TimerOutput computing_timer;

  // After these member variables we have a number of auxiliary functions
  // that have been broken out of the ones listed above. Specifically,
  // there are first three functions that we call from
  // <code>setup_dofs</code> and then the ones that do the assembling of
  // linear systems:
  void setup_stokes_matrix (const std::vector<IndexSet> &stokes_partitioning);
  void setup_stokes_preconditioner (const std::vector<IndexSet> &stokes_partitioning);
  void setup_temperature_matrices (const IndexSet &temperature_partitioning);


  // Following the @ref MTWorkStream "task-based parallelization" paradigm,
  // we split all the assembly routines into two parts: a first part that
  // can do all the calculations on a certain cell without taking care of
  // other threads, and a second part (which is writing the local data into
  // the global matrices and vectors) which can be entered by only one
  // thread at a time. In order to implement that, we provide functions for
  // each of those two steps for all the four assembly routines that we use
  // in this program. The following eight functions do exactly this. (A
  // small, generic illustration of how such worker/copier pairs are handed
  // to WorkStream::run can be found right after this class declaration.)
  void
  local_assemble_stokes_preconditioner (const typename DoFHandler<dim>::active_cell_iterator &cell,
                                        Assembly::Scratch::StokesPreconditioner<dim>          &scratch,
                                        Assembly::CopyData::StokesPreconditioner<dim>         &data);

  void
  copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner<dim> &data);


  void
  local_assemble_stokes_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
                                Assembly::Scratch::StokesSystem<dim>                  &scratch,
                                Assembly::CopyData::StokesSystem<dim>                 &data);

  void
  copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem<dim> &data);


  void
  local_assemble_temperature_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
                                     Assembly::Scratch::TemperatureMatrix<dim>             &scratch,
                                     Assembly::CopyData::TemperatureMatrix<dim>            &data);

  void
  copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix<dim> &data);



  void
  local_assemble_temperature_rhs (const std::pair<double,double>                        global_T_range,
                                  const double                                          global_max_velocity,
                                  const double                                          global_entropy_variation,
                                  const typename DoFHandler<dim>::active_cell_iterator &cell,
                                  Assembly::Scratch::TemperatureRHS<dim>               &scratch,
                                  Assembly::CopyData::TemperatureRHS<dim>              &data);

  void
  copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS<dim> &data);

  // Finally, we forward declare a member class that we will define later
  // on and that will be used to compute a number of quantities from our
  // solution vectors that we'd like to put into the output files for
  // visualization.
  class Postprocessor;
};
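// (Illustration only; see the forward note in the class above.) The
// local_assemble_*/copy_local_to_global_* pairs are meant to be handed to
// WorkStream::run, which calls the first, thread-parallel part on many
// cells concurrently and funnels its results one at a time through the
// second, serial part. The following self-contained, hypothetical sketch
// shows the pattern with trivial scratch/copy objects and C++11 lambdas;
// the program itself binds its member functions (e.g. with std_cxx1x::bind)
// and uses its Assembly::Scratch and Assembly::CopyData classes instead:
struct ExampleScratch {};                  // per-thread temporaries (FEValues, ...)
struct ExampleCopy    { double value; };   // per-cell result, written serially

template <int dim>
void example_workstream_loop (const DoFHandler<dim> &dof_handler,
                              double                &global_sum)
{
  WorkStream::run
    (dof_handler.begin_active(),
     dof_handler.end(),
     // worker: purely local computations, may run concurrently
     [](const typename DoFHandler<dim>::active_cell_iterator &cell,
        ExampleScratch &,
        ExampleCopy    &copy)
     {
       copy.value = cell->measure();
     },
     // copier: called by one thread at a time, so writing into global
     // objects needs no further synchronization
     [&global_sum](const ExampleCopy &copy)
     {
       global_sum += copy.value;
     },
     ExampleScratch(),
     ExampleCopy());
}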
template <int dim>
double
BoussinesqFlowProblem<dim>::
compute_viscosity (const std::vector<double>                  &old_temperature,
                   const std::vector<double>                  &old_old_temperature,
                   const std::vector<Tensor<1,dim> >          &old_temperature_grads,
                   const std::vector<Tensor<1,dim> >          &old_old_temperature_grads,
                   const std::vector<double>                  &old_temperature_laplacians,
                   const std::vector<double>                  &old_old_temperature_laplacians,
                   const std::vector<Tensor<1,dim> >          &old_velocity_values,
                   const std::vector<Tensor<1,dim> >          &old_old_velocity_values,
                   const std::vector<SymmetricTensor<2,dim> > &old_strain_rates,
                   const std::vector<SymmetricTensor<2,dim> > &old_old_strain_rates,
                   const double                                global_u_infty,
                   const double                                global_T_variation,
                   const double                                average_temperature,
                   const double                                global_entropy_variation,
                   const double                                cell_diameter) const
{
if (global_u_infty == 0)
return 5e-3 * cell_diameter;
template <int dim>
struct EulerEquations
{
- // @sect4{Component description}
-
- // First a few variables that
- // describe the various components of our
- // solution vector in a generic way. This
- // includes the number of components in the
- // system (Euler's equations have one entry
- // for momenta in each spatial direction,
- // plus the energy and density components,
- // for a total of <code>dim+2</code>
- // components), as well as functions that
- // describe the index within the solution
- // vector of the first momentum component,
- // the density component, and the energy
- // density component. Note that all these
- // %numbers depend on the space dimension;
- // defining them in a generic way (rather
- // than by implicit convention) makes our
- // code more flexible and makes it easier
- // to later extend it, for example by
- // adding more components to the equations.
- static const unsigned int n_components = dim + 2;
- static const unsigned int first_momentum_component = 0;
- static const unsigned int density_component = dim;
- static const unsigned int energy_component = dim+1;
-
- // When generating graphical
- // output way down in this
- // program, we need to specify
- // the names of the solution
- // variables as well as how the
- // various components group into
- // vector and scalar fields. We
- // could describe this there, but
- // in order to keep things that
- // have to do with the Euler
- // equation localized here and
- // the rest of the program as
- // generic as possible, we
- // provide this sort of
- // information in the following
- // two functions:
- static
- std::vector<std::string>
- component_names ()
- {
- std::vector<std::string> names (dim, "momentum");
- names.push_back ("density");
- names.push_back ("energy_density");
+ // @sect4{Component description}
+
+ // First a few variables that
+ // describe the various components of our
+ // solution vector in a generic way. This
+ // includes the number of components in the
+ // system (Euler's equations have one entry
+ // for momenta in each spatial direction,
+ // plus the energy and density components,
+ // for a total of <code>dim+2</code>
+ // components), as well as functions that
+ // describe the index within the solution
+ // vector of the first momentum component,
+ // the density component, and the energy
+ // density component. Note that all these
+ // %numbers depend on the space dimension;
+ // defining them in a generic way (rather
+ // than by implicit convention) makes our
+ // code more flexible and makes it easier
+ // to later extend it, for example by
+ // adding more components to the equations.
+ static const unsigned int n_components = dim + 2;
+ static const unsigned int first_momentum_component = 0;
+ static const unsigned int density_component = dim;
+ static const unsigned int energy_component = dim+1;
+
+ // When generating graphical
+ // output way down in this
+ // program, we need to specify
+ // the names of the solution
+ // variables as well as how the
+ // various components group into
+ // vector and scalar fields. We
+ // could describe this there, but
+ // in order to keep things that
+ // have to do with the Euler
+ // equation localized here and
+ // the rest of the program as
+ // generic as possible, we
+ // provide this sort of
+ // information in the following
+ // two functions:
+ static
+ std::vector<std::string>
+ component_names ()
+ {
+ std::vector<std::string> names (dim, "momentum");
+ names.push_back ("density");
+ names.push_back ("energy_density");
- return names;
- }
+ return names;
+ }
- static
+ static
+ std::vector<DataComponentInterpretation::DataComponentInterpretation>
+ component_interpretation ()
+ {
std::vector<DataComponentInterpretation::DataComponentInterpretation>
- component_interpretation ()
- {
- std::vector<DataComponentInterpretation::DataComponentInterpretation>
- data_component_interpretation
- (dim, DataComponentInterpretation::component_is_part_of_vector);
- data_component_interpretation
- .push_back (DataComponentInterpretation::component_is_scalar);
- data_component_interpretation
- .push_back (DataComponentInterpretation::component_is_scalar);
-
- return data_component_interpretation;
- }
+ data_component_interpretation
+ (dim, DataComponentInterpretation::component_is_part_of_vector);
+ data_component_interpretation
+ .push_back (DataComponentInterpretation::component_is_scalar);
+ data_component_interpretation
+ .push_back (DataComponentInterpretation::component_is_scalar);
+
+ return data_component_interpretation;
+ }
- // @sect4{Transformations between variables}
-
- // Next, we define the gas
- // constant. We will set it to 1.4
- // in its definition immediately
- // following the declaration of
- // this class (unlike integer
- // variables, like the ones above,
- // static const floating point
- // member variables cannot be
- // initialized within the class
- // declaration in C++). This value
- // of 1.4 is representative of a
- // gas that consists of molecules
- // composed of two atoms, such as
- // air which consists up to small
- // traces almost entirely of $N_2$
- // and $O_2$.
- static const double gas_gamma;
-
-
- // In the following, we will need to
- // compute the kinetic energy and the
- // pressure from a vector of conserved
- // variables. This we can do based on the
- // energy density and the kinetic energy
- // $\frac 12 \rho |\mathbf v|^2 =
- // \frac{|\rho \mathbf v|^2}{2\rho}$
- // (note that the independent variables
- // contain the momentum components $\rho
- // v_i$, not the velocities $v_i$).
- //
- // There is one slight problem: We will
- // need to call the following functions
- // with input arguments of type
- // <code>std::vector@<number@></code> and
- // <code>Vector@<number@></code>. The
- // problem is that the former has an
- // access operator
- // <code>operator[]</code> whereas the
- // latter, for historical reasons, has
- // <code>operator()</code>. We wouldn't
- // be able to write the function in a
- // generic way if we were to use one or
- // the other of these. Fortunately, we
- // can use the following trick: instead
- // of writing <code>v[i]</code> or
- // <code>v(i)</code>, we can use
- // <code>*(v.begin() + i)</code>, i.e. we
- // generate an iterator that points to
- // the <code>i</code>th element, and then
- // dereference it. This works for both
- // kinds of vectors -- not the prettiest
- // solution, but one that works.
- template <typename number, typename InputVector>
- static
- number
- compute_kinetic_energy (const InputVector &W)
- {
- number kinetic_energy = 0;
- for (unsigned int d=0; d<dim; ++d)
- kinetic_energy += *(W.begin()+first_momentum_component+d) *
- *(W.begin()+first_momentum_component+d);
- kinetic_energy *= 1./(2 * *(W.begin() + density_component));
+ // @sect4{Transformations between variables}
+
+ // Next, we define the ratio of
+ // specific heats $\gamma$ of the
+ // gas. We will set it to 1.4
+ // in its definition immediately
+ // following the declaration of
+ // this class (unlike integer
+ // variables, like the ones above,
+ // static const floating point
+ // member variables cannot be
+ // initialized within the class
+ // declaration in C++). This value
+ // of 1.4 is representative of a
+ // gas that consists of molecules
+ // composed of two atoms, such as
+ // air, which, up to small traces,
+ // consists almost entirely of $N_2$
+ // and $O_2$.
+ static const double gas_gamma;
+
+
+ // In the following, we will need to
+ // compute the kinetic energy and the
+ // pressure from a vector of conserved
+ // variables. This we can do based on the
+ // energy density and the kinetic energy
+ // $\frac 12 \rho |\mathbf v|^2 =
+ // \frac{|\rho \mathbf v|^2}{2\rho}$
+ // (note that the independent variables
+ // contain the momentum components $\rho
+ // v_i$, not the velocities $v_i$).
+ //
+ // There is one slight problem: We will
+ // need to call the following functions
+ // with input arguments of type
+ // <code>std::vector@<number@></code> and
+ // <code>Vector@<number@></code>. The
+ // problem is that the former has an
+ // access operator
+ // <code>operator[]</code> whereas the
+ // latter, for historical reasons, has
+ // <code>operator()</code>. We wouldn't
+ // be able to write the function in a
+ // generic way if we were to use one or
+ // the other of these. Fortunately, we
+ // can use the following trick: instead
+ // of writing <code>v[i]</code> or
+ // <code>v(i)</code>, we can use
+ // <code>*(v.begin() + i)</code>, i.e. we
+ // generate an iterator that points to
+ // the <code>i</code>th element, and then
+ // dereference it. This works for both
+ // kinds of vectors -- not the prettiest
+ // solution, but one that works.
+ template <typename number, typename InputVector>
+ static
+ number
+ compute_kinetic_energy (const InputVector &W)
+ {
+ number kinetic_energy = 0;
+ for (unsigned int d=0; d<dim; ++d)
+ kinetic_energy += *(W.begin()+first_momentum_component+d) *
+ *(W.begin()+first_momentum_component+d);
+ kinetic_energy *= 1./(2 * *(W.begin() + density_component));
- return kinetic_energy;
- }
+ return kinetic_energy;
+ }
- template <typename number, typename InputVector>
- static
- number
- compute_pressure (const InputVector &W)
- {
- return ((gas_gamma-1.0) *
- (*(W.begin() + energy_component) -
- compute_kinetic_energy<number>(W)));
- }
+ template <typename number, typename InputVector>
+ static
+ number
+ compute_pressure (const InputVector &W)
+ {
+ return ((gas_gamma-1.0) *
+ (*(W.begin() + energy_component) -
+ compute_kinetic_energy<number>(W)));
+ }
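// The access trick and the two helper functions above can be exercised in a
// small standalone program (not part of the tutorial). The MiniVector class
// below is a hypothetical stand-in that only mimics the relevant interface
// of deal.II's Vector, namely operator() and begin(), so that the same
// template works for it and for std::vector:
#include <iostream>
#include <vector>

const unsigned int dim = 2;          // component order: rho*v1, rho*v2, rho, E
const double gas_gamma = 1.4;

struct MiniVector
{
  std::vector<double> data;
  double operator() (const unsigned int i) const { return data[i]; }
  std::vector<double>::const_iterator begin () const { return data.begin(); }
};

template <typename InputVector>
double compute_kinetic_energy (const InputVector &W)
{
  double kinetic_energy = 0;
  for (unsigned int d=0; d<dim; ++d)
    kinetic_energy += *(W.begin()+d) * *(W.begin()+d);
  return kinetic_energy / (2 * *(W.begin()+dim));
}

template <typename InputVector>
double compute_pressure (const InputVector &W)
{
  return (gas_gamma-1.0) * (*(W.begin()+dim+1) - compute_kinetic_energy (W));
}

int main ()
{
  // rho=1, v=(1,0), E=2.5: kinetic energy 0.5, pressure 0.4*(2.5-0.5)=0.8
  const double values[] = {1.0, 0.0, 1.0, 2.5};
  std::vector<double> W_std (values, values+4);
  MiniVector W_mini;
  W_mini.data = W_std;

  std::cout << compute_pressure (W_std)  << std::endl;   // prints 0.8
  std::cout << compute_pressure (W_mini) << std::endl;   // prints 0.8
}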
- // @sect4{EulerEquations::compute_flux_matrix}
-
- // We define the flux function
- // $F(W)$ as one large matrix.
- // Each row of this matrix
- // represents a scalar
- // conservation law for the
- // component in that row. The
- // exact form of this matrix is
- // given in the
- // introduction. Note that we
- // know the size of the matrix:
- // it has as many rows as the
- // system has components, and
- // <code>dim</code> columns;
- // rather than using a FullMatrix
- // object for such a matrix
- // (which has a variable number
- // of rows and columns and must
- // therefore allocate memory on
- // the heap each time such a
- // matrix is created), we use a
- // rectangular array of numbers
- // right away.
- //
- // We templatize the numerical type of
- // the flux function so that we may use
- // the automatic differentiation type
- // here. Similarly, we will call the
- // function with different input vector
- // data types, so we templatize on it as
- // well:
- template <typename InputVector, typename number>
- static
- void compute_flux_matrix (const InputVector &W,
- number (&flux)[n_components][dim])
+ // @sect4{EulerEquations::compute_flux_matrix}
+
+ // We define the flux function
+ // $F(W)$ as one large matrix.
+ // Each row of this matrix
+ // represents a scalar
+ // conservation law for the
+ // component in that row. The
+ // exact form of this matrix is
+ // given in the
+ // introduction. Note that we
+ // know the size of the matrix:
+ // it has as many rows as the
+ // system has components, and
+ // <code>dim</code> columns;
+ // rather than using a FullMatrix
+ // object for such a matrix
+ // (which has a variable number
+ // of rows and columns and must
+ // therefore allocate memory on
+ // the heap each time such a
+ // matrix is created), we use a
+ // rectangular array of numbers
+ // right away.
+ //
+ // We templatize the numerical type of
+ // the flux function so that we may use
+ // the automatic differentiation type
+ // here. Similarly, we will call the
+ // function with different input vector
+ // data types, so we templatize on it as
+ // well:
+ template <typename InputVector, typename number>
+ static
+ void compute_flux_matrix (const InputVector &W,
+ number (&flux)[n_components][dim])
+ {
+ // First compute the pressure that
+ // appears in the flux matrix, and
+ // then compute the first
+ // <code>dim</code> columns of the
+ // matrix that correspond to the
+ // momentum terms:
+ const number pressure = compute_pressure<number> (W);
+
+ for (unsigned int d=0; d<dim; ++d)
{
- // First compute the pressure that
- // appears in the flux matrix, and
- // then compute the first
- // <code>dim</code> columns of the
- // matrix that correspond to the
- // momentum terms:
- const number pressure = compute_pressure<number> (W);
-
- for (unsigned int d=0; d<dim; ++d)
- {
- for (unsigned int e=0; e<dim; ++e)
- flux[first_momentum_component+d][e]
- = W[first_momentum_component+d] *
- W[first_momentum_component+e] /
- W[density_component];
-
- flux[first_momentum_component+d][d] += pressure;
- }
+ for (unsigned int e=0; e<dim; ++e)
+ flux[first_momentum_component+d][e]
+ = W[first_momentum_component+d] *
+ W[first_momentum_component+e] /
+ W[density_component];
- // Then the terms for the
- // density (i.e. mass
- // conservation), and,
- // lastly, conservation of
- // energy:
- for (unsigned int d=0; d<dim; ++d)
- flux[density_component][d] = W[first_momentum_component+d];
-
- for (unsigned int d=0; d<dim; ++d)
- flux[energy_component][d] = W[first_momentum_component+d] /
- W[density_component] *
- (W[energy_component] + pressure);
+ flux[first_momentum_component+d][d] += pressure;
}
+ // Then the terms for the
+ // density (i.e. mass
+ // conservation), and,
+ // lastly, conservation of
+ // energy:
+ for (unsigned int d=0; d<dim; ++d)
+ flux[density_component][d] = W[first_momentum_component+d];
+
+ for (unsigned int d=0; d<dim; ++d)
+ flux[energy_component][d] = W[first_momentum_component+d] /
+ W[density_component] *
+ (W[energy_component] + pressure);
+ }
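// The formulas above can be checked with a small standalone program (not
// part of the tutorial) that writes them out for dim=2 with plain doubles
// and the same component ordering (rho*v1, rho*v2, rho, E). For rho=1,
// v=(1,0), E=2.5, and hence p=0.8, the rows of the flux matrix come out as
// (1.8, 0), (0, 0.8), (1, 0) and (3.3, 0):
#include <iostream>

int main ()
{
  const unsigned int dim = 2, n_components = dim + 2;
  const double gas_gamma = 1.4;
  const double W[n_components] = {1.0, 0.0, 1.0, 2.5};

  const double kinetic  = (W[0]*W[0] + W[1]*W[1]) / (2*W[dim]);
  const double pressure = (gas_gamma-1.0) * (W[dim+1] - kinetic);

  double flux[n_components][dim];
  for (unsigned int d=0; d<dim; ++d)
    {
      for (unsigned int e=0; e<dim; ++e)
        flux[d][e] = W[d] * W[e] / W[dim];              // rho v_d v_e
      flux[d][d] += pressure;                           // plus p on the diagonal
    }
  for (unsigned int d=0; d<dim; ++d)
    flux[dim][d] = W[d];                                // mass flux equals momentum
  for (unsigned int d=0; d<dim; ++d)
    flux[dim+1][d] = W[d] / W[dim] * (W[dim+1] + pressure);  // (E+p) v_d

  for (unsigned int c=0; c<n_components; ++c)
    {
      for (unsigned int d=0; d<dim; ++d)
        std::cout << flux[c][d] << ' ';
      std::cout << std::endl;
    }
}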
- // @sect4{EulerEquations::compute_normal_flux}
-
- // On the boundaries of the
- // domain and across hanging
- // nodes we use a numerical flux
- // function to enforce boundary
- // conditions. This routine is
- // the basic Lax-Friedrich's flux
- // with a stabilization parameter
- // $\alpha$. It's form has also
- // been given already in the
- // introduction:
- template <typename InputVector>
- static
- void numerical_normal_flux (const Point<dim> &normal,
- const InputVector &Wplus,
- const InputVector &Wminus,
- const double alpha,
- Sacado::Fad::DFad<double> (&normal_flux)[n_components])
- {
- Sacado::Fad::DFad<double> iflux[n_components][dim];
- Sacado::Fad::DFad<double> oflux[n_components][dim];
-
- compute_flux_matrix (Wplus, iflux);
- compute_flux_matrix (Wminus, oflux);
- for (unsigned int di=0; di<n_components; ++di)
- {
- normal_flux[di] = 0;
- for (unsigned int d=0; d<dim; ++d)
- normal_flux[di] += 0.5*(iflux[di][d] + oflux[di][d]) * normal[d];
+ // @sect4{EulerEquations::numerical_normal_flux}
+
+ // On the boundaries of the
+ // domain and across hanging
+ // nodes we use a numerical flux
+ // function to enforce boundary
+ // conditions. This routine is
+ // the basic Lax-Friedrichs flux
+ // with a stabilization parameter
+ // $\alpha$. Its form has already
+ // been given in the
+ // introduction:
+ template <typename InputVector>
+ static
+ void numerical_normal_flux (const Point<dim> &normal,
+ const InputVector &Wplus,
+ const InputVector &Wminus,
+ const double alpha,
+ Sacado::Fad::DFad<double> (&normal_flux)[n_components])
+ {
+ Sacado::Fad::DFad<double> iflux[n_components][dim];
+ Sacado::Fad::DFad<double> oflux[n_components][dim];
- normal_flux[di] += 0.5*alpha*(Wplus[di] - Wminus[di]);
- }
- }
+ compute_flux_matrix (Wplus, iflux);
+ compute_flux_matrix (Wminus, oflux);
- // @sect4{EulerEquations::compute_forcing_vector}
-
- // In the same way as describing the flux
- // function $\mathbf F(\mathbf w)$, we
- // also need to have a way to describe
- // the right hand side forcing term. As
- // mentioned in the introduction, we
- // consider only gravity here, which
- // leads to the specific form $\mathbf
- // G(\mathbf w) = \left(
- // g_1\rho, g_2\rho, g_3\rho, 0,
- // \rho \mathbf g \cdot \mathbf v
- // \right)^T$, shown here for
- // the 3d case. More specifically, we
- // will consider only $\mathbf
- // g=(0,0,-1)^T$ in 3d, or $\mathbf
- // g=(0,-1)^T$ in 2d. This naturally
- // leads to the following function:
- template <typename InputVector, typename number>
- static
- void compute_forcing_vector (const InputVector &W,
- number (&forcing)[n_components])
+ for (unsigned int di=0; di<n_components; ++di)
{
- const double gravity = -1.0;
+ normal_flux[di] = 0;
+ for (unsigned int d=0; d<dim; ++d)
+ normal_flux[di] += 0.5*(iflux[di][d] + oflux[di][d]) * normal[d];
- for (unsigned int c=0; c<n_components; ++c)
- switch (c)
- {
- case first_momentum_component+dim-1:
- forcing[c] = gravity * W[density_component];
- break;
- case energy_component:
- forcing[c] = gravity *
- W[density_component] *
- W[first_momentum_component+dim-1];
- break;
- default:
- forcing[c] = 0;
- }
+ normal_flux[di] += 0.5*alpha*(Wplus[di] - Wminus[di]);
}
+ }
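// Written out, the loop above computes for each component $i$ the local
// Lax-Friedrichs flux
// $\hat{\mathbf H}_i(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n})
//  = \frac 12 \left(\mathbf F_i(\mathbf{w}^+) + \mathbf F_i(\mathbf{w}^-)\right)
//    \cdot \mathbf n
//  + \frac{\alpha}{2} \left(w_i^+ - w_i^-\right)$,
// i.e. the average of the two one-sided normal fluxes plus a stabilization
// term proportional to the jump in the conserved variables.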
+ // @sect4{EulerEquations::compute_forcing_vector}
+
+ // Just as we describe the flux
+ // function $\mathbf F(\mathbf w)$, we
+ // also need a way to describe
+ // the right hand side forcing term. As
+ // mentioned in the introduction, we
+ // consider only gravity here, which
+ // leads to the specific form $\mathbf
+ // G(\mathbf w) = \left(
+ // g_1\rho, g_2\rho, g_3\rho, 0,
+ // \rho \mathbf g \cdot \mathbf v
+ // \right)^T$, shown here for
+ // the 3d case. More specifically, we
+ // will consider only $\mathbf
+ // g=(0,0,-1)^T$ in 3d, or $\mathbf
+ // g=(0,-1)^T$ in 2d. This naturally
+ // leads to the following function:
+ template <typename InputVector, typename number>
+ static
+ void compute_forcing_vector (const InputVector &W,
+ number (&forcing)[n_components])
+ {
+ const double gravity = -1.0;
- // @sect4{Dealing with boundary conditions}
+ for (unsigned int c=0; c<n_components; ++c)
+ switch (c)
+ {
+ case first_momentum_component+dim-1:
+ forcing[c] = gravity * W[density_component];
+ break;
+ case energy_component:
+ forcing[c] = gravity *
+ W[first_momentum_component+dim-1];
+ break;
+ default:
+ forcing[c] = 0;
+ }
+ }
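// For reference, with the component ordering used here (momenta first, then
// density, then energy density) this switch produces
// $\mathbf G(\mathbf w) = (0, -\rho, 0, -\rho v_2)^T$ for $\mathbf g=(0,-1)^T$
// in 2d, and $\mathbf G(\mathbf w) = (0, 0, -\rho, 0, -\rho v_3)^T$ for
// $\mathbf g=(0,0,-1)^T$ in 3d: only the vertical momentum component and the
// energy density receive a nonzero source, the latter because
// $\rho\,\mathbf g\cdot\mathbf v = \mathbf g\cdot(\rho\mathbf v)$, i.e.
// gravity dotted into the momentum vector.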
- // Another thing we have to deal with is
- // boundary conditions. To this end, let
- // us first define the kinds of boundary
- // conditions we currently know how to
- // deal with:
- enum BoundaryKind
- {
- inflow_boundary,
- outflow_boundary,
- no_penetration_boundary,
- pressure_boundary
- };
+ // @sect4{Dealing with boundary conditions}
- // The next part is to actually decide
- // what to do at each kind of
- // boundary. To this end, remember from
- // the introduction that boundary
- // conditions are specified by choosing a
- // value $\mathbf w^-$ on the outside of
- // a boundary given an inhomogeneity
- // $\mathbf j$ and possibly the
- // solution's value $\mathbf w^+$ on the
- // inside. Both are then passed to the
- // numerical flux $\mathbf
- // H(\mathbf{w}^+, \mathbf{w}^-,
- // \mathbf{n})$ to define boundary
- // contributions to the bilinear form.
- //
- // Boundary conditions can in some cases
- // be specified for each component of the
- // solution vector independently. For
- // example, if component $c$ is marked
- // for inflow, then $w^-_c = j_c$. If it
- // is an outflow, then $w^-_c =
- // w^+_c$. These two simple cases are
- // handled first in the function below.
- //
- // There is a little snag that makes this
- // function unpleasant from a C++
- // language viewpoint: The output vector
- // <code>Wminus</code> will of course be
- // modified, so it shouldn't be a
- // <code>const</code> argument. Yet it is
- // in the implementation below, and needs
- // to be in order to allow the code to
- // compile. The reason is that we call
- // this function at a place where
- // <code>Wminus</code> is of type
- // <code>Table@<2,Sacado::Fad::DFad@<double@>
- // @></code>, this being 2d table with
- // indices representing the quadrature
- // point and the vector component,
- // respectively. We call this function
- // with <code>Wminus[q]</code> as last
- // argument; subscripting a 2d table
- // yields a temporary accessor object
- // representing a 1d vector, just what we
- // want here. The problem is that a
- // temporary accessor object can't be
- // bound to a non-const reference
- // argument of a function, as we would
- // like here, according to the C++ 1998
- // and 2003 standards (something that
- // will be fixed with the next standard
- // in the form of rvalue references). We
- // get away with making the output
- // argument here a constant because it is
- // the <i>accessor</i> object that's
- // constant, not the table it points to:
- // that one can still be written to. The
- // hack is unpleasant nevertheless
- // because it restricts the kind of data
- // types that may be used as template
- // argument to this function: a regular
- // vector isn't going to do because that
- // one can not be written to when marked
- // <code>const</code>. With no good
- // solution around at the moment, we'll
- // go with the pragmatic, even if not
- // pretty, solution shown here:
- template <typename DataVector>
- static
- void
- compute_Wminus (const BoundaryKind (&boundary_kind)[n_components],
- const Point<dim> &normal_vector,
- const DataVector &Wplus,
- const Vector<double> &boundary_values,
- const DataVector &Wminus)
- {
- for (unsigned int c = 0; c < n_components; c++)
- switch (boundary_kind[c])
- {
- case inflow_boundary:
- {
- Wminus[c] = boundary_values(c);
- break;
- }
+ // Another thing we have to deal with is
+ // boundary conditions. To this end, let
+ // us first define the kinds of boundary
+ // conditions we currently know how to
+ // deal with:
+ enum BoundaryKind
+ {
+ inflow_boundary,
+ outflow_boundary,
+ no_penetration_boundary,
+ pressure_boundary
+ };
- case outflow_boundary:
- {
- Wminus[c] = Wplus[c];
- break;
- }
- // Prescribed pressure boundary
- // conditions are a bit more
- // complicated by the fact that
- // even though the pressure is
- // prescribed, we really are
- // setting the energy component
- // here, which will depend on
- // velocity and pressure. So
- // even though this seems like
- // a Dirichlet type boundary
- // condition, we get
- // sensitivities of energy to
- // velocity and density (unless
- // these are also prescribed):
- case pressure_boundary:
- {
- const typename DataVector::value_type
- density = (boundary_kind[density_component] ==
- inflow_boundary
- ?
- boundary_values(density_component)
- :
- Wplus[density_component]);
+ // The next part is to actually decide
+ // what to do at each kind of
+ // boundary. To this end, remember from
+ // the introduction that boundary
+ // conditions are specified by choosing a
+ // value $\mathbf w^-$ on the outside of
+ // a boundary given an inhomogeneity
+ // $\mathbf j$ and possibly the
+ // solution's value $\mathbf w^+$ on the
+ // inside. Both are then passed to the
+ // numerical flux $\mathbf
+ // H(\mathbf{w}^+, \mathbf{w}^-,
+ // \mathbf{n})$ to define boundary
+ // contributions to the bilinear form.
+ //
+ // Boundary conditions can in some cases
+ // be specified for each component of the
+ // solution vector independently. For
+ // example, if component $c$ is marked
+ // for inflow, then $w^-_c = j_c$. If it
+ // is an outflow, then $w^-_c =
+ // w^+_c$. These two simple cases are
+ // handled first in the function below.
+ //
+ // There is a little snag that makes this
+ // function unpleasant from a C++
+ // language viewpoint: The output vector
+ // <code>Wminus</code> will of course be
+ // modified, so it shouldn't be a
+ // <code>const</code> argument. Yet it is
+ // in the implementation below, and needs
+ // to be in order to allow the code to
+ // compile. The reason is that we call
+ // this function at a place where
+ // <code>Wminus</code> is of type
+ // <code>Table@<2,Sacado::Fad::DFad@<double@>
+ // @></code>, this being a 2d table with
+ // indices representing the quadrature
+ // point and the vector component,
+ // respectively. We call this function
+ // with <code>Wminus[q]</code> as last
+ // argument; subscripting a 2d table
+ // yields a temporary accessor object
+ // representing a 1d vector, just what we
+ // want here. The problem is that a
+ // temporary accessor object can't be
+ // bound to a non-const reference
+ // argument of a function, as we would
+ // like here, according to the C++ 1998
+ // and 2003 standards (something that
+ // will be fixed with the next standard
+ // in the form of rvalue references). We
+ // get away with making the output
+ // argument here a constant because it is
+ // the <i>accessor</i> object that's
+ // constant, not the table it points to:
+ // that one can still be written to. The
+ // hack is unpleasant nevertheless
+ // because it restricts the kind of data
+ // types that may be used as template
+ // argument to this function: a regular
+ // vector isn't going to do because that
+ // one can not be written to when marked
+ // <code>const</code>. With no good
+ // solution around at the moment, we'll
+ // go with the pragmatic, even if not
+ // pretty, solution shown here:
+ template <typename DataVector>
+ static
+ void
+ compute_Wminus (const BoundaryKind (&boundary_kind)[n_components],
+ const Point<dim> &normal_vector,
+ const DataVector &Wplus,
+ const Vector<double> &boundary_values,
+ const DataVector &Wminus)
+ {
+ for (unsigned int c = 0; c < n_components; c++)
+ switch (boundary_kind[c])
+ {
+ case inflow_boundary:
+ {
+ Wminus[c] = boundary_values(c);
+ break;
+ }
- typename DataVector::value_type kinetic_energy = 0;
- for (unsigned int d=0; d<dim; ++d)
- if (boundary_kind[d] == inflow_boundary)
- kinetic_energy += boundary_values(d)*boundary_values(d);
- else
- kinetic_energy += Wplus[d]*Wplus[d];
- kinetic_energy *= 1./2./density;
+ case outflow_boundary:
+ {
+ Wminus[c] = Wplus[c];
+ break;
+ }
- Wminus[c] = boundary_values(c) / (gas_gamma-1.0) +
- kinetic_energy;
+ // Prescribed pressure boundary
+ // conditions are a bit more
+ // complicated because, even
+ // though the pressure is
+ // prescribed, what we really
+ // set here is the energy
+ // component, which depends on
+ // velocity and pressure. So
+ // even though this seems like
+ // a Dirichlet type boundary
+ // condition, we get
+ // sensitivities of energy to
+ // velocity and density (unless
+ // these are also prescribed):
+ case pressure_boundary:
+ {
+ const typename DataVector::value_type
+ density = (boundary_kind[density_component] ==
+ inflow_boundary
+ ?
+ boundary_values(density_component)
+ :
+ Wplus[density_component]);
+
+ typename DataVector::value_type kinetic_energy = 0;
+ for (unsigned int d=0; d<dim; ++d)
+ if (boundary_kind[d] == inflow_boundary)
+ kinetic_energy += boundary_values(d)*boundary_values(d);
+ else
+ kinetic_energy += Wplus[d]*Wplus[d];
+ kinetic_energy *= 1./2./density;
+
+ Wminus[c] = boundary_values(c) / (gas_gamma-1.0) +
+ kinetic_energy;
+
+ break;
+ }
- break;
- }
+ case no_penetration_boundary:
+ {
+ // We prescribe the
+ // velocity: since we deal with one
+ // component at a time here, we
+ // mirror it so that the average of
+ // the inner and outer velocities is
+ // orthogonal to the surface
+ // normal. This creates
+ // sensitivities across all
+ // velocity components.
+ Sacado::Fad::DFad<double> vdotn = 0;
+ for (unsigned int d = 0; d < dim; d++)
+ {
+ vdotn += Wplus[d]*normal_vector[d];
+ }
- case no_penetration_boundary:
- {
- // We prescribe the
- // velocity (we are dealing with a
- // particular component here so
- // that the average of the
- // velocities is orthogonal to the
- // surface normal. This creates
- // sensitivies of across the
- // velocity components.
- Sacado::Fad::DFad<double> vdotn = 0;
- for (unsigned int d = 0; d < dim; d++) {
- vdotn += Wplus[d]*normal_vector[d];
- }
-
- Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c];
- break;
- }
+ Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c];
+ break;
+ }
- default:
- Assert (false, ExcNotImplemented());
- }
- }
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+ }
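// The mirroring used for the no-penetration case can be verified with a
// small standalone check (not part of the tutorial program): replacing the
// momentum $\mathbf m$ by $\mathbf m - 2(\mathbf m\cdot\mathbf n)\mathbf n$
// makes the average of the inner and outer states have zero normal
// component while leaving the tangential component untouched.
#include <cmath>
#include <iostream>

int main ()
{
  const unsigned int dim = 2;
  const double m_plus[dim] = {3.0, 1.0};                               // inner momentum
  const double n[dim]      = {1.0/std::sqrt(2.0), 1.0/std::sqrt(2.0)}; // unit normal

  double mdotn = 0;
  for (unsigned int d=0; d<dim; ++d)
    mdotn += m_plus[d] * n[d];

  double m_minus[dim];
  for (unsigned int d=0; d<dim; ++d)
    m_minus[d] = m_plus[d] - 2.0 * mdotn * n[d];

  double average_normal_component = 0;
  for (unsigned int d=0; d<dim; ++d)
    average_normal_component += 0.5 * (m_plus[d] + m_minus[d]) * n[d];

  std::cout << "normal component of the averaged momentum: "
            << average_normal_component << std::endl;                  // prints 0
}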
- // @sect4{EulerEquations::compute_refinement_indicators}
-
- // In this class, we also want to specify
- // how to refine the mesh. The class
- // <code>ConservationLaw</code> that will
- // use all the information we provide
- // here in the <code>EulerEquation</code>
- // class is pretty agnostic about the
- // particular conservation law it solves:
- // as doesn't even really care how many
- // components a solution vector
- // has. Consequently, it can't know what
- // a reasonable refinement indicator
- // would be. On the other hand, here we
- // do, or at least we can come up with a
- // reasonable choice: we simply look at
- // the gradient of the density, and
- // compute
- // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$,
- // where $x_K$ is the center of cell $K$.
- //
- // There are certainly a number of
- // equally reasonable refinement
- // indicators, but this one does, and it
- // is easy to compute:
- static
- void
- compute_refinement_indicators (const DoFHandler<dim> &dof_handler,
- const Mapping<dim> &mapping,
- const Vector<double> &solution,
- Vector<double> &refinement_indicators)
- {
- const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
- std::vector<unsigned int> dofs (dofs_per_cell);
+ // @sect4{EulerEquations::compute_refinement_indicators}
+
+ // In this class, we also want to specify
+ // how to refine the mesh. The class
+ // <code>ConservationLaw</code> that will
+ // use all the information we provide
+ // here in the <code>EulerEquations</code>
+ // class is pretty agnostic about the
+ // particular conservation law it solves:
+ // it doesn't even really care how many
+ // components a solution vector
+ // has. Consequently, it can't know what
+ // a reasonable refinement indicator
+ // would be. On the other hand, here we
+ // do, or at least we can come up with a
+ // reasonable choice: we simply look at
+ // the gradient of the density, and
+ // compute
+ // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$,
+ // where $x_K$ is the center of cell $K$.
+ //
+ // There are certainly a number of
+ // equally reasonable refinement
+ // indicators, but this one does the
+ // job and is easy to compute:
+ static
+ void
+ compute_refinement_indicators (const DoFHandler<dim> &dof_handler,
+ const Mapping<dim> &mapping,
- const Vector<double> &solution,
++ const Vector<double> &solution,
+ Vector<double> &refinement_indicators)
+ {
+ const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
+ std::vector<unsigned int> dofs (dofs_per_cell);
- const QMidpoint<dim> quadrature_formula;
- const UpdateFlags update_flags = update_gradients;
- FEValues<dim> fe_v (mapping, dof_handler.get_fe(),
- quadrature_formula, update_flags);
+ const QMidpoint<dim> quadrature_formula;
+ const UpdateFlags update_flags = update_gradients;
+ FEValues<dim> fe_v (mapping, dof_handler.get_fe(),
+ quadrature_formula, update_flags);
- std::vector<std::vector<Tensor<1,dim> > >
- dU (1, std::vector<Tensor<1,dim> >(n_components));
+ std::vector<std::vector<Tensor<1,dim> > >
+ dU (1, std::vector<Tensor<1,dim> >(n_components));
- typename DoFHandler<dim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
- for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no)
- {
- fe_v.reinit(cell);
- fe_v.get_function_grads (solution, dU);
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no)
+ {
+ fe_v.reinit(cell);
+ fe_v.get_function_grads (solution, dU);
- refinement_indicators(cell_no)
- = std::log(1+
- std::sqrt(dU[0][density_component] *
- dU[0][density_component]));
- }
+ refinement_indicators(cell_no)
+ = std::log(1+
+ std::sqrt(dU[0][density_component] *
+ dU[0][density_component]));
}
+ }
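// The effect of the logarithm in $\eta_K=\log(1+|\nabla\rho(x_K)|)$ can be
// seen in a trivial standalone example (not part of the tutorial program);
// presumably it is there to compress the very large range of density
// gradients one encounters near discontinuities, so that cells with steep
// but not extreme gradients are still considered for refinement:
#include <cmath>
#include <iostream>

int main ()
{
  const double grad_rho_norm[] = {0.0, 0.1, 1.0, 10.0, 1000.0};
  for (unsigned int i=0; i<5; ++i)
    std::cout << "|grad rho| = " << grad_rho_norm[i]
              << "   eta = "     << std::log (1 + grad_rho_norm[i])
              << std::endl;
}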
EulerEquations<dim>::Postprocessor::
compute_derived_quantities_vector (const std::vector<Vector<double> > &uh,
const std::vector<std::vector<Tensor<1,dim> > > &duh,
- const std::vector<std::vector<Tensor<2,dim> > > & /*dduh*/,
- const std::vector<Point<dim> > & /*normals*/,
- const std::vector<Point<dim> > & /*evaluation_points*/,
+ const std::vector<std::vector<Tensor<2,dim> > > &/*dduh*/,
+ const std::vector<Point<dim> > &/*normals*/,
+ const std::vector<Point<dim> > &/*evaluation_points*/,
std::vector<Vector<double> > &computed_quantities) const
{
- // At the beginning of the function, let us
- // make sure that all variables have the
- // correct sizes, so that we can access
- // individual vector elements without
- // having to wonder whether we might read
- // or write invalid elements; we also check
- // that the <code>duh</code> vector only
- // contains data if we really need it (the
- // system knows about this because we say
- // so in the
- // <code>get_needed_update_flags()</code>
- // function below). For the inner vectors,
- // we check that at least the first element
- // of the outer vector has the correct
- // inner size:
+ // At the beginning of the function, let us
+ // make sure that all variables have the
+ // correct sizes, so that we can access
+ // individual vector elements without
+ // having to wonder whether we might read
+ // or write invalid elements; we also check
+ // that the <code>duh</code> vector only
+ // contains data if we really need it (the
+ // system knows about this because we say
+ // so in the
+ // <code>get_needed_update_flags()</code>
+ // function below). For the inner vectors,
+ // we check that at least the first element
+ // of the outer vector has the correct
+ // inner size:
const unsigned int n_quadrature_points = uh.size();
if (do_schlieren_plot == true)
template <int dim>
class NavierStokesProjection
{
- public:
- NavierStokesProjection (const RunTimeParameters::Data_Storage &data);
-
- void run (const bool verbose = false,
- const unsigned int n_plots = 10);
- protected:
- RunTimeParameters::MethodFormulation type;
-
- const unsigned int deg;
- const double dt;
- const double t_0, T, Re;
-
- EquationData::Velocity<dim> vel_exact;
- std::map<unsigned int, double> boundary_values;
- std::vector<types::boundary_id> boundary_indicators;
-
- Triangulation<dim> triangulation;
-
- FE_Q<dim> fe_velocity;
- FE_Q<dim> fe_pressure;
-
- DoFHandler<dim> dof_handler_velocity;
- DoFHandler<dim> dof_handler_pressure;
-
- QGauss<dim> quadrature_pressure;
- QGauss<dim> quadrature_velocity;
-
- SparsityPattern sparsity_pattern_velocity;
- SparsityPattern sparsity_pattern_pressure;
- SparsityPattern sparsity_pattern_pres_vel;
-
- SparseMatrix<double> vel_Laplace_plus_Mass;
- SparseMatrix<double> vel_it_matrix[dim];
- SparseMatrix<double> vel_Mass;
- SparseMatrix<double> vel_Laplace;
- SparseMatrix<double> vel_Advection;
- SparseMatrix<double> pres_Laplace;
- SparseMatrix<double> pres_Mass;
- SparseMatrix<double> pres_Diff[dim];
- SparseMatrix<double> pres_iterative;
-
- Vector<double> pres_n;
- Vector<double> pres_n_minus_1;
- Vector<double> phi_n;
- Vector<double> phi_n_minus_1;
- Vector<double> u_n[dim];
- Vector<double> u_n_minus_1[dim];
- Vector<double> u_star[dim];
- Vector<double> force[dim];
- Vector<double> v_tmp;
- Vector<double> pres_tmp;
- Vector<double> rot_u;
-
- SparseILU<double> prec_velocity[dim];
- SparseILU<double> prec_pres_Laplace;
- SparseDirectUMFPACK prec_mass;
- SparseDirectUMFPACK prec_vel_mass;
-
- DeclException2 (ExcInvalidTimeStep,
- double, double,
- << " The time step " << arg1 << " is out of range."
- << std::endl
- << " The permitted range is (0," << arg2 << "]");
-
- void create_triangulation_and_dofs (const unsigned int n_refines);
-
- void initialize();
-
- void interpolate_velocity ();
-
- void diffusion_step (const bool reinit_prec);
-
- void projection_step (const bool reinit_prec);
-
- void update_pressure (const bool reinit_prec);
-
- private:
- unsigned int vel_max_its;
- unsigned int vel_Krylov_size;
- unsigned int vel_off_diagonals;
- unsigned int vel_update_prec;
- double vel_eps;
- double vel_diag_strength;
-
- void initialize_velocity_matrices();
-
- void initialize_pressure_matrices();
-
- // The next few structures and functions
- // are for doing various things in
- // parallel. They follow the scheme laid
- // out in @ref threads, using the
- // WorkStream class. As explained there,
- // this requires us to declare two
- // structures for each of the assemblers,
- // a per-task data and a scratch data
- // structure. These are then handed over
- // to functions that assemble local
- // contributions and that copy these
- // local contributions to the global
- // objects.
- //
- // One of the things that are specific to
- // this program is that we don't just
- // have a single DoFHandler object that
- // represents both the velocities and the
- // pressure, but we use individual
- // DoFHandler objects for these two kinds
- // of variables. We pay for this
- // optimization when we want to assemble
- // terms that involve both variables,
- // such as the divergence of the velocity
- // and the gradient of the pressure,
- // times the respective test
- // functions. When doing so, we can't
- // just anymore use a single FEValues
- // object, but rather we need two, and
- // they need to be initialized with cell
- // iterators that point to the same cell
- // in the triangulation but different
- // DoFHandlers.
- //
- // To do this in practice, we declare a
- // "synchronous" iterator -- an object
- // that internally consists of several
- // (in our case two) iterators, and each
- // time the synchronous iteration is
- // moved up one step, each of the
- // iterators stored internally is moved
- // up one step as well, thereby always
- // staying in sync. As it so happens,
- // there is a deal.II class that
- // facilitates this sort of thing.
- typedef std_cxx1x::tuple< typename DoFHandler<dim>::active_cell_iterator,
- typename DoFHandler<dim>::active_cell_iterator
- > IteratorTuple;
-
- typedef SynchronousIterators<IteratorTuple> IteratorPair;
-
- void initialize_gradient_operator();
-
- struct InitGradPerTaskData
- {
- unsigned int d;
- unsigned int vel_dpc;
- unsigned int pres_dpc;
- FullMatrix<double> local_grad;
- std::vector<unsigned int> vel_local_dof_indices;
- std::vector<unsigned int> pres_local_dof_indices;
-
- InitGradPerTaskData (const unsigned int dd,
- const unsigned int vdpc,
- const unsigned int pdpc)
- :
- d(dd),
- vel_dpc (vdpc),
- pres_dpc (pdpc),
- local_grad (vdpc, pdpc),
- vel_local_dof_indices (vdpc),
- pres_local_dof_indices (pdpc)
- {}
- };
+ public:
+ NavierStokesProjection (const RunTimeParameters::Data_Storage &data);
+
+ void run (const bool verbose = false,
+ const unsigned int n_plots = 10);
+ protected:
+ RunTimeParameters::MethodFormulation type;
+
+ const unsigned int deg;
+ const double dt;
+ const double t_0, T, Re;
+
+ EquationData::Velocity<dim> vel_exact;
+ std::map<unsigned int, double> boundary_values;
+ std::vector<types::boundary_id> boundary_indicators;
+
+ Triangulation<dim> triangulation;
+
+ FE_Q<dim> fe_velocity;
+ FE_Q<dim> fe_pressure;
+
+ DoFHandler<dim> dof_handler_velocity;
+ DoFHandler<dim> dof_handler_pressure;
+
+ QGauss<dim> quadrature_pressure;
+ QGauss<dim> quadrature_velocity;
+
+ SparsityPattern sparsity_pattern_velocity;
+ SparsityPattern sparsity_pattern_pressure;
+ SparsityPattern sparsity_pattern_pres_vel;
+
+ SparseMatrix<double> vel_Laplace_plus_Mass;
+ SparseMatrix<double> vel_it_matrix[dim];
+ SparseMatrix<double> vel_Mass;
+ SparseMatrix<double> vel_Laplace;
+ SparseMatrix<double> vel_Advection;
+ SparseMatrix<double> pres_Laplace;
+ SparseMatrix<double> pres_Mass;
+ SparseMatrix<double> pres_Diff[dim];
+ SparseMatrix<double> pres_iterative;
+
+ Vector<double> pres_n;
+ Vector<double> pres_n_minus_1;
+ Vector<double> phi_n;
+ Vector<double> phi_n_minus_1;
+ Vector<double> u_n[dim];
+ Vector<double> u_n_minus_1[dim];
+ Vector<double> u_star[dim];
+ Vector<double> force[dim];
+ Vector<double> v_tmp;
+ Vector<double> pres_tmp;
+ Vector<double> rot_u;
+
+ SparseILU<double> prec_velocity[dim];
+ SparseILU<double> prec_pres_Laplace;
+ SparseDirectUMFPACK prec_mass;
+ SparseDirectUMFPACK prec_vel_mass;
+
+ DeclException2 (ExcInvalidTimeStep,
+ double, double,
+ << " The time step " << arg1 << " is out of range."
+ << std::endl
+ << " The permitted range is (0," << arg2 << "]");
+
+ void create_triangulation_and_dofs (const unsigned int n_refines);
+
+ void initialize();
+
+ void interpolate_velocity ();
+
+ void diffusion_step (const bool reinit_prec);
+
+ void projection_step (const bool reinit_prec);
+
+ void update_pressure (const bool reinit_prec);
+
+ private:
+ unsigned int vel_max_its;
+ unsigned int vel_Krylov_size;
+ unsigned int vel_off_diagonals;
+ unsigned int vel_update_prec;
+ double vel_eps;
+ double vel_diag_strength;
+
+ void initialize_velocity_matrices();
+
+ void initialize_pressure_matrices();
+
+ // The next few structures and functions
+ // are for doing various things in
+ // parallel. They follow the scheme laid
+ // out in @ref threads, using the
+ // WorkStream class. As explained there,
+ // this requires us to declare two
+ // structures for each of the assemblers,
+ // a per-task data and a scratch data
+ // structure. These are then handed over
+ // to functions that assemble local
+ // contributions and that copy these
+ // local contributions to the global
+ // objects.
+ //
+ // One of the things that are specific to
+ // this program is that we don't just
+ // have a single DoFHandler object that
+ // represents both the velocities and the
+ // pressure, but we use individual
+ // DoFHandler objects for these two kinds
+ // of variables. We pay for this
+ // optimization when we want to assemble
+ // terms that involve both variables,
+ // such as the divergence of the velocity
+ // and the gradient of the pressure,
+ // times the respective test
+ // functions. When doing so, we can
+ // no longer use just a single FEValues
+ // object; rather, we need two, and
+ // they need to be initialized with cell
+ // iterators that point to the same cell
+ // in the triangulation but belong to
+ // different DoFHandlers.
+ //
+ // To do this in practice, we declare a
+ // "synchronous" iterator -- an object
+ // that internally consists of several
+ // (in our case two) iterators, and each
+ // time the synchronous iteration is
+ // moved up one step, each of the
+ // iterators stored internally is moved
+ // up one step as well, thereby always
+ // staying in sync. As it so happens,
+ // there is a deal.II class that
+ // facilitates this sort of thing.
+ typedef std_cxx1x::tuple< typename DoFHandler<dim>::active_cell_iterator,
+ typename DoFHandler<dim>::active_cell_iterator
+ > IteratorTuple;
+
+ typedef SynchronousIterators<IteratorTuple> IteratorPair;
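// The idea behind such a synchronous iterator can be sketched in a few
// lines of standalone code (this is deliberately not deal.II's
// SynchronousIterators class, only an analogue of the concept): the object
// bundles two iterators and advances both of them whenever it is
// incremented, so that the pair always refers to corresponding positions
// in two different containers.
#include <iostream>
#include <vector>

template <typename It1, typename It2>
struct SyncIterator
{
  It1 first;
  It2 second;
  SyncIterator (const It1 &a, const It2 &b) : first (a), second (b) {}
  SyncIterator &operator++ () { ++first; ++second; return *this; }
};

int main ()
{
  const double v[] = {1.0, 2.0, 3.0};
  const char   p[] = {'a', 'b', 'c'};
  const std::vector<double> velocity_data (v, v+3);
  const std::vector<char>   pressure_data (p, p+3);

  SyncIterator<std::vector<double>::const_iterator,
               std::vector<char>::const_iterator>
    it (velocity_data.begin(), pressure_data.begin());

  for (unsigned int i=0; i<velocity_data.size(); ++i, ++it)
    std::cout << *it.first << " is paired with " << *it.second << std::endl;
}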
+
+ void initialize_gradient_operator();
+
+ struct InitGradPerTaskData
+ {
+ unsigned int d;
+ unsigned int vel_dpc;
+ unsigned int pres_dpc;
+ FullMatrix<double> local_grad;
+ std::vector<unsigned int> vel_local_dof_indices;
+ std::vector<unsigned int> pres_local_dof_indices;
+
+ InitGradPerTaskData (const unsigned int dd,
+ const unsigned int vdpc,
+ const unsigned int pdpc)
+ :
+ d(dd),
+ vel_dpc (vdpc),
+ pres_dpc (pdpc),
+ local_grad (vdpc, pdpc),
+ vel_local_dof_indices (vdpc),
+ pres_local_dof_indices (pdpc)
+ {}
+ };
- struct InitGradScratchData
- {
- unsigned int nqp;
- FEValues<dim> fe_val_vel;
- FEValues<dim> fe_val_pres;
- InitGradScratchData (const FE_Q<dim> &fe_v,
- const FE_Q<dim> &fe_p,
- const QGauss<dim> &quad,
- const UpdateFlags flags_v,
- const UpdateFlags flags_p)
- :
- nqp (quad.size()),
- fe_val_vel (fe_v, quad, flags_v),
- fe_val_pres (fe_p, quad, flags_p)
- {}
- InitGradScratchData (const InitGradScratchData &data)
- :
- nqp (data.nqp),
- fe_val_vel (data.fe_val_vel.get_fe(),
- data.fe_val_vel.get_quadrature(),
- data.fe_val_vel.get_update_flags()),
- fe_val_pres (data.fe_val_pres.get_fe(),
- data.fe_val_pres.get_quadrature(),
- data.fe_val_pres.get_update_flags())
- {}
- };
+ struct InitGradScratchData
+ {
+ unsigned int nqp;
+ FEValues<dim> fe_val_vel;
+ FEValues<dim> fe_val_pres;
+ InitGradScratchData (const FE_Q<dim> &fe_v,
+ const FE_Q<dim> &fe_p,
+ const QGauss<dim> &quad,
+ const UpdateFlags flags_v,
+ const UpdateFlags flags_p)
+ :
+ nqp (quad.size()),
+ fe_val_vel (fe_v, quad, flags_v),
+ fe_val_pres (fe_p, quad, flags_p)
+ {}
+ InitGradScratchData (const InitGradScratchData &data)
+ :
+ nqp (data.nqp),
+ fe_val_vel (data.fe_val_vel.get_fe(),
+ data.fe_val_vel.get_quadrature(),
+ data.fe_val_vel.get_update_flags()),
+ fe_val_pres (data.fe_val_pres.get_fe(),
+ data.fe_val_pres.get_quadrature(),
+ data.fe_val_pres.get_update_flags())
+ {}
+ };
- void assemble_one_cell_of_gradient (const IteratorPair &SI,
- InitGradScratchData &scratch,
- InitGradPerTaskData &data);
- void assemble_one_cell_of_gradient (const IteratorPair &SI,
++ void assemble_one_cell_of_gradient (const IteratorPair &SI,
+ InitGradScratchData &scratch,
+ InitGradPerTaskData &data);
- void copy_gradient_local_to_global (const InitGradPerTaskData &data);
+ void copy_gradient_local_to_global (const InitGradPerTaskData &data);
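// The division of labor between the scratch object, the per-task data
// object, the worker and the copier can be illustrated by a small serial
// standalone analogue (this is not WorkStream itself, just a sketch of the
// pattern): the worker only touches its own scratch and per-task objects
// and may therefore run on many items in parallel, whereas the copier is
// the only function that writes to the shared global object and is the
// part that WorkStream serializes.
#include <iostream>
#include <vector>

struct PerTaskData { unsigned int index; double value; };
struct ScratchData { std::vector<double> tmp; };

void worker (const unsigned int item, ScratchData &scratch, PerTaskData &data)
{
  scratch.tmp.assign (3, static_cast<double>(item));  // conflict-free local work
  data.index = item;
  data.value = scratch.tmp[0] + scratch.tmp[1] + scratch.tmp[2];
}

void copier (const PerTaskData &data, std::vector<double> &global)
{
  global[data.index] += data.value;                   // the only shared write
}

int main ()
{
  std::vector<double> global (4, 0.0);
  ScratchData scratch;
  PerTaskData per_task_data;

  for (unsigned int item=0; item<global.size(); ++item)
    {
      worker (item, scratch, per_task_data);          // run in parallel by WorkStream
      copier (per_task_data, global);                 // run serially by WorkStream
    }

  for (unsigned int i=0; i<global.size(); ++i)
    std::cout << global[i] << std::endl;              // prints 0 3 6 9
}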
- // The same general layout also applies
- // to the following classes and functions
- // implementing the assembly of the
- // advection term:
- void assemble_advection_term();
+ // The same general layout also applies
+ // to the following classes and functions
+ // implementing the assembly of the
+ // advection term:
+ void assemble_advection_term();
- struct AdvectionPerTaskData
- {
- FullMatrix<double> local_advection;
- std::vector<unsigned int> local_dof_indices;
- AdvectionPerTaskData (const unsigned int dpc)
- :
- local_advection (dpc, dpc),
- local_dof_indices (dpc)
- {}
- };
+ struct AdvectionPerTaskData
+ {
+ FullMatrix<double> local_advection;
+ std::vector<unsigned int> local_dof_indices;
+ AdvectionPerTaskData (const unsigned int dpc)
+ :
+ local_advection (dpc, dpc),
+ local_dof_indices (dpc)
+ {}
+ };
- struct AdvectionScratchData
- {
- unsigned int nqp;
- unsigned int dpc;
- std::vector< Point<dim> > u_star_local;
- std::vector< Tensor<1,dim> > grad_u_star;
- std::vector<double> u_star_tmp;
- FEValues<dim> fe_val;
- AdvectionScratchData (const FE_Q<dim> &fe,
- const QGauss<dim> &quad,
- const UpdateFlags flags)
- :
- nqp (quad.size()),
- dpc (fe.dofs_per_cell),
- u_star_local (nqp),
- grad_u_star (nqp),
- u_star_tmp (nqp),
- fe_val (fe, quad, flags)
- {}
-
- AdvectionScratchData (const AdvectionScratchData &data)
- :
- nqp (data.nqp),
- dpc (data.dpc),
- u_star_local (nqp),
- grad_u_star (nqp),
- u_star_tmp (nqp),
- fe_val (data.fe_val.get_fe(),
- data.fe_val.get_quadrature(),
- data.fe_val.get_update_flags())
- {}
- };
+ struct AdvectionScratchData
+ {
+ unsigned int nqp;
+ unsigned int dpc;
+ std::vector< Point<dim> > u_star_local;
+ std::vector< Tensor<1,dim> > grad_u_star;
+ std::vector<double> u_star_tmp;
+ FEValues<dim> fe_val;
+ AdvectionScratchData (const FE_Q<dim> &fe,
+ const QGauss<dim> &quad,
+ const UpdateFlags flags)
+ :
+ nqp (quad.size()),
+ dpc (fe.dofs_per_cell),
+ u_star_local (nqp),
+ grad_u_star (nqp),
+ u_star_tmp (nqp),
+ fe_val (fe, quad, flags)
+ {}
+
+ AdvectionScratchData (const AdvectionScratchData &data)
+ :
+ nqp (data.nqp),
+ dpc (data.dpc),
+ u_star_local (nqp),
+ grad_u_star (nqp),
+ u_star_tmp (nqp),
+ fe_val (data.fe_val.get_fe(),
+ data.fe_val.get_quadrature(),
+ data.fe_val.get_update_flags())
+ {}
+ };
- void assemble_one_cell_of_advection (const typename DoFHandler<dim>::active_cell_iterator &cell,
- AdvectionScratchData &scratch,
- AdvectionPerTaskData &data);
+ void assemble_one_cell_of_advection (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AdvectionScratchData &scratch,
+ AdvectionPerTaskData &data);
- void copy_advection_local_to_global (const AdvectionPerTaskData &data);
+ void copy_advection_local_to_global (const AdvectionPerTaskData &data);
- // The final few functions implement the
- // diffusion solve as well as
- // postprocessing the output, including
- // computing the curl of the velocity:
- void diffusion_component_solve (const unsigned int d);
+ // The final few functions implement the
+ // diffusion solve as well as
+ // postprocessing the output, including
+ // computing the curl of the velocity:
+ void diffusion_component_solve (const unsigned int d);
- void output_results (const unsigned int step);
+ void output_results (const unsigned int step);
- void assemble_vorticity (const bool reinit_prec);
+ void assemble_vorticity (const bool reinit_prec);
};
template <int dim, int fe_degree, typename number>
class LaplaceOperator : public Subscriptor
{
- public:
- LaplaceOperator ();
+ public:
+ LaplaceOperator ();
- void clear();
+ void clear();
- void reinit (const MGDoFHandler<dim> &dof_handler,
- const ConstraintMatrix &constraints,
- const unsigned int level = numbers::invalid_unsigned_int);
+ void reinit (const MGDoFHandler<dim> &dof_handler,
- const ConstraintMatrix &constraints,
++ const ConstraintMatrix &constraints,
+ const unsigned int level = numbers::invalid_unsigned_int);
- unsigned int m () const;
- unsigned int n () const;
+ unsigned int m () const;
+ unsigned int n () const;
- void vmult (Vector<double> &dst,
- const Vector<double> &src) const;
- void Tvmult (Vector<double> &dst,
- const Vector<double> &src) const;
- void vmult_add (Vector<double> &dst,
- const Vector<double> &src) const;
- void Tvmult_add (Vector<double> &dst,
- const Vector<double> &src) const;
+ void vmult (Vector<double> &dst,
+ const Vector<double> &src) const;
+ void Tvmult (Vector<double> &dst,
+ const Vector<double> &src) const;
+ void vmult_add (Vector<double> &dst,
+ const Vector<double> &src) const;
+ void Tvmult_add (Vector<double> &dst,
+ const Vector<double> &src) const;
- number el (const unsigned int row,
- const unsigned int col) const;
- void set_diagonal (const Vector<number> &diagonal);
+ number el (const unsigned int row,
+ const unsigned int col) const;
+ void set_diagonal (const Vector<number> &diagonal);
- std::size_t memory_consumption () const;
+ std::size_t memory_consumption () const;
- private:
- void local_apply (const MatrixFree<dim,number> &data,
- Vector<double> &dst,
- const Vector<double> &src,
- const std::pair<unsigned int,unsigned int> &cell_range) const;
+ private:
+ void local_apply (const MatrixFree<dim,number> &data,
+ Vector<double> &dst,
+ const Vector<double> &src,
+ const std::pair<unsigned int,unsigned int> &cell_range) const;
- void evaluate_coefficient(const Coefficient<dim> &function);
+ void evaluate_coefficient(const Coefficient<dim> &function);
- MatrixFree<dim,number> data;
- AlignedVector<VectorizedArray<number> > coefficient;
+ MatrixFree<dim,number> data;
+ AlignedVector<VectorizedArray<number> > coefficient;
- Vector<number> diagonal_values;
- bool diagonal_is_available;
+ Vector<number> diagonal_values;
+ bool diagonal_is_available;
};
template <int dim, int fe_degree, typename number>
void
LaplaceOperator<dim,fe_degree,number>::reinit (const MGDoFHandler<dim> &dof_handler,
- const ConstraintMatrix &constraints,
- const unsigned int level)
- const ConstraintMatrix &constraints,
++ const ConstraintMatrix &constraints,
+ const unsigned int level)
{
typename MatrixFree<dim,number>::AdditionalData additional_data;
additional_data.tasks_parallel_scheme =
FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
Vector<double> cell_rhs (dofs_per_cell);
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
- // Next, we again have to loop over all
- // cells and assemble local contributions.
- // Note, that a cell is a quadrilateral in
- // two space dimensions, but a hexahedron
- // in 3D. In fact, the
- // <code>active_cell_iterator</code> data
- // type is something different, depending
- // on the dimension we are in, but to the
- // outside world they look alike and you
- // will probably never see a difference
- // although the classes that this typedef
- // stands for are in fact completely
- // unrelated:
+ // Next, we again have to loop over all
+ // cells and assemble local contributions.
+ // Note that a cell is a quadrilateral in
+ // two space dimensions, but a hexahedron
+ // in 3D. In fact, the
+ // <code>active_cell_iterator</code> data
+ // type is a different type depending
+ // on the dimension we are in, but to the
+ // outside world the iterators look alike and you
+ // will probably never see a difference,
+ // even though the classes that this typedef
+ // stands for are in fact completely
+ // unrelated:
typename DoFHandler<dim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
MPI_Comm _mpi_communicator,
ConditionalOStream _pcout);
- void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor,
+ void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
+ SymmetricTensor<2,dim> &strain_tensor,
- unsigned int &elast_points,
- unsigned int &plast_points,
+ unsigned int &elast_points,
+ unsigned int &plast_points,
double &yield);
- void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor);
+ void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
+ SymmetricTensor<4,dim> &stress_strain_tensor,
+ SymmetricTensor<2,dim> &strain_tensor);
inline SymmetricTensor<2,dim> get_strain (const FEValues<dim> &fe_values,
const unsigned int shape_func,
const unsigned int q_point) const;
}
template <int dim>
- void ConstitutiveLaw<dim>::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor)
+ void ConstitutiveLaw<dim>::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- SymmetricTensor<2,dim> &strain_tensor)
++ SymmetricTensor<4,dim> &stress_strain_tensor,
++ SymmetricTensor<2,dim> &strain_tensor)
{
if (dim == 3)
- {
- SymmetricTensor<2,dim> stress_tensor;
- stress_tensor = (stress_strain_tensor_kappa + stress_strain_tensor_mu)*strain_tensor;
- double tmp = E/((1+nu)*(1-2*nu));
+ {
+ SymmetricTensor<2,dim> stress_tensor;
+ stress_tensor = (stress_strain_tensor_kappa + stress_strain_tensor_mu)*strain_tensor;
+ double tmp = E/((1+nu)*(1-2*nu));
- stress_strain_tensor = stress_strain_tensor_mu;
- stress_strain_tensor_linearized = stress_strain_tensor_mu;
+ stress_strain_tensor = stress_strain_tensor_mu;
+ stress_strain_tensor_linearized = stress_strain_tensor_mu;
- SymmetricTensor<2,dim> deviator_stress_tensor = deviator(stress_tensor);
+ SymmetricTensor<2,dim> deviator_stress_tensor = deviator(stress_tensor);
- double deviator_stress_tensor_norm = deviator_stress_tensor.norm ();
+ double deviator_stress_tensor_norm = deviator_stress_tensor.norm ();
- double beta = 1.0;
- if (deviator_stress_tensor_norm >= sigma_0)
- {
- beta = (sigma_0 + gamma)/deviator_stress_tensor_norm;
- stress_strain_tensor *= beta;
- stress_strain_tensor_linearized *= beta;
- deviator_stress_tensor /= deviator_stress_tensor_norm;
- stress_strain_tensor_linearized -= beta*2*mu*outer_product(deviator_stress_tensor, deviator_stress_tensor);
- }
+ double beta = 1.0;
+ if (deviator_stress_tensor_norm >= sigma_0)
+ {
+ beta = (sigma_0 + gamma)/deviator_stress_tensor_norm;
+ stress_strain_tensor *= beta;
+ stress_strain_tensor_linearized *= beta;
+ deviator_stress_tensor /= deviator_stress_tensor_norm;
+ stress_strain_tensor_linearized -= beta*2*mu*outer_product(deviator_stress_tensor, deviator_stress_tensor);
+ }
- stress_strain_tensor += stress_strain_tensor_kappa;
- stress_strain_tensor_linearized += stress_strain_tensor_kappa;
- }
+ stress_strain_tensor += stress_strain_tensor_kappa;
+ stress_strain_tensor_linearized += stress_strain_tensor_kappa;
+ }
}
namespace EquationData
template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::
- BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
+ BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
const InverseMatrix<TrilinosWrappers::SparseMatrix,
- PreconditionerMp> &Mpinv,
+ PreconditionerMp> &Mpinv,
const PreconditionerA &Apreconditioner)
- :
- darcy_matrix (&S),
- m_inverse (&Mpinv),
- a_preconditioner (Apreconditioner),
- tmp (darcy_matrix->block(1,1).m())
+ :
+ darcy_matrix (&S),
+ m_inverse (&Mpinv),
+ a_preconditioner (Apreconditioner),
+ tmp (darcy_matrix->block(1,1).m())
{}
template <int dim>
class TwoPhaseFlowProblem
{
- public:
- TwoPhaseFlowProblem (const unsigned int degree);
- void run ();
-
- private:
- void setup_dofs ();
- void assemble_darcy_preconditioner ();
- void build_darcy_preconditioner ();
- void assemble_darcy_system ();
- void assemble_saturation_system ();
- void assemble_saturation_matrix ();
- void assemble_saturation_rhs ();
- void assemble_saturation_rhs_cell_term (const FEValues<dim> &saturation_fe_values,
- const FEValues<dim> &darcy_fe_values,
- const double global_max_u_F_prime,
- const double global_S_variation,
- const std::vector<unsigned int> &local_dof_indices);
- void assemble_saturation_rhs_boundary_term (const FEFaceValues<dim> &saturation_fe_face_values,
- const FEFaceValues<dim> &darcy_fe_face_values,
- const std::vector<unsigned int> &local_dof_indices);
- void solve ();
- void refine_mesh (const unsigned int min_grid_level,
- const unsigned int max_grid_level);
- void output_results () const;
-
- // We follow with a number of
- // helper functions that are
- // used in a variety of places
- // throughout the program:
- double get_max_u_F_prime () const;
- std::pair<double,double> get_extrapolated_saturation_range () const;
- bool determine_whether_to_solve_for_pressure_and_velocity () const;
- void project_back_saturation ();
- double compute_viscosity (const std::vector<double> &old_saturation,
- const std::vector<double> &old_old_saturation,
- const std::vector<Tensor<1,dim> > &old_saturation_grads,
- const std::vector<Tensor<1,dim> > &old_old_saturation_grads,
- const std::vector<Vector<double> > &present_darcy_values,
- const double global_max_u_F_prime,
- const double global_S_variation,
- const double cell_diameter) const;
-
-
- // This all is followed by the
- // member variables, most of
- // which are similar to the
- // ones in step-31, with the
- // exception of the ones that
- // pertain to the macro time
- // stepping for the
- // velocity/pressure system:
- Triangulation<dim> triangulation;
- double global_Omega_diameter;
-
- const unsigned int degree;
-
- const unsigned int darcy_degree;
- FESystem<dim> darcy_fe;
- DoFHandler<dim> darcy_dof_handler;
- ConstraintMatrix darcy_constraints;
-
- ConstraintMatrix darcy_preconditioner_constraints;
-
- TrilinosWrappers::BlockSparseMatrix darcy_matrix;
- TrilinosWrappers::BlockSparseMatrix darcy_preconditioner_matrix;
-
- TrilinosWrappers::BlockVector darcy_solution;
- TrilinosWrappers::BlockVector darcy_rhs;
-
- TrilinosWrappers::BlockVector last_computed_darcy_solution;
- TrilinosWrappers::BlockVector second_last_computed_darcy_solution;
-
-
- const unsigned int saturation_degree;
- FE_Q<dim> saturation_fe;
- DoFHandler<dim> saturation_dof_handler;
- ConstraintMatrix saturation_constraints;
-
- TrilinosWrappers::SparseMatrix saturation_matrix;
-
-
- TrilinosWrappers::Vector saturation_solution;
- TrilinosWrappers::Vector old_saturation_solution;
- TrilinosWrappers::Vector old_old_saturation_solution;
- TrilinosWrappers::Vector saturation_rhs;
-
- TrilinosWrappers::Vector saturation_matching_last_computed_darcy_solution;
-
- const double saturation_refinement_threshold;
-
- double time;
- const double end_time;
-
- double current_macro_time_step;
- double old_macro_time_step;
-
- double time_step;
- double old_time_step;
- unsigned int timestep_number;
-
- const double viscosity;
- const double porosity;
- const double AOS_threshold;
-
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Amg_preconditioner;
- std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Mp_preconditioner;
-
- bool rebuild_saturation_matrix;
-
- // At the very end we declare a
- // variable that denotes the
- // material model. Compared to
- // step-21, we do this here as
- // a member variable since we
- // will want to use it in a
- // variety of places and so
- // having a central place where
- // such a variable is declared
- // will make it simpler to
- // replace one class by another
- // (e.g. replace
- // RandomMedium::KInverse by
- // SingleCurvingCrack::KInverse).
- const RandomMedium::KInverse<dim> k_inverse;
+ public:
+ TwoPhaseFlowProblem (const unsigned int degree);
+ void run ();
+
+ private:
+ void setup_dofs ();
+ void assemble_darcy_preconditioner ();
+ void build_darcy_preconditioner ();
+ void assemble_darcy_system ();
+ void assemble_saturation_system ();
+ void assemble_saturation_matrix ();
+ void assemble_saturation_rhs ();
+ void assemble_saturation_rhs_cell_term (const FEValues<dim> &saturation_fe_values,
+ const FEValues<dim> &darcy_fe_values,
+ const double global_max_u_F_prime,
+ const double global_S_variation,
+ const std::vector<unsigned int> &local_dof_indices);
+ void assemble_saturation_rhs_boundary_term (const FEFaceValues<dim> &saturation_fe_face_values,
+ const FEFaceValues<dim> &darcy_fe_face_values,
+ const std::vector<unsigned int> &local_dof_indices);
+ void solve ();
+ void refine_mesh (const unsigned int min_grid_level,
+ const unsigned int max_grid_level);
+ void output_results () const;
+
+ // We follow with a number of
+ // helper functions that are
+ // used in a variety of places
+ // throughout the program:
+ double get_max_u_F_prime () const;
+ std::pair<double,double> get_extrapolated_saturation_range () const;
+ bool determine_whether_to_solve_for_pressure_and_velocity () const;
+ void project_back_saturation ();
+ double compute_viscosity (const std::vector<double> &old_saturation,
+ const std::vector<double> &old_old_saturation,
- const std::vector<Tensor<1,dim> > &old_saturation_grads,
- const std::vector<Tensor<1,dim> > &old_old_saturation_grads,
++ const std::vector<Tensor<1,dim> > &old_saturation_grads,
++ const std::vector<Tensor<1,dim> > &old_old_saturation_grads,
+ const std::vector<Vector<double> > &present_darcy_values,
+ const double global_max_u_F_prime,
+ const double global_S_variation,
+ const double cell_diameter) const;
+
+
+ // This all is followed by the
+ // member variables, most of
+ // which are similar to the
+ // ones in step-31, with the
+ // exception of the ones that
+ // pertain to the macro time
+ // stepping for the
+ // velocity/pressure system:
+ Triangulation<dim> triangulation;
+ double global_Omega_diameter;
+
+ const unsigned int degree;
+
+ const unsigned int darcy_degree;
+ FESystem<dim> darcy_fe;
+ DoFHandler<dim> darcy_dof_handler;
+ ConstraintMatrix darcy_constraints;
+
+ ConstraintMatrix darcy_preconditioner_constraints;
+
+ TrilinosWrappers::BlockSparseMatrix darcy_matrix;
+ TrilinosWrappers::BlockSparseMatrix darcy_preconditioner_matrix;
+
+ TrilinosWrappers::BlockVector darcy_solution;
+ TrilinosWrappers::BlockVector darcy_rhs;
+
+ TrilinosWrappers::BlockVector last_computed_darcy_solution;
+ TrilinosWrappers::BlockVector second_last_computed_darcy_solution;
+
+
+ const unsigned int saturation_degree;
+ FE_Q<dim> saturation_fe;
+ DoFHandler<dim> saturation_dof_handler;
+ ConstraintMatrix saturation_constraints;
+
+ TrilinosWrappers::SparseMatrix saturation_matrix;
+
+
+ TrilinosWrappers::Vector saturation_solution;
+ TrilinosWrappers::Vector old_saturation_solution;
+ TrilinosWrappers::Vector old_old_saturation_solution;
+ TrilinosWrappers::Vector saturation_rhs;
+
+ TrilinosWrappers::Vector saturation_matching_last_computed_darcy_solution;
+
+ const double saturation_refinement_threshold;
+
+ double time;
+ const double end_time;
+
+ double current_macro_time_step;
+ double old_macro_time_step;
+
+ double time_step;
+ double old_time_step;
+ unsigned int timestep_number;
+
+ const double viscosity;
+ const double porosity;
+ const double AOS_threshold;
+
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Amg_preconditioner;
+ std_cxx1x::shared_ptr<TrilinosWrappers::PreconditionIC> Mp_preconditioner;
+
+ bool rebuild_saturation_matrix;
+
+ // At the very end we declare a
+ // variable that denotes the
+ // material model. Compared to
+ // step-21, we do this here as
+ // a member variable since we
+ // will want to use it in a
+ // variety of places and so
+ // having a central place where
+ // such a variable is declared
+ // will make it simpler to
+ // replace one class by another
+ // (e.g. replace
+ // RandomMedium::KInverse by
+ // SingleCurvingCrack::KInverse).
+ const RandomMedium::KInverse<dim> k_inverse;
};
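// As a hedged illustration of the
// last comment above: assuming, as
// in step-21, that
// SingleCurvingCrack::KInverse
// offers the same interface as
// RandomMedium::KInverse, switching
// material models would amount to
// changing only this one
// declaration (plus its
// initialization in the
// constructor), e.g.
//
//   const SingleCurvingCrack::KInverse<dim> k_inverse;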
FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
Vector<double> cell_rhs (dofs_per_cell);
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
- // Here is one difference: for this
- // program, we will again use a
- // constant right hand side
- // function and zero boundary
- // values, but a variable
- // coefficient. We have already
- // declared the class that
- // represents this coefficient
- // above, so we only have to
- // declare a corresponding object
- // here.
- //
- // Then, below, we will ask the
- // <code>coefficient</code> function object
- // to compute the values of the
- // coefficient at all quadrature
- // points on one cell at once. The
- // reason for this is that, if you
- // look back at how we did this in
- // step-4, you will realize that we
- // called the function computing
- // the right hand side value inside
- // nested loops over all degrees of
- // freedom and over all quadrature
- // points,
- // i.e. dofs_per_cell*n_q_points
- // times. For the coefficient that
- // is used inside the matrix, this
- // would actually be
- // dofs_per_cell*dofs_per_cell*n_q_points. On
- // the other hand, the function
- // will of course return the same
- // value every time it is called
- // with the same quadrature point,
- // independently of what shape
- // function we presently treat;
- // secondly, these are virtual
- // function calls, so are rather
- // expensive. Obviously, there are
- // only n_q_point different values,
- // and we shouldn't call the
- // function more often than
- // that. Or, even better than this,
- // compute all of these values at
- // once, and get away with a single
- // function call per cell.
- //
- // This is exactly what we are
- // going to do. For this, we need
- // some space to store the values
- // in. We therefore also have to
- // declare an array to hold these
- // values:
+ // Here is one difference: for this
+ // program, we will again use a
+ // constant right hand side
+ // function and zero boundary
+ // values, but a variable
+ // coefficient. We have already
+ // declared the class that
+ // represents this coefficient
+ // above, so we only have to
+ // declare a corresponding object
+ // here.
+ //
+ // Then, below, we will ask the
+ // <code>coefficient</code> function object
+ // to compute the values of the
+ // coefficient at all quadrature
+ // points on one cell at once. The
+ // reason for this is that, if you
+ // look back at how we did this in
+ // step-4, you will realize that we
+ // called the function computing
+ // the right hand side value inside
+ // nested loops over all degrees of
+ // freedom and over all quadrature
+ // points,
+ // i.e. dofs_per_cell*n_q_points
+ // times. For the coefficient that
+ // is used inside the matrix, this
+ // would actually be
+ // dofs_per_cell*dofs_per_cell*n_q_points. On
+ // the other hand, the function
+ // will of course return the same
+ // value every time it is called
+ // with the same quadrature point,
+ // independently of what shape
+ // function we presently treat;
+ // secondly, these are virtual
+ // function calls, so are rather
+ // expensive. Obviously, there are
+ // only n_q_points different values,
+ // and we shouldn't call the
+ // function more often than
+ // that. Or, even better than this,
+ // compute all of these values at
+ // once, and get away with a single
+ // function call per cell.
+ //
+ // This is exactly what we are
+ // going to do. For this, we need
+ // some space to store the values
+ // in. We therefore also have to
+ // declare an array to hold these
+ // values:
const Coefficient<dim> coefficient;
std::vector<double> coefficient_values (n_q_points);
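// As a hedged sketch of how these
// two objects are used together in
// the assembly loop of this program
// (assuming <code>fe_values</code>
// has already been reinitialized on
// the current cell), the
// coefficient is evaluated once per
// cell and the stored values are
// reused in the inner loops:
//
//   coefficient.value_list (fe_values.get_quadrature_points(),
//                           coefficient_values);
//   for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
//     for (unsigned int i=0; i<dofs_per_cell; ++i)
//       for (unsigned int j=0; j<dofs_per_cell; ++j)
//         cell_matrix(i,j) += (coefficient_values[q_point] *
//                              fe_values.shape_grad (i, q_point) *
//                              fe_values.shape_grad (j, q_point) *
//                              fe_values.JxW (q_point));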
- // @sect3{GradientEstimation class declaration}
-
- // Now, finally, here comes the class
- // that will compute the difference
- // approximation of the gradient on
- // each cell and weighs that with a
- // power of the mesh size, as
- // described in the introduction.
- // This class is a simple version of
- // the <code>DerivativeApproximation</code>
- // class in the library, that uses
- // similar techniques to obtain
- // finite difference approximations
- // of the gradient of a finite
- // element field, or if higher
- // derivatives.
- //
- // The
- // class has one public static
- // function <code>estimate</code> that is
- // called to compute a vector of
- // error indicators, and one private
- // function that does the actual work
- // on an interval of all active
- // cells. The latter is called by the
- // first one in order to be able to
- // do the computations in parallel if
- // your computer has more than one
- // processor. While the first
- // function accepts as parameter a
- // vector into which the error
- // indicator is written for each
- // cell. This vector is passed on to
- // the second function that actually
- // computes the error indicators on
- // some cells, and the respective
- // elements of the vector are
- // written. By the way, we made it
- // somewhat of a convention to use
- // vectors of floats for error
- // indicators rather than the common
- // vectors of doubles, as the
- // additional accuracy is not
- // necessary for estimated values.
- //
- // In addition to these two
- // functions, the class declares to
- // exceptions which are raised when a
- // cell has no neighbors in each of
- // the space directions (in which
- // case the matrix described in the
- // introduction would be singular and
- // can't be inverted), while the
- // other one is used in the more
- // common case of invalid parameters
- // to a function, namely a vector of
- // wrong size.
- //
- // Two annotations to this class are
- // still in order: the first is that
- // the class has no non-static member
- // functions or variables, so this is
- // not really a class, but rather
- // serves the purpose of a
- // <code>namespace</code> in C++. The reason
- // that we chose a class over a
- // namespace is that this way we can
- // declare functions that are
- // private, i.e. visible to the
- // outside world but not
- // callable. This can be done with
- // namespaces as well, if one
- // declares some functions in header
- // files in the namespace and
- // implements these and other
- // functions in the implementation
- // file. The functions not declared
- // in the header file are still in
- // the namespace but are not callable
- // from outside. However, as we have
- // only one file here, it is not
- // possible to hide functions in the
- // present case.
- //
- // The second is that the dimension
- // template parameter is attached to
- // the function rather than to the
- // class itself. This way, you don't
- // have to specify the template
- // parameter yourself as in most
- // other cases, but the compiler can
- // figure its value out itself from
- // the dimension of the DoF handler
- // object that one passes as first
- // argument.
- //
- // Finally note that the
- // <code>IndexInterval</code> typedef is
- // introduced as a convenient
- // abbreviation for an otherwise
- // lengthy type name.
+ // @sect3{GradientEstimation class declaration}
+
+ // Now, finally, here comes the class
+ // that will compute the difference
+ // approximation of the gradient on
+ // each cell and weighs that with a
+ // power of the mesh size, as
+ // described in the introduction.
+ // This class is a simple version of
+ // the <code>DerivativeApproximation</code>
+ // class in the library that uses
+ // similar techniques to obtain
+ // finite difference approximations
+ // of the gradient of a finite
+ // element field, or of higher
+ // derivatives.
+ //
+ // The
+ // class has one public static
+ // function <code>estimate</code> that is
+ // called to compute a vector of
+ // error indicators, and one private
+ // function that does the actual work
+ // on an interval of all active
+ // cells. The latter is called by the
+ // first one in order to be able to
+ // do the computations in parallel if
+ // your computer has more than one
+ // processor. The first function
+ // accepts as a parameter a vector
+ // into which the error indicator
+ // is written for each cell; this
+ // vector is passed on to the
+ // second function, which actually
+ // computes the error indicators on
+ // some cells and writes the
+ // respective elements of the
+ // vector. By the way, we made it
+ // somewhat of a convention to use
+ // vectors of floats for error
+ // indicators rather than the common
+ // vectors of doubles, as the
+ // additional accuracy is not
+ // necessary for estimated values.
+ //
+ // In addition to these two
+ // functions, the class declares two
+ // exceptions: the first is raised
+ // when a cell does not have
+ // neighbors in each of the space
+ // directions (in which case the
+ // matrix described in the
+ // introduction would be singular
+ // and can't be inverted), while
+ // the other one is used in the
+ // more common case of invalid
+ // parameters to a function, namely
+ // a vector of the wrong size.
+ //
+ // Two annotations to this class are
+ // still in order: the first is that
+ // the class has no non-static member
+ // functions or variables, so this is
+ // not really a class, but rather
+ // serves the purpose of a
+ // <code>namespace</code> in C++. The reason
+ // that we chose a class over a
+ // namespace is that this way we can
+ // declare functions that are
+ // private, i.e. visible to the
+ // outside world but not
+ // callable. This can be done with
+ // namespaces as well, if one
+ // declares some functions in header
+ // files in the namespace and
+ // implements these and other
+ // functions in the implementation
+ // file. The functions not declared
+ // in the header file are still in
+ // the namespace but are not callable
+ // from outside. However, as we have
+ // only one file here, it is not
+ // possible to hide functions in the
+ // present case.
+ //
+ // The second is that the dimension
+ // template parameter is attached to
+ // the function rather than to the
+ // class itself. This way, you don't
+ // have to specify the template
+ // parameter yourself as in most
+ // other cases, but the compiler can
+ // figure its value out itself from
+ // the dimension of the DoF handler
+ // object that one passes as first
+ // argument.
+ //
+ // Finally note that the
+ // <code>IndexInterval</code> typedef is
+ // introduced as a convenient
+ // abbreviation for an otherwise
+ // lengthy type name.
class GradientEstimation
{
- public:
- template <int dim>
- static void estimate (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
- Vector<float> &error_per_cell);
-
- DeclException2 (ExcInvalidVectorLength,
- int, int,
- << "Vector has length " << arg1 << ", but should have "
- << arg2);
- DeclException0 (ExcInsufficientDirections);
-
- private:
- typedef std::pair<unsigned int,unsigned int> IndexInterval;
-
- template <int dim>
- static void estimate_interval (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
- const IndexInterval &index_interval,
- Vector<float> &error_per_cell);
+ public:
+ template <int dim>
+ static void estimate (const DoFHandler<dim> &dof,
+ const Vector<double> &solution,
+ Vector<float> &error_per_cell);
+
+ DeclException2 (ExcInvalidVectorLength,
+ int, int,
+ << "Vector has length " << arg1 << ", but should have "
+ << arg2);
+ DeclException0 (ExcInsufficientDirections);
+
+ private:
+ typedef std::pair<unsigned int,unsigned int> IndexInterval;
+
+ template <int dim>
+ static void estimate_interval (const DoFHandler<dim> &dof,
+ const Vector<double> &solution,
+ const IndexInterval &index_interval,
+ Vector<float> &error_per_cell);
};
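// A minimal usage sketch (hedged:
// <code>triangulation</code>,
// <code>dof_handler</code> and
// <code>solution</code> stand for
// the corresponding objects of the
// main class of this program); note
// that the dimension template
// argument is deduced from the DoF
// handler argument, as announced
// above:
//
//   Vector<float> gradient_indicator (triangulation.n_active_cells());
//   GradientEstimation::estimate (dof_handler,
//                                 solution,
//                                 gradient_indicator);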
template <int dim>
void
GradientEstimation::estimate (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution,
+ const Vector<double> &solution,
Vector<float> &error_per_cell)
{
- // Before starting with the work,
- // we check that the vector into
- // which the results are written,
- // has the right size. It is a
- // common error that such
- // parameters have the wrong size,
- // but the resulting damage by not
- // catching these errors are very
- // subtle as they are usually
- // corruption of data somewhere in
- // memory. Often, the problems
- // emerging from this are not
- // reproducible, and we found that
- // it is well worth the effort to
- // check for such things.
+ // Before starting with the work,
+ // we check that the vector into
+ // which the results are written
+ // has the right size. It is a
+ // common error that such
+ // parameters have the wrong size,
+ // and the damage resulting from
+ // not catching these errors is
+ // very subtle, as it usually shows
+ // up as data corruption somewhere
+ // in memory. Often, the problems
+ // emerging from this are not
+ // reproducible, and we found that
+ // it is well worth the effort to
+ // check for such things.
Assert (error_per_cell.size() == dof_handler.get_tria().n_active_cells(),
ExcInvalidVectorLength (error_per_cell.size(),
dof_handler.get_tria().n_active_cells()));
*/
class ConditionalOStream
{
- public:
- /**
- * Constructor. Set the stream to which
- * we want to write, and the condition
- * based on which writes are actually
- * forwarded. Per default the condition
- * of an object is active.
- */
- ConditionalOStream (std::ostream &stream,
- const bool active = true);
-
- /**
- * Depending on the
- * <tt>active</tt> flag set the
- * condition of this stream to
- * active (true) or non-active
- * (false). An object of this
- * class prints to <tt>cout</tt>
- * if and only if its condition
- * is active.
- */
- void set_condition (const bool active);
-
- /**
- * Return the condition of the object.
- */
- bool is_active() const;
-
- /**
- * Return a reference to the stream
- * currently in use.
- */
- std::ostream & get_stream () const;
-
- /**
- * Output a constant something through
- * this stream. This function must be @p
- * const so that member objects of this
- * type can also be used from @p const
- * member functions of the surrounding
- * class.
- */
- template <typename T>
- const ConditionalOStream &
- operator << (const T &t) const;
-
- /**
- * Treat ostream manipulators. This
- * function must be @p const so that
- * member objects of this type can also
- * be used from @p const member functions
- * of the surrounding class.
- *
- * Note that compilers want to see this
- * treated differently from the general
- * template above since functions like @p
- * std::endl are actually overloaded and
- * can't be bound directly to a template
- * type.
- */
- const ConditionalOStream &
- operator<< (std::ostream& (*p) (std::ostream&)) const;
-
- private:
- /**
- * Reference to the stream we
- * want to write to.
- */
- std::ostream &output_stream;
-
- /**
- * Stores the actual condition
- * the object is in.
- */
- bool active_flag;
+ public:
+ /**
+ * Constructor. Set the stream to which
+ * we want to write, and the condition
+ * based on which writes are actually
+ * forwarded. By default, the condition
+ * of an object is active.
+ */
+ ConditionalOStream (std::ostream &stream,
+ const bool active = true);
+
+ /**
+ * Depending on the
+ * <tt>active</tt> flag set the
+ * condition of this stream to
+ * active (true) or non-active
+ * (false). An object of this
+ * class prints to <tt>cout</tt>
+ * if and only if its condition
+ * is active.
+ */
+ void set_condition (const bool active);
+
+ /**
+ * Return the condition of the object.
+ */
+ bool is_active() const;
+
+ /**
+ * Return a reference to the stream
+ * currently in use.
+ */
+ std::ostream &get_stream () const;
+
+ /**
+ * Output a constant something through
+ * this stream. This function must be @p
+ * const so that member objects of this
+ * type can also be used from @p const
+ * member functions of the surrounding
+ * class.
+ */
+ template <typename T>
+ const ConditionalOStream &
+ operator << (const T &t) const;
+
+ /**
+ * Treat ostream manipulators. This
+ * function must be @p const so that
+ * member objects of this type can also
+ * be used from @p const member functions
+ * of the surrounding class.
+ *
+ * Note that compilers want to see this
+ * treated differently from the general
+ * template above since functions like @p
+ * std::endl are actually overloaded and
+ * can't be bound directly to a template
+ * type.
+ */
+ const ConditionalOStream &
+ operator<< (std::ostream& (*p) (std::ostream &)) const;
+
+ private:
+ /**
+ * Reference to the stream we
+ * want to write to.
+ */
+ std::ostream &output_stream;
+
+ /**
+ * Stores the actual condition
+ * the object is in.
+ */
+ bool active_flag;
};
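// A brief usage sketch, assuming an MPI program in which only the
// first process is supposed to generate screen output, and using the
// usual deal.II utility function to obtain the rank of the current
// process:
//
//   ConditionalOStream pcout (std::cout,
//                             Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0);
//   pcout << "Solving..." << std::endl;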
template <int dim, int spacedim=dim>
class DataOutInterface : private DataOutBase
{
- public:
- /*
- * Import a few names that were
- * previously in this class and have then
- * moved to the base class. Since the
- * base class is inherited from
- * privately, we need to re-import these
- * symbols to make sure that references
- * to DataOutInterface<dim,spacedim>::XXX
- * remain valid.
- */
- using DataOutBase::OutputFormat;
- using DataOutBase::default_format;
- using DataOutBase::dx;
- using DataOutBase::gnuplot;
- using DataOutBase::povray;
- using DataOutBase::eps;
- using DataOutBase::tecplot;
- using DataOutBase::tecplot_binary;
- using DataOutBase::vtk;
- using DataOutBase::vtu;
- using DataOutBase::deal_II_intermediate;
- using DataOutBase::parse_output_format;
- using DataOutBase::get_output_format_names;
- using DataOutBase::determine_intermediate_format_dimensions;
-
- /**
- * Constructor.
- */
- DataOutInterface ();
-
- /**
- * Destructor. Does nothing, but is
- * declared virtual since this class has
- * virtual functions.
- */
- virtual ~DataOutInterface ();
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in OpenDX format. See
- * DataOutBase::write_dx.
- */
- void write_dx (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in EPS format. See
- * DataOutBase::write_eps.
- */
- void write_eps (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in GMV format. See
- * DataOutBase::write_gmv.
- */
- void write_gmv (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in GNUPLOT format. See
- * DataOutBase::write_gnuplot.
- */
- void write_gnuplot (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in POVRAY format. See
- * DataOutBase::write_povray.
- */
- void write_povray (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in Tecplot format. See
- * DataOutBase::write_tecplot.
- */
- void write_tecplot (std::ostream &out) const;
-
- /**
- * Obtain data through
- * get_patches() and write it in
- * the Tecplot binary output
- * format. Note that the name of
- * the output file must be
- * specified through the
- * TecplotFlags interface.
- */
- void write_tecplot_binary (std::ostream &out) const;
-
- /**
- * Obtain data through
- * get_patches() and write it to
- * <tt>out</tt> in UCD format for
- * AVS. See
- * DataOutBase::write_ucd.
- */
- void write_ucd (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in Vtk format. See
- * DataOutBase::write_vtk.
- */
- void write_vtk (std::ostream &out) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in Vtu (VTK's XML) format. See
- * DataOutBase::write_vtu.
- *
- * Some visualization programs,
- * such as ParaView, can read
- * several separate VTU files to
- * parallelize visualization. In
- * that case, you need a
- * <code>.pvtu</code> file that
- * describes which VTU files form
- * a group. The
- * DataOutInterface::write_pvtu_record()
- * function can generate such a
- * master record. Likewise,
- * DataOutInterface::write_visit_record()
- * does the same for VisIt. Finally,
- * DataOutInterface::write_pvd_record()
- * can be used to group together
- * the files that jointly make up
- * a time dependent simulation.
- */
- void write_vtu (std::ostream &out) const;
-
- /**
- * Collective MPI call to write the
- * solution from all participating nodes
- * (those in the given communicator) to a
- * single compressed .vtu file on a
- * shared file system. The communicator
- * can be a sub communicator of the one
- * used by the computation. This routine
- * uses MPI I/O to achieve high
- * performance on parallel filesystems.
- * Also see
- * DataOutInterface::write_vtu().
- */
- void write_vtu_in_parallel (const char* filename, MPI_Comm comm) const;
-
- /**
- * Some visualization programs, such as
- * ParaView, can read several separate
- * VTU files to parallelize
- * visualization. In that case, you need
- * a <code>.pvtu</code> file that
- * describes which VTU files (written,
- * for example, through the write_vtu()
- * function) form a group. The current
- * function can generate such a master
- * record.
- *
- * The file so written contains a list of
- * (scalar or vector) fields whose values
- * are described by the individual files
- * that comprise the set of parallel VTU
- * files along with the names of these
- * files. This function gets the names
- * and types of fields through the
- * get_patches() function of this class
- * like all the other write_xxx()
- * functions. The second argument to this
- * function specifies the names of the
- * files that form the parallel set.
- *
- * @note See DataOutBase::write_vtu for
- * writing each piece. Also note that
- * only one parallel process needs to
- * call the current function, listing the
- * names of the files written by all
- * parallel processes.
- *
- * @note The use of this function is
- * explained in step-40.
- *
- * @note In order to tell Paraview to
- * group together multiple <code>pvtu</code>
- * files that each describe one time
- * step of a time dependent simulation,
- * see the
- * DataOutInterface::write_pvd_record()
- * function.
- *
- * @note At the time of writing,
- * the other big VTK-based
- * visualization program, VisIt,
- * can not read <code>pvtu</code>
- * records. However, it can read
- * visit records as written by
- * the write_visit_record()
- * function.
- */
- void write_pvtu_record (std::ostream &out,
- const std::vector<std::string> &piece_names) const;
-
- /**
- * In ParaView it is possible to visualize time-dependent
- * data tagged with the current
- * integration time of a time dependent simulation. To use this
- * feature you need a <code>.pvd</code>
- * file that describes which VTU or PVTU file
- * belongs to which timestep. This function writes a file that
- * provides this mapping, i.e., it takes a list of pairs each of
- * which indicates a particular time instant and the corresponding
- * file that contains the graphical data for this time instant.
- *
- * A typical use case, in program that computes a time dependent
- * solution, would be the following (<code>time</code> and
- * <code>time_step</code> are member variables of the class with types
- * <code>double</code> and <code>unsigned int</code>, respectively;
- * the variable <code>times_and_names</code> is of type
- * <code>std::vector@<std::pair@<double,std::string@> @></code>):
- *
- * @code
- * template <int dim>
- * void MyEquation<dim>::output_results () const
- * {
- * DataOut<dim> data_out;
- *
- * data_out.attach_dof_handler (dof_handler);
- * data_out.add_data_vector (solution, "U");
- * data_out.build_patches ();
- *
- * const std::string filename = "solution-" +
- * Utilities::int_to_string (timestep_number, 3) +
- * ".vtu";
- * std::ofstream output (filename.c_str());
- * data_out.write_vtu (output);
- *
- * times_and_names.push_back (std::pair<double,std::string> (time, filename));
- * std::ofstream pvd_output ("solution.pvd");
- * data_out.write_pvd_record (pvd_output, times_and_names);
- * }
- * @endcode
- *
- * @note See DataOutBase::write_vtu or
- * DataOutInterface::write_pvtu_record for
- * writing solutions at each timestep.
- *
- * @note The second element of each pair, i.e., the file in which
- * the graphical data for each time is stored, may itself be again
- * a file that references other files. For example, it could be
- * the name for a <code>.pvtu</code> file that references multiple
- * parts of a parallel computation.
- *
- * @author Marco Engelhard, 2012
- */
- void write_pvd_record (std::ostream &out,
- const std::vector<std::pair<double,std::string> > &times_and_names) const;
-
- /**
- * This function is the exact
- * equivalent of the
- * write_pvtu_record() function
- * but for the VisIt
- * visualization program. See
- * there for the purpose of this
- * function.
- *
- * This function is documented
- * in the "Creating a master file
- * for parallel" section (section 5.7)
- * of the "Getting data into VisIt"
- * report that can be found here:
- * https://wci.llnl.gov/codes/visit/2.0.0/GettingDataIntoVisIt2.0.0.pdf
- */
- void write_visit_record (std::ostream &out,
- const std::vector<std::string> &piece_names) const;
-
- /**
- * Obtain data through get_patches()
- * and write it to <tt>out</tt>
- * in deal.II intermediate
- * format. See
- * DataOutBase::write_deal_II_intermediate.
- *
- * Note that the intermediate
- * format is what its name
- * suggests: a direct
- * representation of internal
- * data. It isn't standardized
- * and will change whenever we
- * change our internal
- * representation. You can only
- * expect to process files
- * written in this format using
- * the same version of deal.II
- * that was used for writing.
- */
- void write_deal_II_intermediate (std::ostream &out) const;
-
- XDMFEntry create_xdmf_entry (const char *h5_filename,
- const double cur_time,
- MPI_Comm comm) const;
-
- void write_xdmf_file (const std::vector<XDMFEntry> &entries,
- const char *filename,
- MPI_Comm comm) const;
-
- void write_hdf5_parallel (const char* filename, MPI_Comm comm) const;
- /**
- * Write data and grid to <tt>out</tt>
- * according to the given data
- * format. This function simply
- * calls the appropriate
- * <tt>write_*</tt> function. If no
- * output format is requested,
- * the <tt>default_format</tt> is
- * written.
- *
- * An error occurs if no format
- * is provided and the default
- * format is <tt>default_format</tt>.
- */
- void write (std::ostream &out,
- const OutputFormat output_format = default_format) const;
-
- /**
- * Set the default format. The
- * value set here is used
- * anytime, output for format
- * <tt>default_format</tt> is
- * requested.
- */
- void set_default_format (const OutputFormat default_format);
-
- /**
- * Set the flags to be used for
- * output in OpenDX format.
- */
- void set_flags (const DXFlags &dx_flags);
-
- /**
- * Set the flags to be used for
- * output in UCD format.
- */
- void set_flags (const UcdFlags &ucd_flags);
-
- /**
- * Set the flags to be used for
- * output in GNUPLOT format.
- */
- void set_flags (const GnuplotFlags &gnuplot_flags);
-
- /**
- * Set the flags to be used for
- * output in POVRAY format.
- */
- void set_flags (const PovrayFlags &povray_flags);
-
- /**
- * Set the flags to be used for
- * output in EPS output.
- */
- void set_flags (const EpsFlags &eps_flags);
-
- /**
- * Set the flags to be used for
- * output in GMV format.
- */
- void set_flags (const GmvFlags &gmv_flags);
-
- /**
- * Set the flags to be used for
- * output in Tecplot format.
- */
- void set_flags (const TecplotFlags &tecplot_flags);
-
- /**
- * Set the flags to be used for
- * output in VTK format.
- */
- void set_flags (const VtkFlags &vtk_flags);
-
- /**
- * Set the flags to be used for output in
- * deal.II intermediate format.
- */
- void set_flags (const Deal_II_IntermediateFlags &deal_II_intermediate_flags);
-
- /**
- * A function that returns the same
- * string as the respective function in
- * the base class does; the only
- * exception being that if the parameter
- * is omitted, then the value for the
- * present default format is returned,
- * i.e. the correct suffix for the format
- * that was set through
- * set_default_format() or
- * parse_parameters() before calling this
- * function.
- */
- std::string
- default_suffix (const OutputFormat output_format = default_format) const;
-
- /**
- * Declare parameters for all
- * output formats by declaring
- * subsections within the
- * parameter file for each output
- * format and call the respective
- * <tt>declare_parameters</tt>
- * functions of the flag classes
- * for each output format.
- *
- * Some of the declared
- * subsections may not contain
- * entries, if the respective
- * format does not export any
- * flags.
- *
- * Note that the top-level
- * parameters denoting the number
- * of subdivisions per patch and
- * the output format are not
- * declared, since they are only
- * passed to virtual functions
- * and are not stored inside
- * objects of this type. You have
- * to declare them yourself.
- */
- static void declare_parameters (ParameterHandler &prm);
-
- /**
- * Read the parameters declared
- * in <tt>declare_parameters</tt> and
- * set the flags for the output
- * formats accordingly.
- *
- * The flags thus obtained
- * overwrite all previous
- * contents of the flag objects
- * as default-constructed or set
- * by the set_flags() function.
- */
- void parse_parameters (ParameterHandler &prm);
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this
- * object. Since sometimes
- * the size of objects can
- * not be determined exactly
- * (for example: what is the
- * memory consumption of an
- * STL <tt>std::map</tt> type with a
- * certain number of
- * elements?), this is only
- * an estimate. however often
- * quite close to the true
- * value.
- */
- std::size_t memory_consumption () const;
-
- protected:
- /**
- * This is the abstract function
- * through which derived classes
- * propagate preprocessed data in
- * the form of Patch
- * structures (declared in the
- * base class DataOutBase) to
- * the actual output
- * function. You need to overload
- * this function to allow the
- * output functions to know what
- * they shall print.
- */
- virtual
- const std::vector<typename DataOutBase::Patch<dim,spacedim> > &
- get_patches () const = 0;
-
- /**
- * Abstract virtual function
- * through which the names of
- * data sets are obtained by the
- * output functions of the base
- * class.
- */
- virtual
- std::vector<std::string>
- get_dataset_names () const = 0;
-
- /**
- * This functions returns
- * information about how the
- * individual components of
- * output files that consist of
- * more than one data set are to
- * be interpreted.
- *
- * It returns a list of index
- * pairs and corresponding name
- * indicating which components of
- * the output are to be
- * considered vector-valued
- * rather than just a collection
- * of scalar data. The index
- * pairs are inclusive; for
- * example, if we have a Stokes
- * problem in 2d with components
- * (u,v,p), then the
- * corresponding vector data
- * range should be (0,1), and the
- * returned list would consist of
- * only a single element with a
- * tuple such as (0,1,"velocity").
- *
- * Since some of the derived
- * classes do not know about
- * vector data, this function has
- * a default implementation that
- * simply returns an empty
- * string, meaning that all data
- * is to be considered a
- * collection of scalar fields.
- */
- virtual
- std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
- get_vector_data_ranges () const;
-
- /**
- * The default number of
- * subdivisions for patches. This
- * is filled by parse_parameters()
- * and should be obeyed by
- * build_patches() in derived
- * classes.
- */
- unsigned int default_subdivisions;
+ public:
+ /*
+ * Import a few names that were
+ * previously in this class and have then
+ * moved to the base class. Since the
+ * base class is inherited from
+ * privately, we need to re-import these
+ * symbols to make sure that references
+ * to DataOutInterface<dim,spacedim>::XXX
+ * remain valid.
+ */
+ using DataOutBase::OutputFormat;
+ using DataOutBase::default_format;
+ using DataOutBase::dx;
+ using DataOutBase::gnuplot;
+ using DataOutBase::povray;
+ using DataOutBase::eps;
+ using DataOutBase::tecplot;
+ using DataOutBase::tecplot_binary;
+ using DataOutBase::vtk;
+ using DataOutBase::vtu;
+ using DataOutBase::deal_II_intermediate;
+ using DataOutBase::parse_output_format;
+ using DataOutBase::get_output_format_names;
+ using DataOutBase::determine_intermediate_format_dimensions;
+
+ /**
+ * Constructor.
+ */
+ DataOutInterface ();
+
+ /**
+ * Destructor. Does nothing, but is
+ * declared virtual since this class has
+ * virtual functions.
+ */
+ virtual ~DataOutInterface ();
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in OpenDX format. See
+ * DataOutBase::write_dx.
+ */
+ void write_dx (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in EPS format. See
+ * DataOutBase::write_eps.
+ */
+ void write_eps (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in GMV format. See
+ * DataOutBase::write_gmv.
+ */
+ void write_gmv (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in GNUPLOT format. See
+ * DataOutBase::write_gnuplot.
+ */
+ void write_gnuplot (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in POVRAY format. See
+ * DataOutBase::write_povray.
+ */
+ void write_povray (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in Tecplot format. See
+ * DataOutBase::write_tecplot.
+ */
+ void write_tecplot (std::ostream &out) const;
+
+ /**
+ * Obtain data through
+ * get_patches() and write it in
+ * the Tecplot binary output
+ * format. Note that the name of
+ * the output file must be
+ * specified through the
+ * TecplotFlags interface.
+ */
+ void write_tecplot_binary (std::ostream &out) const;
+
+ /**
+ * Obtain data through
+ * get_patches() and write it to
+ * <tt>out</tt> in UCD format for
+ * AVS. See
+ * DataOutBase::write_ucd.
+ */
+ void write_ucd (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in Vtk format. See
+ * DataOutBase::write_vtk.
+ */
+ void write_vtk (std::ostream &out) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in Vtu (VTK's XML) format. See
+ * DataOutBase::write_vtu.
+ *
+ * Some visualization programs,
+ * such as ParaView, can read
+ * several separate VTU files to
+ * parallelize visualization. In
+ * that case, you need a
+ * <code>.pvtu</code> file that
+ * describes which VTU files form
+ * a group. The
+ * DataOutInterface::write_pvtu_record()
+ * function can generate such a
+ * master record. Likewise,
+ * DataOutInterface::write_visit_record()
+ * does the same for VisIt. Finally,
+ * DataOutInterface::write_pvd_record()
+ * can be used to group together
+ * the files that jointly make up
+ * a time dependent simulation.
+ */
+ void write_vtu (std::ostream &out) const;
+
+ /**
+ * Collective MPI call to write the
+ * solution from all participating nodes
+ * (those in the given communicator) to a
+ * single compressed .vtu file on a
+ * shared file system. The communicator
+ * can be a sub communicator of the one
+ * used by the computation. This routine
+ * uses MPI I/O to achieve high
+ * performance on parallel filesystems.
+ * Also see
+ * DataOutInterface::write_vtu().
+ */
+ void write_vtu_in_parallel (const char *filename, MPI_Comm comm) const;
+
+ /**
+ * Some visualization programs, such as
+ * ParaView, can read several separate
+ * VTU files to parallelize
+ * visualization. In that case, you need
+ * a <code>.pvtu</code> file that
+ * describes which VTU files (written,
+ * for example, through the write_vtu()
+ * function) form a group. The current
+ * function can generate such a master
+ * record.
+ *
+ * The file so written contains a list of
+ * (scalar or vector) fields whose values
+ * are described by the individual files
+ * that comprise the set of parallel VTU
+ * files along with the names of these
+ * files. This function gets the names
+ * and types of fields through the
+ * get_patches() function of this class
+ * like all the other write_xxx()
+ * functions. The second argument to this
+ * function specifies the names of the
+ * files that form the parallel set.
+ *
+ * @note See DataOutBase::write_vtu for
+ * writing each piece. Also note that
+ * only one parallel process needs to
+ * call the current function, listing the
+ * names of the files written by all
+ * parallel processes.
+ *
+ * @note The use of this function is
+ * explained in step-40.
+ *
+ * @note In order to tell ParaView to
+ * group together multiple <code>pvtu</code>
+ * files that each describe one time
+ * step of a time dependent simulation,
+ * see the
+ * DataOutInterface::write_pvd_record()
+ * function.
+ *
+ * @note At the time of writing,
+ * the other big VTK-based
+ * visualization program, VisIt,
+ * can not read <code>pvtu</code>
+ * records. However, it can read
+ * visit records as written by
+ * the write_visit_record()
+ * function.
+ */
+ void write_pvtu_record (std::ostream &out,
+ const std::vector<std::string> &piece_names) const;
+
+ /**
+ * In ParaView it is possible to visualize time-dependent
+ * data tagged with the current
+ * integration time of a time dependent simulation. To use this
+ * feature you need a <code>.pvd</code>
+ * file that describes which VTU or PVTU file
+ * belongs to which timestep. This function writes a file that
+ * provides this mapping, i.e., it takes a list of pairs each of
+ * which indicates a particular time instant and the corresponding
+ * file that contains the graphical data for this time instant.
+ *
+ * A typical use case, in a program that computes a time dependent
+ * solution, would be the following (<code>time</code> and
+ * <code>time_step</code> are member variables of the class with types
+ * <code>double</code> and <code>unsigned int</code>, respectively;
+ * the variable <code>times_and_names</code> is of type
+ * <code>std::vector@<std::pair@<double,std::string@> @></code>):
+ *
+ * @code
+ * template <int dim>
+ * void MyEquation<dim>::output_results () const
+ * {
+ * DataOut<dim> data_out;
+ *
+ * data_out.attach_dof_handler (dof_handler);
+ * data_out.add_data_vector (solution, "U");
+ * data_out.build_patches ();
+ *
+ * const std::string filename = "solution-" +
+ * Utilities::int_to_string (timestep_number, 3) +
+ * ".vtu";
+ * std::ofstream output (filename.c_str());
+ * data_out.write_vtu (output);
+ *
+ * times_and_names.push_back (std::pair<double,std::string> (time, filename));
+ * std::ofstream pvd_output ("solution.pvd");
+ * data_out.write_pvd_record (pvd_output, times_and_names);
+ * }
+ * @endcode
+ *
+ * @note See DataOutBase::write_vtu or
+ * DataOutInterface::write_pvtu_record for
+ * writing solutions at each timestep.
+ *
+ * @note The second element of each pair, i.e., the file in which
+ * the graphical data for each time is stored, may itself be again
+ * a file that references other files. For example, it could be
+ * the name for a <code>.pvtu</code> file that references multiple
+ * parts of a parallel computation.
+ *
+ * @author Marco Engelhard, 2012
+ */
+ void write_pvd_record (std::ostream &out,
+ const std::vector<std::pair<double,std::string> > &times_and_names) const;
+
+ /**
+ * This function is the exact
+ * equivalent of the
+ * write_pvtu_record() function
+ * but for the VisIt
+ * visualization program. See
+ * there for the purpose of this
+ * function.
+ *
+ * This function is documented
+ * in the "Creating a master file
+ * for parallel" section (section 5.7)
+ * of the "Getting data into VisIt"
+ * report that can be found here:
+ * https://wci.llnl.gov/codes/visit/2.0.0/GettingDataIntoVisIt2.0.0.pdf
+ */
+ void write_visit_record (std::ostream &out,
+ const std::vector<std::string> &piece_names) const;
+
+ /**
+ * Obtain data through get_patches()
+ * and write it to <tt>out</tt>
+ * in deal.II intermediate
+ * format. See
+ * DataOutBase::write_deal_II_intermediate.
+ *
+ * Note that the intermediate
+ * format is what its name
+ * suggests: a direct
+ * representation of internal
+ * data. It isn't standardized
+ * and will change whenever we
+ * change our internal
+ * representation. You can only
+ * expect to process files
+ * written in this format using
+ * the same version of deal.II
+ * that was used for writing.
+ */
+ void write_deal_II_intermediate (std::ostream &out) const;
+
+ XDMFEntry create_xdmf_entry (const char *h5_filename,
+ const double cur_time,
+ MPI_Comm comm) const;
+
+ void write_xdmf_file (const std::vector<XDMFEntry> &entries,
+ const char *filename,
+ MPI_Comm comm) const;
+
+ void write_hdf5_parallel (const char *filename, MPI_Comm comm) const;
+ /**
+ * Write data and grid to <tt>out</tt>
+ * according to the given data
+ * format. This function simply
+ * calls the appropriate
+ * <tt>write_*</tt> function. If no
+ * output format is requested,
+ * the <tt>default_format</tt> is
+ * written.
+ *
+ * An error occurs if no format
+ * is provided and the default
+ * format is <tt>default_format</tt>.
+ */
+ void write (std::ostream &out,
+ const OutputFormat output_format = default_format) const;
+
+ /**
+ * Set the default format. The
+ * value set here is used whenever
+ * output for format
+ * <tt>default_format</tt> is
+ * requested.
+ */
+ void set_default_format (const OutputFormat default_format);
+
+ /**
+ * Set the flags to be used for
+ * output in OpenDX format.
+ */
+ void set_flags (const DXFlags &dx_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in UCD format.
+ */
+ void set_flags (const UcdFlags &ucd_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in GNUPLOT format.
+ */
+ void set_flags (const GnuplotFlags &gnuplot_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in POVRAY format.
+ */
+ void set_flags (const PovrayFlags &povray_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in EPS output.
+ */
+ void set_flags (const EpsFlags &eps_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in GMV format.
+ */
+ void set_flags (const GmvFlags &gmv_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in Tecplot format.
+ */
+ void set_flags (const TecplotFlags &tecplot_flags);
+
+ /**
+ * Set the flags to be used for
+ * output in VTK format.
+ */
+ void set_flags (const VtkFlags &vtk_flags);
+
+ /**
+ * Set the flags to be used for output in
+ * deal.II intermediate format.
+ */
+ void set_flags (const Deal_II_IntermediateFlags &deal_II_intermediate_flags);
+
+ /**
+ * A function that returns the same
+ * string as the respective function in
+ * the base class does; the only
+ * exception being that if the parameter
+ * is omitted, then the value for the
+ * present default format is returned,
+ * i.e. the correct suffix for the format
+ * that was set through
+ * set_default_format() or
+ * parse_parameters() before calling this
+ * function.
+ */
+ std::string
+ default_suffix (const OutputFormat output_format = default_format) const;
+
+ /**
+ * Declare parameters for all
+ * output formats by declaring
+ * subsections within the
+ * parameter file for each output
+ * format and call the respective
+ * <tt>declare_parameters</tt>
+ * functions of the flag classes
+ * for each output format.
+ *
+ * Some of the declared
+ * subsections may not contain
+ * entries, if the respective
+ * format does not export any
+ * flags.
+ *
+ * Note that the top-level
+ * parameters denoting the number
+ * of subdivisions per patch and
+ * the output format are not
+ * declared, since they are only
+ * passed to virtual functions
+ * and are not stored inside
+ * objects of this type. You have
+ * to declare them yourself.
+ */
+ static void declare_parameters (ParameterHandler &prm);
+
+ /**
+ * Read the parameters declared
+ * in <tt>declare_parameters</tt> and
+ * set the flags for the output
+ * formats accordingly.
+ *
+ * The flags thus obtained
+ * overwrite all previous
+ * contents of the flag objects
+ * as default-constructed or set
+ * by the set_flags() function.
+ */
+ void parse_parameters (ParameterHandler &prm);
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this
+ * object. Since sometimes
+ * the size of objects can
+ * not be determined exactly
+ * (for example: what is the
+ * memory consumption of an
+ * STL <tt>std::map</tt> type with a
+ * certain number of
+ * elements?), this is only
+ * an estimate, though often
+ * quite close to the true
+ * value.
+ */
+ std::size_t memory_consumption () const;
+
+ protected:
+ /**
+ * This is the abstract function
+ * through which derived classes
+ * propagate preprocessed data in
+ * the form of Patch
+ * structures (declared in the
+ * base class DataOutBase) to
+ * the actual output
+ * function. You need to overload
+ * this function to allow the
+ * output functions to know what
+ * they shall print.
+ */
+ virtual
+ const std::vector<typename DataOutBase::Patch<dim,spacedim> > &
+ get_patches () const = 0;
+
+ /**
+ * Abstract virtual function
+ * through which the names of
+ * data sets are obtained by the
+ * output functions of the base
+ * class.
+ */
+ virtual
+ std::vector<std::string>
+ get_dataset_names () const = 0;
+
+ /**
+ * This function returns
+ * information about how the
+ * individual components of
+ * output files that consist of
+ * more than one data set are to
+ * be interpreted.
+ *
+ * It returns a list of index
+ * pairs and corresponding name
+ * indicating which components of
+ * the output are to be
+ * considered vector-valued
+ * rather than just a collection
+ * of scalar data. The index
+ * pairs are inclusive; for
+ * example, if we have a Stokes
+ * problem in 2d with components
+ * (u,v,p), then the
+ * corresponding vector data
+ * range should be (0,1), and the
+ * returned list would consist of
+ * only a single element with a
+ * tuple such as (0,1,"velocity").
+ *
+ * Since some of the derived
+ * classes do not know about
+ * vector data, this function has
+ * a default implementation that
+ * simply returns an empty
+ * list, meaning that all data
+ * is to be considered a
+ * collection of scalar fields.
+ */
+ virtual
+ std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
+ get_vector_data_ranges () const;
+
+ /**
+ * The default number of
+ * subdivisions for patches. This
+ * is filled by parse_parameters()
+ * and should be obeyed by
+ * build_patches() in derived
+ * classes.
+ */
+ unsigned int default_subdivisions;
- private:
- /**
- * Standard output format. Use
- * this format, if output format
- * default_format is
- * requested. It can be changed
- * by the <tt>set_format</tt> function
- * or in a parameter file.
- */
- OutputFormat default_fmt;
-
- /**
- * Flags to be used upon output
- * of OpenDX data. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- DXFlags dx_flags;
-
- /**
- * Flags to be used upon output
- * of UCD data. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- UcdFlags ucd_flags;
-
- /**
- * Flags to be used upon output
- * of GNUPLOT data. Can be
- * changed by using the
- * <tt>set_flags</tt> function.
- */
- GnuplotFlags gnuplot_flags;
-
- /**
- * Flags to be used upon output
- * of POVRAY data. Can be changed
- * by using the <tt>set_flags</tt>
- * function.
- */
- PovrayFlags povray_flags;
-
- /**
- * Flags to be used upon output
- * of EPS data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- EpsFlags eps_flags;
-
- /**
- * Flags to be used upon output
- * of gmv data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- GmvFlags gmv_flags;
-
- /**
- * Flags to be used upon output
- * of Tecplot data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- TecplotFlags tecplot_flags;
-
- /**
- * Flags to be used upon output
- * of vtk data in one space
- * dimension. Can be changed by
- * using the <tt>set_flags</tt>
- * function.
- */
- VtkFlags vtk_flags;
-
- /**
- * Flags to be used upon output of
- * deal.II intermediate data in one space
- * dimension. Can be changed by using the
- * <tt>set_flags</tt> function.
- */
- Deal_II_IntermediateFlags deal_II_intermediate_flags;
+ private:
+ /**
+ * Standard output format. Use
+ * this format if output format
+ * default_format is
+ * requested. It can be changed
+ * by the <tt>set_format</tt> function
+ * or in a parameter file.
+ */
+ OutputFormat default_fmt;
+
+ /**
+ * Flags to be used upon output
+ * of OpenDX data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ DXFlags dx_flags;
+
+ /**
+ * Flags to be used upon output
+ * of UCD data. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ UcdFlags ucd_flags;
+
+ /**
+ * Flags to be used upon output
+ * of GNUPLOT data. Can be
+ * changed by using the
+ * <tt>set_flags</tt> function.
+ */
+ GnuplotFlags gnuplot_flags;
+
+ /**
+ * Flags to be used upon output
+ * of POVRAY data. Can be changed
+ * by using the <tt>set_flags</tt>
+ * function.
+ */
+ PovrayFlags povray_flags;
+
+ /**
+ * Flags to be used upon output
+ * of EPS data in one space
+ * dimension. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ EpsFlags eps_flags;
+
+ /**
+ * Flags to be used upon output
+ * of gmv data in one space
+ * dimension. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ GmvFlags gmv_flags;
+
+ /**
+ * Flags to be used upon output
+ * of Tecplot data in one space
+ * dimension. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ TecplotFlags tecplot_flags;
+
+ /**
+ * Flags to be used upon output
+ * of vtk data in one space
+ * dimension. Can be changed by
+ * using the <tt>set_flags</tt>
+ * function.
+ */
+ VtkFlags vtk_flags;
+
+ /**
+ * Flags to be used upon output of
+ * deal.II intermediate data in one space
+ * dimension. Can be changed by using the
+ * <tt>set_flags</tt> function.
+ */
+ Deal_II_IntermediateFlags deal_II_intermediate_flags;
};
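// A condensed sketch of the workflow outlined in the documentation of
// write_vtu() and write_pvtu_record() above. The object and file names
// are placeholders; each process writes its own .vtu piece, and only
// one process writes the .pvtu master record that groups them:
//
//   DataOut<dim> data_out;
//   data_out.attach_dof_handler (dof_handler);
//   data_out.add_data_vector (solution, "solution");
//   data_out.build_patches ();
//
//   const unsigned int rank
//     = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
//   const std::string filename
//     = "solution-" + Utilities::int_to_string (rank, 4) + ".vtu";
//   std::ofstream output (filename.c_str());
//   data_out.write_vtu (output);
//
//   if (rank == 0)
//     {
//       std::vector<std::string> piece_names;
//       for (unsigned int p=0;
//            p<Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD); ++p)
//         piece_names.push_back ("solution-" +
//                                Utilities::int_to_string (p, 4) + ".vtu");
//       std::ofstream pvtu_output ("solution.pvtu");
//       data_out.write_pvtu_record (pvtu_output, piece_names);
//     }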
*/
class ExceptionBase : public std::exception
{
- public:
- /**
- * Default constructor.
- */
- ExceptionBase ();
-
- /**
- * The constructor takes the file in which the
- * error happened, the line and the violated
- * condition as well as the name of the
- * exception class as a <tt>char*</tt> as arguments.
- */
- ExceptionBase (const char* f, const int l, const char *func,
- const char* c, const char *e);
-
- /**
- * Copy constructor.
- */
- ExceptionBase (const ExceptionBase &exc);
-
- /**
- * Destructor. Empty, but needed
- * for the sake of exception
- * specification, since the base
- * class has this exception
- * specification and the
- * automatically generated
- * destructor would have a
- * different one due to member
- * objects.
- */
- virtual ~ExceptionBase () throw();
-
- /**
- * Set the file name and line of where the
- * exception appeared as well as the violated
- * condition and the name of the exception as
- * a char pointer.
- */
- void set_fields (const char *f,
- const int l,
- const char *func,
- const char *c,
- const char *e);
-
- /**
- * Print out the general part of the error
- * information.
- */
- void print_exc_data (std::ostream &out) const;
-
- /**
- * Print more specific information about the
- * exception which occured. Overload this
- * function in your own exception classes.
- */
- virtual void print_info (std::ostream &out) const;
-
-
- /**
- * Function derived from the base class
- * which allows to pass information like
- * the line and name of the file where the
- * exception occurred as well as user
- * information.
- *
- * This function is mainly used
- * when using exceptions
- * declared by the
- * <tt>DeclException*</tt>
- * macros with the
- * <tt>throw</tt> mechanism or
- * the <tt>AssertThrow</tt>
- * macro.
- */
- virtual const char * what () const throw ();
-
- /**
- * Print a stacktrace, if one has
- * been recorded previously, to
- * the given stream.
- */
- void print_stack_trace (std::ostream &out) const;
-
- protected:
- /**
- * Name of the file this exception happen in.
- */
- const char *file;
-
- /**
- * Line number in this file.
- */
- unsigned int line;
-
- /**
- * Name of the function, pretty printed.
- */
- const char *function;
-
- /**
- * The violated condition, as a string.
- */
- const char *cond;
-
- /**
- * Name of the exception and call sequence.
- */
- const char *exc;
-
- /**
- * A backtrace to the position
- * where the problem happened, if
- * the system supports this.
- */
- char ** stacktrace;
-
- /**
- * The number of stacktrace
- * frames that are stored in the
- * previous variable. Zero if the
- * system does not support stack
- * traces.
- */
- int n_stacktrace_frames;
+ public:
+ /**
+ * Default constructor.
+ */
+ ExceptionBase ();
+
+ /**
+ * The constructor takes the file in which the
+ * error happened, the line and the violated
+ * condition as well as the name of the
+ * exception class as a <tt>char*</tt> as arguments.
+ */
+ ExceptionBase (const char *f, const int l, const char *func,
+ const char *c, const char *e);
+
+ /**
+ * Copy constructor.
+ */
+ ExceptionBase (const ExceptionBase &exc);
+
+ /**
+ * Destructor. Empty, but needed
+ * for the sake of exception
+ * specification, since the base
+ * class has this exception
+ * specification and the
+ * automatically generated
+ * destructor would have a
+ * different one due to member
+ * objects.
+ */
+ virtual ~ExceptionBase () throw();
+
+ /**
+ * Set the file name and line of where the
+ * exception appeared as well as the violated
+ * condition and the name of the exception as
+ * a char pointer.
+ */
+ void set_fields (const char *f,
+ const int l,
+ const char *func,
+ const char *c,
+ const char *e);
+
+ /**
+ * Print out the general part of the error
+ * information.
+ */
+ void print_exc_data (std::ostream &out) const;
+
+ /**
+ * Print more specific information about the
+ * exception which occurred. Overload this
+ * function in your own exception classes.
+ */
+ virtual void print_info (std::ostream &out) const;
+
+
+ /**
+ * Function derived from the base class
+ * which allows one to pass information like
+ * the line and name of the file where the
+ * exception occurred as well as user
+ * information.
+ *
+ * This function is mainly used
+ * when using exceptions
+ * declared by the
+ * <tt>DeclException*</tt>
+ * macros with the
+ * <tt>throw</tt> mechanism or
+ * the <tt>AssertThrow</tt>
+ * macro.
+ */
+ virtual const char *what () const throw ();
+
+ /**
+ * Print a stacktrace, if one has
+ * been recorded previously, to
+ * the given stream.
+ */
+ void print_stack_trace (std::ostream &out) const;
+
+ protected:
+ /**
+ * Name of the file this exception happened in.
+ */
- const char *file;
++ const char *file;
+
+ /**
+ * Line number in this file.
+ */
+ unsigned int line;
+
+ /**
+ * Name of the function, pretty printed.
+ */
- const char *function;
++ const char *function;
+
+ /**
+ * The violated condition, as a string.
+ */
- const char *cond;
++ const char *cond;
+
+ /**
+ * Name of the exception and call sequence.
+ */
- const char *exc;
++ const char *exc;
+
+ /**
+ * A backtrace to the position
+ * where the problem happened, if
+ * the system supports this.
+ */
+ char **stacktrace;
+
+ /**
+ * The number of stacktrace
+ * frames that are stored in the
+ * previous variable. Zero if the
+ * system does not support stack
+ * traces.
+ */
+ int n_stacktrace_frames;
};
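// Illustrative usage sketch (not part of the patch above). It assumes the
// usual deal.II conventions: DeclException1 generates a class derived from
// ExceptionBase, and AssertThrow fills in file/line/condition via
// set_fields() before throwing. ExcVectorTooShort and check_size() are
// hypothetical names; the header path may differ between versions.

#include <deal.II/base/exceptions.h>
#include <iostream>

DeclException1 (ExcVectorTooShort, int,
                << "The vector has only " << arg1 << " entries.");

void check_size (const unsigned int n_entries)
{
  // If the condition fails, an ExcVectorTooShort object is thrown whose
  // ExceptionBase part records where and why the failure happened.
  AssertThrow (n_entries >= 10, ExcVectorTooShort (n_entries));
}

int main ()
{
  try
    {
      check_size (3);
    }
  catch (const dealii::ExceptionBase &exc)
    {
      // what() combines the general data (print_exc_data) with the
      // exception-specific message produced by print_info().
      std::cerr << exc.what() << std::endl;
    }
  return 0;
}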
*/
class IndexSet
{
- public:
- /**
- * Default constructor.
- */
- IndexSet ();
-
- /**
- * Constructor that also sets the
- * overall size of the index
- * range.
- */
- explicit IndexSet (const types::global_dof_index size);
-
- /**
- * Remove all indices from this
- * index set. The index set retains
- * its size, however.
- */
- void clear ();
-
- /**
- * Set the maximal size of the
- * indices upon which this object
- * operates.
- *
- * This function can only be
- * called if the index set does
- * not yet contain any elements.
- * This can be achieved by calling
- * clear(), for example.
- */
- void set_size (const unsigned int size);
-
- /**
- * Return the size of the index
- * space of which this index set
- * is a subset of.
- *
- * Note that the result is not equal to
- * the number of indices within this
- * set. The latter information is
- * returned by n_elements().
- */
- unsigned int size () const;
-
- /**
- * Add the half-open range
- * $[\text{begin},\text{end})$ to
- * the set of indices represented
- * by this class.
- */
- void add_range (const unsigned int begin,
- const unsigned int end);
-
- /**
- * Add an individual index to the
- * set of indices.
- */
- void add_index (const unsigned int index);
-
- /**
- * Add a whole set of indices
- * described by dereferencing
- * every element of the the
- * iterator range
- * <code>[begin,end)</code>.
- */
- template <typename ForwardIterator>
- void add_indices (const ForwardIterator &begin,
- const ForwardIterator &end);
-
- /**
- * Add the given IndexSet @p other to the
- * current one, constructing the union of
- * *this and @p other.
- */
- void add_indices(const IndexSet & other);
-
- /**
- * Return whether the specified
- * index is an element of the
- * index set.
- */
- bool is_element (const types::global_dof_index index) const;
-
- /**
- * Return whether the index set
- * stored by this object defines
- * a contiguous range. This is
- * true also if no indices are
- * stored at all.
- */
- bool is_contiguous () const;
-
- /**
- * Return the number of elements
- * stored in this index set.
- */
- types::global_dof_index n_elements () const;
-
- /**
- * Return the global index of the local
- * index with number @p local_index
- * stored in this index set. @p
- * local_index obviously needs to be less
- * than n_elements().
- */
- types::global_dof_index nth_index_in_set (const unsigned int local_index) const;
-
- /**
- * Return the how-manyth element of this
- * set (counted in ascending order) @p
- * global_index is. @p global_index needs
- * to be less than the size(). This
- * function throws an exception if the
- * index @p global_index is not actually
- * a member of this index set, i.e. if
- * is_element(global_index) is false.
- */
- unsigned int index_within_set (const types::global_dof_index global_index) const;
-
- /**
- * Each index set can be
- * represented as the union of a
- * number of contiguous intervals
- * of indices, where if necessary
- * intervals may only consist of
- * individual elements to
- * represent isolated members of
- * the index set.
- *
- * This function returns the
- * minimal number of such
- * intervals that are needed to
- * represent the index set under
- * consideration.
- */
- unsigned int n_intervals () const;
-
- /**
- * Compress the internal
- * representation by merging
- * individual elements with
- * contiguous ranges, etc. This
- * function does not have any
- * external effect.
- */
- void compress () const;
-
- /**
- * Comparison for equality of
- * index sets. This operation is
- * only allowed if the size of
- * the two sets is the same
- * (though of course they do not
- * have to have the same number
- * of indices).
- */
- bool operator == (const IndexSet &is) const;
-
- /**
- * Comparison for inequality of
- * index sets. This operation is
- * only allowed if the size of
- * the two sets is the same
- * (though of course they do not
- * have to have the same number
- * of indices).
- */
- bool operator != (const IndexSet &is) const;
-
- /**
- * Return the intersection of the
- * current index set and the
- * argument given, i.e. a set of
- * indices that are elements of
- * both index sets. The two index
- * sets must have the same size
- * (though of course they do not
- * have to have the same number
- * of indices).
- */
- IndexSet operator & (const IndexSet &is) const;
-
- /**
- * This command takes an interval
- * <tt>[begin, end)</tt> and returns
- * the intersection of the current
- * index set with the interval, shifted
- * to the range <tt>[0,
- * end-begin)</tt>.
- */
- IndexSet get_view (const types::global_dof_index begin,
- const types::global_dof_index end) const;
-
-
- /**
- * Removes all elements contained in @p
- * other from this set. In other words,
- * if $x$ is the current object and $o$
- * the argument, then we compute $x
- * \leftarrow x \backslash o$.
- */
- void subtract_set (const IndexSet & other);
-
-
- /**
- * Fills the given vector with all
- * indices contained in this IndexSet.
- */
- void fill_index_vector(std::vector<types::global_dof_index> & indices) const;
-
- /**
- * Fill the given vector with either
- * zero or one elements, providing
- * a binary representation of this
- * index set. The given vector is
- * assumed to already have the correct
- * size.
- *
- * The given argument is filled with
- * integer values zero and one, using
- * <code>vector.operator[]</code>. Thus,
- * any object that has such an operator
- * can be used as long as it allows
- * conversion of integers zero and one to
- * elements of the vector. Specifically,
- * this is the case for classes Vector,
- * BlockVector, but also
- * std::vector@<bool@>,
- * std::vector@<int@>, and
- * std::vector@<double@>.
- */
- template <typename Vector>
- void fill_binary_vector (Vector &vector) const;
-
- /**
- * Outputs a text representation of this
- * IndexSet to the given stream. Used for
- * testing.
- */
- template <class STREAM>
- void print(STREAM &out) const;
-
- /**
- * Writes the IndexSet into a text based
- * file format, that can be read in again
- * using the read() function.
- */
- void write(std::ostream & out) const;
-
- /**
- * Constructs the IndexSet from a text
- * based representation given by the
- * stream @param in written by the
- * write() function.
- */
- void read(std::istream & in);
-
- /**
- * Writes the IndexSet into a binary,
- * compact representation, that can be
- * read in again using the block_read()
- * function.
- */
- void block_write(std::ostream & out) const;
-
- /**
- * Constructs the IndexSet from a binary
- * representation given by the stream
- * @param in written by the write_block()
- * function.
- */
- void block_read(std::istream & in);
+ public:
+ /**
+ * Default constructor.
+ */
+ IndexSet ();
+
+ /**
+ * Constructor that also sets the
+ * overall size of the index
+ * range.
+ */
- explicit IndexSet (const unsigned int size);
++ explicit IndexSet (const types::global_dof_index size);
+
+ /**
+ * Remove all indices from this
+ * index set. The index set retains
+ * its size, however.
+ */
+ void clear ();
+
+ /**
+ * Set the maximal size of the
+ * indices upon which this object
+ * operates.
+ *
+ * This function can only be
+ * called if the index set does
+ * not yet contain any elements.
+ * This can be achieved by calling
+ * clear(), for example.
+ */
+ void set_size (const unsigned int size);
+
+ /**
+ * Return the size of the index
+ * space of which this index set
+ * is a subset.
+ *
+ * Note that the result is not equal to
+ * the number of indices within this
+ * set. The latter information is
+ * returned by n_elements().
+ */
+ unsigned int size () const;
+
+ /**
+ * Add the half-open range
+ * $[\text{begin},\text{end})$ to
+ * the set of indices represented
+ * by this class.
+ */
+ void add_range (const unsigned int begin,
+ const unsigned int end);
+
+ /**
+ * Add an individual index to the
+ * set of indices.
+ */
+ void add_index (const unsigned int index);
+
+ /**
+ * Add a whole set of indices
+ * described by dereferencing
+ * every element of the
+ * iterator range
+ * <code>[begin,end)</code>.
+ */
+ template <typename ForwardIterator>
+ void add_indices (const ForwardIterator &begin,
+ const ForwardIterator &end);
+
+ /**
+ * Add the given IndexSet @p other to the
+ * current one, constructing the union of
+ * *this and @p other.
+ */
+ void add_indices(const IndexSet &other);
+
+ /**
+ * Return whether the specified
+ * index is an element of the
+ * index set.
+ */
- bool is_element (const unsigned int index) const;
++ bool is_element (const types::global_dof_index index) const;
+
+ /**
+ * Return whether the index set
+ * stored by this object defines
+ * a contiguous range. This is
+ * true also if no indices are
+ * stored at all.
+ */
+ bool is_contiguous () const;
+
+ /**
+ * Return the number of elements
+ * stored in this index set.
+ */
- unsigned int n_elements () const;
++ types::global_dof_index n_elements () const;
+
+ /**
+ * Return the global index of the local
+ * index with number @p local_index
+ * stored in this index set. @p
+ * local_index obviously needs to be less
+ * than n_elements().
+ */
- unsigned int nth_index_in_set (const unsigned int local_index) const;
++ types::global_dof_index nth_index_in_set (const unsigned int local_index) const;
+
+ /**
+ * Return the position (counted in
+ * ascending order) of @p global_index
+ * within this set. @p global_index needs
+ * to be less than size(). This
+ * function throws an exception if the
+ * index @p global_index is not actually
+ * a member of this index set, i.e. if
+ * is_element(global_index) is false.
+ */
- unsigned int index_within_set (const unsigned int global_index) const;
++ unsigned int index_within_set (const types::global_dof_index global_index) const;
+
+ /**
+ * Each index set can be
+ * represented as the union of a
+ * number of contiguous intervals
+ * of indices, where if necessary
+ * intervals may only consist of
+ * individual elements to
+ * represent isolated members of
+ * the index set.
+ *
+ * This function returns the
+ * minimal number of such
+ * intervals that are needed to
+ * represent the index set under
+ * consideration.
+ */
+ unsigned int n_intervals () const;
+
+ /**
+ * Compress the internal
+ * representation by merging
+ * individual elements with
+ * contiguous ranges, etc. This
+ * function does not have any
+ * external effect.
+ */
+ void compress () const;
+
+ /**
+ * Comparison for equality of
+ * index sets. This operation is
+ * only allowed if the size of
+ * the two sets is the same
+ * (though of course they do not
+ * have to have the same number
+ * of indices).
+ */
+ bool operator == (const IndexSet &is) const;
+
+ /**
+ * Comparison for inequality of
+ * index sets. This operation is
+ * only allowed if the size of
+ * the two sets is the same
+ * (though of course they do not
+ * have to have the same number
+ * of indices).
+ */
+ bool operator != (const IndexSet &is) const;
+
+ /**
+ * Return the intersection of the
+ * current index set and the
+ * argument given, i.e. a set of
+ * indices that are elements of
+ * both index sets. The two index
+ * sets must have the same size
+ * (though of course they do not
+ * have to have the same number
+ * of indices).
+ */
+ IndexSet operator & (const IndexSet &is) const;
+
+ /**
+ * This command takes an interval
+ * <tt>[begin, end)</tt> and returns
+ * the intersection of the current
+ * index set with the interval, shifted
+ * to the range <tt>[0,
+ * end-begin)</tt>.
+ */
- IndexSet get_view (const unsigned int begin,
- const unsigned int end) const;
++ IndexSet get_view (const types::global_dof_index begin,
++ const types::global_dof_index end) const;
+
+
+ /**
+ * Removes all elements contained in @p
+ * other from this set. In other words,
+ * if $x$ is the current object and $o$
+ * the argument, then we compute $x
+ * \leftarrow x \backslash o$.
+ */
+ void subtract_set (const IndexSet &other);
+
+
+ /**
+ * Fills the given vector with all
+ * indices contained in this IndexSet.
+ */
- void fill_index_vector(std::vector<unsigned int> &indices) const;
++ void fill_index_vector(std::vector<types::global_dof_index> &indices) const;
+
+ /**
+ * Fill the given vector with zeros
+ * and ones, providing
+ * a binary representation of this
+ * index set. The given vector is
+ * assumed to already have the correct
+ * size.
+ *
+ * The given argument is filled with
+ * integer values zero and one, using
+ * <code>vector.operator[]</code>. Thus,
+ * any object that has such an operator
+ * can be used as long as it allows
+ * conversion of integers zero and one to
+ * elements of the vector. Specifically,
+ * this is the case for classes Vector,
+ * BlockVector, but also
+ * std::vector@<bool@>,
+ * std::vector@<int@>, and
+ * std::vector@<double@>.
+ */
+ template <typename Vector>
+ void fill_binary_vector (Vector &vector) const;
+
+ /**
+ * Outputs a text representation of this
+ * IndexSet to the given stream. Used for
+ * testing.
+ */
+ template <class STREAM>
+ void print(STREAM &out) const;
+
+ /**
+ * Writes the IndexSet into a text-based
+ * file format that can be read in again
+ * using the read() function.
+ */
+ void write(std::ostream &out) const;
+
+ /**
+ * Constructs the IndexSet from a text
+ * based representation given by the
+ * stream @p in written by the
+ * write() function.
+ */
+ void read(std::istream &in);
+
+ /**
+ * Writes the IndexSet into a binary,
+ * compact representation that can be
+ * read in again using the block_read()
+ * function.
+ */
+ void block_write(std::ostream &out) const;
+
+ /**
+ * Constructs the IndexSet from a binary
+ * representation given by the stream
+ * @p in written by the block_write()
+ * function.
+ */
+ void block_read(std::istream &in);
#ifdef DEAL_II_USE_TRILINOS
- /**
- * Given an MPI communicator,
- * create a Trilinos map object
- * that represents a distribution
- * of vector elements or matrix
- * rows in which we will locally
- * store those elements or rows
- * for which we store the index
- * in the current index set, and
- * all the other elements/rows
- * elsewhere on one of the other
- * MPI processes.
- *
- * The last argument only plays a
- * role if the communicator is a
- * parallel one, distributing
- * computations across multiple
- * processors. In that case, if
- * the last argument is false,
- * then it is assumed that the
- * index sets this function is
- * called on on all processors
- * are mutually exclusive but
- * together enumerate each index
- * exactly once. In other words,
- * if you call this function on
- * two processors, then the index
- * sets this function is called
- * with must together have all
- * possible indices from zero to
- * size()-1, and no index must
- * appear in both index
- * sets. This corresponds, for
- * example, to the case where we
- * want to split the elements of
- * vectors into unique subsets to
- * be stored on different
- * processors -- no element
- * should be owned by more than
- * one processor, but each
- * element must be owned by one.
- *
- * On the other hand, if the
- * second argument is true, then
- * the index sets can be
- * overlapping, though they still
- * need to contain each index
- * exactly once on all processors
- * taken together. This is a
- * useful operation if we want to
- * create vectors that not only
- * contain the locally owned
- * indices, but for example also
- * the elements that correspond
- * to degrees of freedom located
- * on ghost cells.
- */
- Epetra_Map make_trilinos_map (const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool overlapping = false) const;
+ /**
+ * Given an MPI communicator,
+ * create a Trilinos map object
+ * that represents a distribution
+ * of vector elements or matrix
+ * rows in which we will locally
+ * store those elements or rows
+ * for which we store the index
+ * in the current index set, and
+ * all the other elements/rows
+ * elsewhere on one of the other
+ * MPI processes.
+ *
+ * The last argument only plays a
+ * role if the communicator is a
+ * parallel one, distributing
+ * computations across multiple
+ * processors. In that case, if
+ * the last argument is false,
+ * then it is assumed that the
+ * index sets this function is
+ * called on on all processors
+ * are mutually exclusive but
+ * together enumerate each index
+ * exactly once. In other words,
+ * if you call this function on
+ * two processors, then the index
+ * sets this function is called
+ * with must together have all
+ * possible indices from zero to
+ * size()-1, and no index must
+ * appear in both index
+ * sets. This corresponds, for
+ * example, to the case where we
+ * want to split the elements of
+ * vectors into unique subsets to
+ * be stored on different
+ * processors -- no element
+ * should be owned by more than
+ * one processor, but each
+ * element must be owned by one.
+ *
+ * On the other hand, if the
+ * second argument is true, then
+ * the index sets can be
+ * overlapping, though they still
+ * need to contain each index
+ * exactly once on all processors
+ * taken together. This is a
+ * useful operation if we want to
+ * create vectors that not only
+ * contain the locally owned
+ * indices, but for example also
+ * the elements that correspond
+ * to degrees of freedom located
+ * on ghost cells.
+ */
+ Epetra_Map make_trilinos_map (const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool overlapping = false) const;
#endif
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object.
+ */
+ std::size_t memory_consumption () const;
+
+ DeclException1 (ExcIndexNotPresent, int,
+ << "The global index " << arg1
+ << " is not an element of this set.");
+
+ /**
+ * Write or read the data of this object to or
+ * from a stream for the purpose of serialization.
+ */
+ template <class Archive>
+ void serialize (Archive &ar, const unsigned int version);
+
+ private:
+ /**
+ * A type that denotes the half
+ * open index range
+ * <code>[begin,end)</code>.
+ *
+ * The nth_index_in_set member denotes
+ * the position within
+ * this IndexSet of the first
+ * element of the current range.
+ * This information is only
+ * accurate if
+ * IndexSet::compress() has been
+ * called after the last
+ * insertion.
+ */
+ struct Range
+ {
- unsigned int begin;
- unsigned int end;
++ types::global_dof_index begin;
++ types::global_dof_index end;
+
- unsigned int nth_index_in_set;
++ types::global_dof_index nth_index_in_set;
- DeclException1 (ExcIndexNotPresent, int,
- << "The global index " << arg1
- << " is not an element of this set.");
+ /**
+ * Default constructor. Since there is no useful choice for
+ * a default constructed interval, this constructor simply
+ * creates something that resembles an invalid range. We
+ * need this constructor for serialization purposes, but the
+ * invalid range should be filled with something read from
+ * the archive before it is used, so we should hopefully
+ * never get to see an invalid range in the wild.
+ **/
+ Range ();
+
+ /**
+ * Constructor. Create a half-open interval with the given indices.
+ *
+ * @param i1 Left end point of the interval.
+ * @param i2 First index greater than the last index of the indicated range.
+ **/
- Range (const unsigned int i1,
- const unsigned int i2);
++ Range (const types::global_dof_index i1,
++ const types::global_dof_index i2);
+
+ friend
+ inline bool operator< (const Range &range_1,
+ const Range &range_2)
+ {
+ return ((range_1.begin < range_2.begin)
+ ||
+ ((range_1.begin == range_2.begin)
+ &&
+ (range_1.end < range_2.end)));
+ }
+
+ static bool end_compare(const IndexSet::Range &x, const IndexSet::Range &y)
+ {
+ return x.end < y.end;
+ }
+
+ static bool nth_index_compare (const IndexSet::Range &x,
+ const IndexSet::Range &y)
+ {
+ return (x.nth_index_in_set+(x.end-x.begin) <
+ y.nth_index_in_set+(y.end-y.begin));
+ }
+
+ friend
+ inline bool operator== (const Range &range_1,
+ const Range &range_2)
+ {
+ return ((range_1.begin == range_2.begin)
+ &&
+ (range_1.end == range_2.end));
+ }
+
+ std::size_t memory_consumption () const
+ {
+ return sizeof(Range);
+ }
/**
* Write or read the data of this object to or
inline
-IndexSet::Range::Range (const unsigned int i1,
- const unsigned int i2)
+IndexSet::Range::Range (const types::global_dof_index i1,
+ const types::global_dof_index i2)
- :
- begin(i1),
- end(i2)
+ :
+ begin(i1),
+ end(i2)
{}
inline
-IndexSet::IndexSet (const unsigned int size)
+IndexSet::IndexSet (const types::global_dof_index size)
- :
- is_compressed (true),
- index_space_size (size),
- largest_range (deal_II_numbers::invalid_unsigned_int)
+ :
+ is_compressed (true),
+ index_space_size (size),
+ largest_range (deal_II_numbers::invalid_unsigned_int)
{}
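// Illustrative usage sketch (not part of the patch): exercising the IndexSet
// interface declared above. The header path and the dealii namespace prefix
// are assumptions; the numbers are made up.

#include <deal.II/base/index_set.h>
#include <iostream>

void index_set_demo ()
{
  dealii::IndexSet set (100);      // index space is [0,100)

  set.add_range (10, 20);          // half-open range [10,20)
  set.add_index (42);
  set.compress ();                 // merge ranges before querying

  // size() is the size of the index space, n_elements() the number of
  // members: here 100 and 11, respectively.
  std::cout << set.n_elements() << " of " << set.size() << std::endl;

  if (set.is_element (42))
    // index_within_set() maps a global index to its position within the
    // set (here 10); nth_index_in_set() is the inverse map (here 42).
    std::cout << set.index_within_set (42) << ' '
              << set.nth_index_in_set (10) << std::endl;
}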
IndexSet::add_indices (const ForwardIterator &begin,
const ForwardIterator &end)
{
- // insert each element of the
- // range. if some of them happen to
- // be consecutive, merge them to a
- // range
+ // insert each element of the
+ // range. if some of them happen to
+ // be consecutive, merge them to a
+ // range
for (ForwardIterator p=begin; p!=end;)
{
- const unsigned int begin_index = *p;
- unsigned int end_index = begin_index + 1;
+ const types::global_dof_index begin_index = *p;
+ types::global_dof_index end_index = begin_index + 1;
ForwardIterator q = p;
++q;
while ((q != end) && (*q == end_index))
inline
-unsigned int
+types::global_dof_index
IndexSet::n_elements () const
{
- // make sure we have
- // non-overlapping ranges
+ // make sure we have
+ // non-overlapping ranges
compress ();
unsigned int v = 0;
inline
-unsigned int
+types::global_dof_index
IndexSet::nth_index_in_set (const unsigned int n) const
{
- // to make this call thread-safe, compress()
- // must not be called through this function
+ // to make this call thread-safe, compress()
+ // must not be called through this function
Assert (is_compressed == true, ExcMessage ("IndexSet must be compressed."));
Assert (n < n_elements(), ExcIndexRange (n, 0, n_elements()));
inline
unsigned int
-IndexSet::index_within_set (const unsigned int n) const
+IndexSet::index_within_set (const types::global_dof_index n) const
{
- // to make this call thread-safe, compress()
- // must not be called through this function
+ // to make this call thread-safe, compress()
+ // must not be called through this function
Assert (is_compressed == true, ExcMessage ("IndexSet must be compressed."));
Assert (is_element(n) == true, ExcIndexNotPresent (n));
Assert (n < size(), ExcIndexRange (n, 0, size()));
*/
class LogStream : public Subscriptor
{
+ public:
+ /**
+ * A subclass allowing for the
+ * safe generation and removal of
+ * prefixes.
+ *
+ * Somewhere at the beginning of
+ * a block, create one of these
+ * objects, and it will appear as
+ * a prefix in LogStream output
+ * like @p deallog. At the end of
+ * the block, the prefix will
+ * automatically be removed, when
+ * this object is destroyed.
+ */
+ class Prefix
+ {
public:
- /**
- * A subclass allowing for the
- * safe generation and removal of
- * prefices.
- *
- * Somewhere at the beginning of
- * a block, create one of these
- * objects, and it will appear as
- * a prefix in LogStream output
- * like @p deallog. At the end of
- * the block, the prefix will
- * automatically be removed, when
- * this object is destroyed.
- */
- class Prefix
- {
- public:
- /**
- * Set a new prefix for
- * @p deallog, which will be
- * removed when the variable
- * is destroyed .
- */
- Prefix(const std::string& text);
-
- /**
- * Set a new prefix for the
- * given stream, which will
- * be removed when the
- * variable is destroyed .
- */
- Prefix(const std::string& text, LogStream& stream);
-
- /**
- * Remove the prefix
- * associated with this
- * variable.
- */
- ~Prefix ();
-
- private:
- SmartPointer<LogStream,LogStream::Prefix> stream;
- };
-
- /**
- * Standard constructor, since we
- * intend to provide an object
- * <tt>deallog</tt> in the library. Set the
- * standard output stream to <tt>std::cerr</tt>.
- */
- LogStream ();
-
- /**
- * Destructor.
- */
- ~LogStream();
-
- /**
- * Enable output to a second
- * stream <tt>o</tt>.
- */
- void attach (std::ostream& o);
-
- /**
- * Disable output to the second
- * stream. You may want to call
- * <tt>close</tt> on the stream that was
- * previously attached to this object.
- */
- void detach ();
-
- /**
- * Setup the logstream for
- * regression test mode.
- *
- * This sets the parameters
- * #double_threshold,
- * #float_threshold, and #offset
- * to nonzero values. The exact
- * values being used have been
- * determined experimentally and
- * can be found in the source
- * code.
- *
- * Called with an argument
- * <tt>false</tt>, switches off
- * test mode and sets all
- * involved parameters to zero.
- */
- void test_mode (bool on=true);
-
- /**
- * Gives the default stream (<tt>std_out</tt>).
- */
- std::ostream& get_console ();
-
- /**
- * Gives the file stream.
- */
- std::ostream& get_file_stream ();
-
- /**
- * @return true, if file stream
- * has already been attached.
- */
- bool has_file () const;
-
- /**
- * Reroutes cerr to LogStream.
- * Works as a switch, turning
- * logging of <tt>cerr</tt> on
- * and off alternatingly with
- * every call.
- */
- void log_cerr ();
-
- /**
- * Return the prefix string.
- */
- const std::string& get_prefix () const;
-
- /**
- * @deprecated Use Prefix instead
- *
- * Push another prefix on the
- * stack. Prefixes are
- * automatically separated by a
- * colon and there is a double
- * colon after the last prefix.
- */
- void push (const std::string& text);
-
- /**
- * @deprecated Use Prefix instead
- *
- * Remove the last prefix.
- */
- void pop ();
-
- /**
- * Maximum number of levels to be
- * printed on the console. This
- * function allows to restrict
- * console output to the upmost
- * levels of iterations. Only
- * output with less than <tt>n</tt>
- * prefixes is printed. By calling
- * this function with <tt>n=0</tt>, no
- * console output will be written.
- *
- * The previous value of this
- * parameter is returned.
- */
- unsigned int depth_console (const unsigned int n);
-
- /**
- * Maximum number of levels to be
- * written to the log file. The
- * functionality is the same as
- * <tt>depth_console</tt>, nevertheless,
- * this function should be used
- * with care, since it may spoile
- * the value of a log file.
- *
- * The previous value of this
- * parameter is returned.
- */
- unsigned int depth_file (const unsigned int n);
-
- /**
- * Set time printing flag. If this flag
- * is true, each output line will
- * be prepended by the user time used
- * by the running program so far.
- *
- * The previous value of this
- * parameter is returned.
- */
- bool log_execution_time (const bool flag);
-
- /**
- * Output time differences
- * between consecutive logs. If
- * this function is invoked with
- * <tt>true</tt>, the time difference
- * between the previous log line
- * and the recent one is
- * printed. If it is invoked with
- * <tt>false</tt>, the accumulated
- * time since start of the
- * program is printed (default
- * behavior).
- *
- * The measurement of times is
- * not changed by this function,
- * just the output.
- *
- * The previous value of this
- * parameter is returned.
- */
- bool log_time_differences (const bool flag);
-
- /**
- * Write detailed timing
- * information.
- *
- *
- */
- void timestamp();
-
- /**
- * Log the thread id.
- */
- bool log_thread_id (const bool flag);
-
- /**
- * Set a threshold for the
- * minimal absolute value of
- * double values. All numbers
- * with a smaller absolute value
- * will be printed as zero.
- *
- * The default value for this
- * threshold is zero,
- * i.e. numbers are printed
- * according to their real value.
- *
- * This feature is mostly useful
- * for automated tests: there,
- * one would like to reproduce
- * the exact same solution in
- * each run of a
- * testsuite. However, subtle
- * difference in processor,
- * operating system, or compiler
- * version can lead to
- * differences in the last few
- * digits of numbers, due to
- * different rounding. While one
- * can avoid trouble for most
- * numbers when comparing with
- * stored results by simply
- * limiting the accuracy of
- * output, this does not hold for
- * numbers very close to zero,
- * i.e. zero plus accumulated
- * round-off. For these numbers,
- * already the first digit is
- * tainted by round-off. Using
- * the present function, it is
- * possible to eliminate this
- * source of problems, by simply
- * writing zero to the output in
- * this case.
- */
- void threshold_double(const double t);
- /**
- * The same as
- * threshold_double(), but for
- * float values.
- */
- void threshold_float(const float t);
-
- /**
- * Output a constant something
- * through this stream.
- */
- template <typename T>
- LogStream & operator << (const T &t);
-
- /**
- * Output double precision
- * numbers through this
- * stream.
- *
- * If they are set, this function
- * applies the methods for making
- * floating point output
- * reproducible as discussed in
- * the introduction.
- */
- LogStream & operator << (const double t);
-
- /**
- * Output single precision
- * numbers through this
- * stream.
- *
- * If they are set, this function
- * applies the methods for making
- * floating point output
- * reproducible as discussed in
- * the introduction.
- */
- LogStream & operator << (const float t);
-
- /**
- * Treat ostream
- * manipulators. This passes on
- * the whole thing to the
- * template function with the
- * exception of the
- * <tt>std::endl</tt>
- * manipulator, for which special
- * action is performed: write the
- * temporary stream buffer
- * including a header to the file
- * and <tt>std::cout</tt> and
- * empty the buffer.
- *
- * An overload of this function is needed
- * anyway, since the compiler can't bind
- * manipulators like @p std::endl
- * directly to template arguments @p T
- * like in the previous general
- * template. This is due to the fact that
- * @p std::endl is actually an overloaded
- * set of functions for @p std::ostream,
- * @p std::wostream, and potentially more
- * of this kind. This function is
- * therefore necessary to pick one
- * element from this overload set.
- */
- LogStream & operator<< (std::ostream& (*p) (std::ostream&));
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this
- * object. Since sometimes
- * the size of objects can
- * not be determined exactly
- * (for example: what is the
- * memory consumption of an
- * STL <tt>std::map</tt> type with a
- * certain number of
- * elements?), this is only
- * an estimate. however often
- * quite close to the true
- * value.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Exception.
- */
- DeclException0(ExcNoFileStreamGiven);
+ /**
+ * Set a new prefix for
+ * @p deallog, which will be
+ * removed when the variable
+ * is destroyed.
+ */
+ Prefix(const std::string &text);
+
+ /**
+ * Set a new prefix for the
+ * given stream, which will
+ * be removed when the
+ * variable is destroyed.
+ */
+ Prefix(const std::string &text, LogStream &stream);
+
+ /**
+ * Remove the prefix
+ * associated with this
+ * variable.
+ */
+ ~Prefix ();
private:
-
- /**
- * Stack of strings which are printed
- * at the beginning of each line to
- * allow identification where the
- * output was generated.
- */
- std::stack<std::string> prefixes;
-
- /**
- * Default stream, where the output
- * is to go to. This stream defaults
- * to <tt>std::cerr</tt>, but can be set to another
- * stream through the constructor.
- */
- std::ostream *std_out;
-
- /**
- * Pointer to a stream, where a copy of
- * the output is to go to. Usually, this
- * will be a file stream.
- *
- * You can set and reset this stream
- * by the <tt>attach</tt> function.
- */
- std::ostream *file;
-
- /**
- * Value denoting the number of
- * prefixes to be printed to the
- * standard output. If more than
- * this number of prefixes is
- * pushed to the stack, then no
- * output will be generated until
- * the number of prefixes shrinks
- * back below this number.
- */
- unsigned int std_depth;
-
- /**
- * Same for the maximum depth of
- * prefixes for output to a file.
- */
- unsigned int file_depth;
-
- /**
- * Flag for printing execution time.
- */
- bool print_utime;
-
- /**
- * Flag for printing time differences.
- */
- bool diff_utime;
-
- /**
- * Time of last output line.
- */
- double last_time;
-
- /**
- * Threshold for printing double
- * values. Every number with
- * absolute value less than this
- * is printed as zero.
- */
- double double_threshold;
-
- /**
- * Threshold for printing float
- * values. Every number with
- * absolute value less than this
- * is printed as zero.
- */
- float float_threshold;
-
- /**
- * An offset added to every float
- * or double number upon
- * output. This is done after the
- * number is compared to
- * #double_threshold or #float_threshold,
- * but before rounding.
- *
- * This functionality was
- * introduced to produce more
- * reproducible floating point
- * output for regression
- * tests. The rationale is, that
- * an exact output value is much
- * more likely to be 1/8 than
- * 0.124997. If we round to two
- * digits though, 1/8 becomes
- * unreliably either .12 or .13
- * due to machine accuracy. On
- * the other hand, if we add a
- * something above machine
- * accuracy first, we will always
- * get .13.
- *
- * It is safe to leave this
- * value equal to zero. For
- * regression tests, the function
- * test_mode() sets it to a
- * reasonable value.
- *
- * The offset is relative to the
- * magnitude of the number.
- */
- double offset;
-
- /**
- * Flag for printing thread id.
- */
- bool print_thread_id;
-
- /**
- * The value times() returned
- * on initialization.
- */
- double reference_time_val;
-
- /**
- * The tms structure times()
- * filled on initialization.
- */
- struct tms reference_tms;
-
- /**
- * Original buffer of
- * <tt>std::cerr</tt>. We store
- * the address of that buffer
- * when #log_cerr is called, and
- * reset it to this value if
- * #log_cerr is called a second
- * time, or when the destructor
- * of this class is run.
- */
- std::streambuf *old_cerr;
-
- /**
- * Print head of line. This prints
- * optional time information and
- * the contents of the prefix stack.
- */
- void print_line_head ();
-
- /**
- * Actually do the work of
- * writing output. This function
- * unifies the work that is
- * common to the two
- * <tt>operator<<</tt> functions.
- */
- template <typename T>
- void print (const T &t);
- /**
- * Check if we are on a new line
- * and print the header before
- * the data.
- */
- std::ostringstream& get_stream();
-
- /**
- * Type of the stream map
- */
- typedef std::map<unsigned int, std_cxx1x::shared_ptr<std::ostringstream> > stream_map_type;
-
- /**
- * We generate a stringstream for
- * every process that sends log
- * messages.
- */
- stream_map_type outstreams;
+ SmartPointer<LogStream,LogStream::Prefix> stream;
+ };
+
+ /**
+ * Standard constructor, since we
+ * intend to provide an object
+ * <tt>deallog</tt> in the library. Set the
+ * standard output stream to <tt>std::cerr</tt>.
+ */
+ LogStream ();
+
+ /**
+ * Destructor.
+ */
+ ~LogStream();
+
+ /**
+ * Enable output to a second
+ * stream <tt>o</tt>.
+ */
+ void attach (std::ostream &o);
+
+ /**
+ * Disable output to the second
+ * stream. You may want to call
+ * <tt>close</tt> on the stream that was
+ * previously attached to this object.
+ */
+ void detach ();
+
+ /**
+ * Set up the logstream for
+ * regression test mode.
+ *
+ * This sets the parameters
+ * #double_threshold,
+ * #float_threshold, and #offset
+ * to nonzero values. The exact
+ * values being used have been
+ * determined experimentally and
+ * can be found in the source
+ * code.
+ *
+ * Called with an argument
+ * <tt>false</tt>, switches off
+ * test mode and sets all
+ * involved parameters to zero.
+ */
+ void test_mode (bool on=true);
+
+ /**
+ * Gives the default stream (<tt>std_out</tt>).
+ */
+ std::ostream &get_console ();
+
+ /**
+ * Gives the file stream.
+ */
+ std::ostream &get_file_stream ();
+
+ /**
+ * @return true, if file stream
+ * has already been attached.
+ */
+ bool has_file () const;
+
+ /**
+ * Reroutes cerr to LogStream.
+ * Works as a switch, turning
+ * logging of <tt>cerr</tt> on
+ * and off alternatingly with
+ * every call.
+ */
+ void log_cerr ();
+
+ /**
+ * Return the prefix string.
+ */
+ const std::string &get_prefix () const;
+
+ /**
+ * @deprecated Use Prefix instead
+ *
+ * Push another prefix on the
+ * stack. Prefixes are
+ * automatically separated by a
+ * colon and there is a double
+ * colon after the last prefix.
+ */
+ void push (const std::string &text);
+
+ /**
+ * @deprecated Use Prefix instead
+ *
+ * Remove the last prefix.
+ */
+ void pop ();
+
+ /**
+ * Maximum number of levels to be
+ * printed on the console. This
+ * function allows one to restrict
+ * console output to the uppermost
+ * levels of iterations. Only
+ * output with less than <tt>n</tt>
+ * prefixes is printed. By calling
+ * this function with <tt>n=0</tt>, no
+ * console output will be written.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ unsigned int depth_console (const unsigned int n);
+
+ /**
+ * Maximum number of levels to be
+ * written to the log file. The
+ * functionality is the same as
+ * <tt>depth_console</tt>, nevertheless,
+ * this function should be used
+ * with care, since it may spoil
+ * the value of a log file.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ unsigned int depth_file (const unsigned int n);
+
+ /**
+ * Set time printing flag. If this flag
+ * is true, each output line will
+ * be prepended by the user time used
+ * by the running program so far.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ bool log_execution_time (const bool flag);
+
+ /**
+ * Output time differences
+ * between consecutive logs. If
+ * this function is invoked with
+ * <tt>true</tt>, the time difference
+ * between the previous log line
+ * and the recent one is
+ * printed. If it is invoked with
+ * <tt>false</tt>, the accumulated
+ * time since start of the
+ * program is printed (default
+ * behavior).
+ *
+ * The measurement of times is
+ * not changed by this function,
+ * just the output.
+ *
+ * The previous value of this
+ * parameter is returned.
+ */
+ bool log_time_differences (const bool flag);
+
+ /**
+ * Write detailed timing
+ * information.
+ */
+ void timestamp();
+
+ /**
+ * Log the thread id.
+ */
+ bool log_thread_id (const bool flag);
+
+ /**
+ * Set a threshold for the
+ * minimal absolute value of
+ * double values. All numbers
+ * with a smaller absolute value
+ * will be printed as zero.
+ *
+ * The default value for this
+ * threshold is zero,
+ * i.e. numbers are printed
+ * according to their real value.
+ *
+ * This feature is mostly useful
+ * for automated tests: there,
+ * one would like to reproduce
+ * the exact same solution in
+ * each run of a
+ * testsuite. However, subtle
+ * differences in processor,
+ * operating system, or compiler
+ * version can lead to
+ * differences in the last few
+ * digits of numbers, due to
+ * different rounding. While one
+ * can avoid trouble for most
+ * numbers when comparing with
+ * stored results by simply
+ * limiting the accuracy of
+ * output, this does not hold for
+ * numbers very close to zero,
+ * i.e. zero plus accumulated
+ * round-off. For these numbers,
+ * even the first digit is
+ * tainted by round-off. Using
+ * the present function, it is
+ * possible to eliminate this
+ * source of problems, by simply
+ * writing zero to the output in
+ * this case.
+ */
+ void threshold_double(const double t);
+ /**
+ * The same as
+ * threshold_double(), but for
+ * float values.
+ */
+ void threshold_float(const float t);
+
+ /**
+ * Output a constant something
+ * through this stream.
+ */
+ template <typename T>
+ LogStream &operator << (const T &t);
+
+ /**
+ * Output double precision
+ * numbers through this
+ * stream.
+ *
+ * If they are set, this function
+ * applies the methods for making
+ * floating point output
+ * reproducible as discussed in
+ * the introduction.
+ */
+ LogStream &operator << (const double t);
+
+ /**
+ * Output single precision
+ * numbers through this
+ * stream.
+ *
+ * If they are set, this function
+ * applies the methods for making
+ * floating point output
+ * reproducible as discussed in
+ * the introduction.
+ */
+ LogStream &operator << (const float t);
+
+ /**
+ * Treat ostream
+ * manipulators. This passes on
+ * the whole thing to the
+ * template function with the
+ * exception of the
+ * <tt>std::endl</tt>
+ * manipulator, for which special
+ * action is performed: write the
+ * temporary stream buffer
+ * including a header to the file
+ * and <tt>std::cout</tt> and
+ * empty the buffer.
+ *
+ * An overload of this function is needed
+ * anyway, since the compiler can't bind
+ * manipulators like @p std::endl
+ * directly to template arguments @p T
+ * like in the previous general
+ * template. This is due to the fact that
+ * @p std::endl is actually an overloaded
+ * set of functions for @p std::ostream,
+ * @p std::wostream, and potentially more
+ * of this kind. This function is
+ * therefore necessary to pick one
+ * element from this overload set.
+ */
+ LogStream &operator<< (std::ostream& (*p) (std::ostream &));
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this
+ * object. Since sometimes
+ * the size of objects can
+ * not be determined exactly
+ * (for example: what is the
+ * memory consumption of an
+ * STL <tt>std::map</tt> type with a
+ * certain number of
+ * elements?), this is only
+ * an estimate; however, it is often
+ * quite close to the true
+ * value.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Exception.
+ */
+ DeclException0(ExcNoFileStreamGiven);
+
+ private:
+
+ /**
+ * Stack of strings which are printed
+ * at the beginning of each line to
+ * allow identification where the
+ * output was generated.
+ */
+ std::stack<std::string> prefixes;
+
+ /**
+ * Default stream, where the output
+ * is to go to. This stream defaults
+ * to <tt>std::cerr</tt>, but can be set to another
+ * stream through the constructor.
+ */
- std::ostream *std_out;
++ std::ostream *std_out;
+
+ /**
+ * Pointer to a stream, where a copy of
+ * the output is to go to. Usually, this
+ * will be a file stream.
+ *
+ * You can set and reset this stream
+ * by the <tt>attach</tt> function.
+ */
- std::ostream *file;
++ std::ostream *file;
+
+ /**
+ * Value denoting the number of
+ * prefixes to be printed to the
+ * standard output. If more than
+ * this number of prefixes is
+ * pushed to the stack, then no
+ * output will be generated until
+ * the number of prefixes shrinks
+ * back below this number.
+ */
+ unsigned int std_depth;
+
+ /**
+ * Same for the maximum depth of
+ * prefixes for output to a file.
+ */
+ unsigned int file_depth;
+
+ /**
+ * Flag for printing execution time.
+ */
+ bool print_utime;
+
+ /**
+ * Flag for printing time differences.
+ */
+ bool diff_utime;
+
+ /**
+ * Time of last output line.
+ */
+ double last_time;
+
+ /**
+ * Threshold for printing double
+ * values. Every number with
+ * absolute value less than this
+ * is printed as zero.
+ */
+ double double_threshold;
+
+ /**
+ * Threshold for printing float
+ * values. Every number with
+ * absolute value less than this
+ * is printed as zero.
+ */
+ float float_threshold;
+
+ /**
+ * An offset added to every float
+ * or double number upon
+ * output. This is done after the
+ * number is compared to
+ * #double_threshold or #float_threshold,
+ * but before rounding.
+ *
+ * This functionality was
+ * introduced to produce more
+ * reproducible floating point
+ * output for regression
+ * tests. The rationale is that
+ * an exact output value is much
+ * more likely to be 1/8 than
+ * 0.124997. If we round to two
+ * digits though, 1/8 becomes
+ * unreliably either .12 or .13
+ * due to machine accuracy. On
+ * the other hand, if we add
+ * something above machine
+ * accuracy first, we will always
+ * get .13.
+ *
+ * It is safe to leave this
+ * value equal to zero. For
+ * regression tests, the function
+ * test_mode() sets it to a
+ * reasonable value.
+ *
+ * The offset is relative to the
+ * magnitude of the number.
+ */
+ double offset;
+
+ /**
+ * Flag for printing thread id.
+ */
+ bool print_thread_id;
+
+ /**
+ * The value times() returned
+ * on initialization.
+ */
+ double reference_time_val;
+
+ /**
+ * The tms structure times()
+ * filled on initialization.
+ */
+ struct tms reference_tms;
+
+ /**
+ * Original buffer of
+ * <tt>std::cerr</tt>. We store
+ * the address of that buffer
+ * when #log_cerr is called, and
+ * reset it to this value if
+ * #log_cerr is called a second
+ * time, or when the destructor
+ * of this class is run.
+ */
+ std::streambuf *old_cerr;
+
+ /**
+ * Print head of line. This prints
+ * optional time information and
+ * the contents of the prefix stack.
+ */
+ void print_line_head ();
+
+ /**
+ * Actually do the work of
+ * writing output. This function
+ * unifies the work that is
+ * common to the two
+ * <tt>operator<<</tt> functions.
+ */
+ template <typename T>
+ void print (const T &t);
+ /**
+ * Check if we are on a new line
+ * and print the header before
+ * the data.
+ */
+ std::ostringstream &get_stream();
+
+ /**
+ * Type of the stream map
+ */
+ typedef std::map<unsigned int, std_cxx1x::shared_ptr<std::ostringstream> > stream_map_type;
+
+ /**
+ * We generate a stringstream for
+ * every process that sends log
+ * messages.
+ */
+ stream_map_type outstreams;
};
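// Illustrative usage sketch (not part of the patch): driving the LogStream
// interface above through the global deallog object. The file name is made
// up; the header path is an assumption.

#include <deal.II/base/logstream.h>
#include <fstream>

void logstream_demo ()
{
  using namespace dealii;

  std::ofstream logfile ("output.log");
  deallog.attach (logfile);       // copy the output to a file stream
  deallog.depth_console (2);      // show at most two prefix levels on the console

  deallog << "starting" << std::endl;
  {
    // Prefix pushes "solver" onto the prefix stack and pops it again
    // when the enclosing block is left.
    LogStream::Prefix prefix ("solver");
    deallog << "iteration 1" << std::endl;
  }
  deallog << "done" << std::endl;

  deallog.detach ();
}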
namespace internal
{
#if DEAL_II_USE_MT == 1
- /**
- * Take a range argument and call the
- * given function with its begin and end.
- */
+ /**
+ * Take a range argument and call the
+ * given function with its begin and end.
+ */
template <typename RangeType, typename Function>
void apply_to_subranges (const tbb::blocked_range<RangeType> &range,
- const Function &f)
+ const Function &f)
{
f (range.begin(), range.end());
}
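// Illustrative sketch (not part of the patch, and assuming TBB is available):
// the helper above merely forwards the bounds of a tbb::blocked_range to a
// callable expecting a plain [begin,end) pair, which is how a parallel loop
// can be split into subranges. scale_range() is a hypothetical example.

#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <vector>

void scale_range (std::vector<double>::iterator begin,
                  std::vector<double>::iterator end)
{
  for (; begin != end; ++begin)
    *begin *= 2.0;
}

void scale_in_parallel (std::vector<double> &v)
{
  typedef std::vector<double>::iterator It;
  tbb::parallel_for (tbb::blocked_range<It> (v.begin(), v.end()),
                     [](const tbb::blocked_range<It> &range)
                     {
                       // This call is what apply_to_subranges() performs
                       // for the user-provided function:
                       scale_range (range.begin(), range.end());
                     });
}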
namespace Patterns
{
- /**
- * Base class to declare common
- * interface. The purpose of this
- * class is mostly to define the
- * interface of patterns, and to
- * force derived classes to have a
- * <tt>clone</tt> function. It is thus,
- * in the languages of the "Design
- * Patterns" book (Gamma et al.), a
- * "prototype".
- */
+ /**
+ * Base class to declare common
+ * interface. The purpose of this
+ * class is mostly to define the
+ * interface of patterns, and to
+ * force derived classes to have a
+ * <tt>clone</tt> function. It is thus,
+ * in the language of the "Design
+ * Patterns" book (Gamma et al.), a
+ * "prototype".
+ */
class PatternBase
{
- public:
- /**
- * Make destructor of this and all
- * derived classes virtual.
- */
- virtual ~PatternBase ();
-
- /**
- * Return <tt>true</tt> if the given string
- * matches the pattern.
- */
- virtual bool match (const std::string &test_string) const = 0;
-
- /**
- * Return a string describing the
- * pattern.
- */
- virtual std::string description () const = 0;
-
- /**
- * Return a pointer to an
- * exact copy of the
- * object. This is necessary
- * since we want to store
- * objects of this type in
- * containers, were we need
- * to copy objects without
- * knowledge of their actual
- * data type (we only have
- * pointers to the base
- * class).
- *
- * Ownership of the objects
- * returned by this function
- * is passed to the caller of
- * this function.
- */
- virtual PatternBase * clone () const = 0;
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this object. To
- * avoid unnecessary
- * overhead, we do not force
- * derived classes to provide
- * this function as a virtual
- * overloaded one, but rather
- * try to cast the present
- * object to one of the known
- * derived classes and if
- * that fails then take the
- * size of this base class
- * instead and add 32 byte
- * (this value is arbitrary,
- * it should account for
- * virtual function tables,
- * and some possible data
- * elements). Since there are
- * usually not many thousands
- * of objects of this type
- * around, and since the
- * memory_consumption
- * mechanism is used to find
- * out where memory in the
- * range of many megabytes
- * is, this seems like a
- * reasonable approximation.
- *
- * On the other hand, if you
- * know that your class
- * deviates from this
- * assumption significantly,
- * you can still overload
- * this function.
- */
- virtual std::size_t memory_consumption () const;
+ public:
+ /**
+ * Make destructor of this and all
+ * derived classes virtual.
+ */
+ virtual ~PatternBase ();
+
+ /**
+ * Return <tt>true</tt> if the given string
+ * matches the pattern.
+ */
+ virtual bool match (const std::string &test_string) const = 0;
+
+ /**
+ * Return a string describing the
+ * pattern.
+ */
+ virtual std::string description () const = 0;
+
+ /**
+ * Return a pointer to an
+ * exact copy of the
+ * object. This is necessary
+ * since we want to store
+ * objects of this type in
+ * containers, where we need
+ * to copy objects without
+ * knowledge of their actual
+ * data type (we only have
+ * pointers to the base
+ * class).
+ *
+ * Ownership of the objects
+ * returned by this function
+ * is passed to the caller of
+ * this function.
+ */
+ virtual PatternBase *clone () const = 0;
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this object. To
+ * avoid unnecessary
+ * overhead, we do not force
+ * derived classes to provide
+ * this function as a virtual
+ * overloaded one, but rather
+ * try to cast the present
+ * object to one of the known
+ * derived classes and if
+ * that fails then take the
+ * size of this base class
+ * instead and add 32 bytes
+ * (this value is arbitrary,
+ * it should account for
+ * virtual function tables,
+ * and some possible data
+ * elements). Since there are
+ * usually not many thousands
+ * of objects of this type
+ * around, and since the
+ * memory_consumption
+ * mechanism is used to find
+ * out where memory in the
+ * range of many megabytes
+ * is, this seems like a
+ * reasonable approximation.
+ *
+ * On the other hand, if you
+ * know that your class
+ * deviates from this
+ * assumption significantly,
+ * you can still overload
+ * this function.
+ */
+ virtual std::size_t memory_consumption () const;
};
- /**
- * Returns pointer to the correct
- * derived class based on description.
- */
- PatternBase * pattern_factory (const std::string& description);
-
- /**
- * Test for the string being an
- * integer. If bounds are given
- * to the constructor, then the
- * integer given also needs to be
- * within the interval specified
- * by these bounds. Note that
- * unlike common convention in
- * the C++ standard library, both
- * bounds of this interval are
- * inclusive; the reason is that
- * in practice in most cases, one
- * needs closed intervals, but
- * these can only be realized
- * with inclusive bounds for
- * non-integer values. We thus
- * stay consistent by always
- * using closed intervals.
- *
- * If the upper bound given to
- * the constructor is smaller
- * than the lower bound, then the
- * infinite interval is implied,
- * i.e. every integer is allowed.
- *
- * Giving bounds may be useful if
- * for example a value can only
- * be positive and less than a
- * reasonable upper bound (for
- * example the number of
- * refinement steps to be
- * performed), or in many other
- * cases.
- */
+ /**
+ * Returns a pointer to the correct
+ * derived class based on description.
+ */
+ PatternBase *pattern_factory (const std::string &description);
+
+ /**
+ * Test for the string being an
+ * integer. If bounds are given
+ * to the constructor, then the
+ * integer given also needs to be
+ * within the interval specified
+ * by these bounds. Note that
+ * unlike common convention in
+ * the C++ standard library, both
+ * bounds of this interval are
+ * inclusive; the reason is that
+ * in practice in most cases, one
+ * needs closed intervals, but
+ * these can only be realized
+ * with inclusive bounds for
+ * non-integer values. We thus
+ * stay consistent by always
+ * using closed intervals.
+ *
+ * If the upper bound given to
+ * the constructor is smaller
+ * than the lower bound, then the
+ * infinite interval is implied,
+ * i.e. every integer is allowed.
+ *
+ * Giving bounds may be useful if
+ * for example a value can only
+ * be positive and less than a
+ * reasonable upper bound (for
+ * example the number of
+ * refinement steps to be
+ * performed), or in many other
+ * cases.
+ */
class Integer : public PatternBase
{
- public:
- /**
- * Minimal integer value. If
- * the numeric_limits class
- * is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const int min_int_value;
-
- /**
- * Maximal integer value. If
- * the numeric_limits class
- * is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const int max_int_value;
-
- /**
- * Constructor. Bounds can be
- * specified within which a
- * valid parameter has to
- * be. If the upper bound is
- * smaller than the lower
- * bound, then the infinite
- * interval is meant. The
- * default values are chosen
- * such that no bounds are
- * enforced on parameters.
- */
- Integer (const int lower_bound = min_int_value,
- const int upper_bound = max_int_value);
-
- /**
- * Return <tt>true</tt> if the
- * string is an integer and
- * its value is within the
- * specified range.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match. If bounds were
- * specified to the
- * constructor, then include
- * them into this
- * description.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static Integer* create (const std::string& description);
-
- private:
- /**
- * Value of the lower
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or larger, if the
- * bounds of the interval for
- * a valid range.
- */
- const int lower_bound;
-
- /**
- * Value of the upper
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or less, if the
- * bounds of the interval for
- * a valid range.
- */
- const int upper_bound;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
+ public:
+ /**
+ * Minimal integer value. If
+ * the numeric_limits class
+ * is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const int min_int_value;
+
+ /**
+ * Maximal integer value. If
+ * the numeric_limits class
+ * is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const int max_int_value;
+
+ /**
+ * Constructor. Bounds can be
+ * specified within which a
+ * valid parameter has to
+ * be. If the upper bound is
+ * smaller than the lower
+ * bound, then the infinite
+ * interval is meant. The
+ * default values are chosen
+ * such that no bounds are
+ * enforced on parameters.
+ */
+ Integer (const int lower_bound = min_int_value,
+ const int upper_bound = max_int_value);
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is an integer and
+ * its value is within the
+ * specified range.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match. If bounds were
+ * specified to the
+ * constructor, then include
+ * them into this
+ * description.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Creates new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static Integer *create (const std::string &description);
+
+ private:
+ /**
+ * Value of the lower
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or larger, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const int lower_bound;
+
+ /**
+ * Value of the upper
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or less, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const int upper_bound;
+
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
};
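A minimal usage sketch for the pattern above, in the form it is typically passed to ParameterHandler::declare_entry (the entry name, default value and bounds are arbitrary examples):

  #include <deal.II/base/parameter_handler.h>

  void declare_refinement_parameter (dealii::ParameterHandler &prm)
  {
    // Accept only integers in the closed interval [0,10]; anything else,
    // including non-integer input, is rejected when the file is parsed.
    prm.declare_entry ("Refinement steps", "3",
                       dealii::Patterns::Integer (0, 10),
                       "Number of global refinement steps to perform");
  }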
- /**
- * Test for the string being a
- * <tt>double</tt>. If bounds are
- * given to the constructor, then
- * the integer given also needs
- * to be within the interval
- * specified by these
- * bounds. Note that unlike
- * common convention in the C++
- * standard library, both bounds
- * of this interval are
- * inclusive; the reason is that
- * in practice in most cases, one
- * needs closed intervals, but
- * these can only be realized
- * with inclusive bounds for
- * non-integer values. We thus
- * stay consistent by always
- * using closed intervals.
- *
- * If the upper bound given to
- * the constructor is smaller
- * than the lower bound, then the
- * infinite interval is implied,
- * i.e. every integer is allowed.
- *
- * Giving bounds may be useful if
- * for example a value can only
- * be positive and less than a
- * reasonable upper bound (for
- * example damping parameters are
- * frequently only reasonable if
- * between zero and one), or in
- * many other cases.
- */
+ /**
+ * Test for the string being a
+ * <tt>double</tt>. If bounds are
+ * given to the constructor, then
+ * the number given also needs
+ * to be within the interval
+ * specified by these
+ * bounds. Note that unlike
+ * common convention in the C++
+ * standard library, both bounds
+ * of this interval are
+ * inclusive; the reason is that
+ * in practice in most cases, one
+ * needs closed intervals, but
+ * these can only be realized
+ * with inclusive bounds for
+ * non-integer values. We thus
+ * stay consistent by always
+ * using closed intervals.
+ *
+ * If the upper bound given to
+ * the constructor is smaller
+ * than the lower bound, then the
+ * infinite interval is implied,
+ * i.e. every double is allowed.
+ *
+ * Giving bounds may be useful if
+ * for example a value can only
+ * be positive and less than a
+ * reasonable upper bound (for
+ * example damping parameters are
+ * frequently only reasonable if
+ * between zero and one), or in
+ * many other cases.
+ */
class Double : public PatternBase
{
- public:
- /**
- * Minimal double value. If the
- * <tt>std::numeric_limits</tt>
- * class is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const double min_double_value;
-
- /**
- * Maximal double value. If the
- * numeric_limits class is
- * available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const double max_double_value;
-
- /**
- * Constructor. Bounds can be
- * specified within which a
- * valid parameter has to
- * be. If the upper bound is
- * smaller than the lower
- * bound, then the infinite
- * interval is meant. The
- * default values are chosen
- * such that no bounds are
- * enforced on parameters.
- */
- Double (const double lower_bound = min_double_value,
- const double upper_bound = max_double_value);
-
- /**
- * Return <tt>true</tt> if the
- * string is a number and its
- * value is within the
- * specified range.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match. If bounds were
- * specified to the
- * constructor, then include
- * them into this
- * description.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static Double* create (const std::string& description);
-
- private:
- /**
- * Value of the lower
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or larger, if the
- * bounds of the interval for
- * a valid range.
- */
- const double lower_bound;
-
- /**
- * Value of the upper
- * bound. A number that
- * satisfies the @ref match
- * operation of this class
- * must be equal to this
- * value or less, if the
- * bounds of the interval for
- * a valid range.
- */
- const double upper_bound;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
+ public:
+ /**
+ * Minimal double value. If the
+ * <tt>std::numeric_limits</tt>
+ * class is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const double min_double_value;
+
+ /**
+ * Maximal double value. If the
+ * numeric_limits class is
+ * available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const double max_double_value;
+
+ /**
+ * Constructor. Bounds can be
+ * specified within which a
+ * valid parameter has to
+ * be. If the upper bound is
+ * smaller than the lower
+ * bound, then the infinite
+ * interval is meant. The
+ * default values are chosen
+ * such that no bounds are
+ * enforced on parameters.
+ */
+ Double (const double lower_bound = min_double_value,
+ const double upper_bound = max_double_value);
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is a number and its
+ * value is within the
+ * specified range.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match. If bounds were
+ * specified to the
+ * constructor, then include
+ * them into this
+ * description.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Creates new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static Double *create (const std::string &description);
+
+ private:
+ /**
+ * Value of the lower
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or larger, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const double lower_bound;
+
+ /**
+ * Value of the upper
+ * bound. A number that
+ * satisfies the @ref match
+ * operation of this class
+ * must be equal to this
+ * value or less, if the
+ * bounds of the interval form
+ * a valid range.
+ */
+ const double upper_bound;
+
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
+ };
+
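A short sketch of using the pattern above directly through match(); the bounds [0,1] stand in for a damping parameter as mentioned in the comment, and the test strings are made up:

  #include <deal.II/base/parameter_handler.h>

  void check_damping_strings ()
  {
    const dealii::Patterns::Double damping_pattern (0.0, 1.0);

    const bool inside  = damping_pattern.match ("0.5");  // true: within [0,1]
    const bool above   = damping_pattern.match ("1.5");  // false: exceeds the upper bound
    const bool garbage = damping_pattern.match ("red");  // false: not a number at all
    (void)inside; (void)above; (void)garbage;
  }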
+ /**
+ * Test for the string being one
+ * of a sequence of values given
+ * like a regular expression. For
+ * example, if the string given
+ * to the constructor is
+ * <tt>"red|blue|black"</tt>, then the
+ * @ref match function returns
+ * <tt>true</tt> exactly if the string
+ * is either "red" or "blue" or
+ * "black". Spaces around the
+ * pipe signs do not matter and
+ * are eliminated.
+ */
+ class Selection : public PatternBase
+ {
+ public:
+ /**
+ * Constructor. Take the
+ * given parameter as the
+ * specification of valid
+ * strings.
+ */
+ Selection (const std::string &seq);
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is an element of
+ * the description list
+ * passed to the constructor.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match. Here, this is the
+ * list of valid strings
+ * passed to the constructor.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Creates new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static Selection *create (const std::string &description);
+
+ private:
+ /**
+ * List of valid strings as
+ * passed to the
+ * constructor. We don't make
+ * this string constant, as
+ * we process it somewhat in
+ * the constructor.
+ */
+ std::string sequence;
+
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
};
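A sketch of the Selection pattern in its usual ParameterHandler context (entry name, choices and default are arbitrary):

  #include <deal.II/base/parameter_handler.h>
  #include <string>

  void declare_and_read_color (dealii::ParameterHandler &prm)
  {
    prm.declare_entry ("Color", "red",
                       dealii::Patterns::Selection ("red|blue|black"),
                       "Color used when generating output");

    // After the parameter file has been read elsewhere, the returned value
    // is guaranteed to be one of the three alternatives above:
    const std::string color = prm.get ("Color");
    (void)color;
  }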
- /**
- * Test for the string being one
- * of a sequence of values given
- * like a regular expression. For
- * example, if the string given
- * to the constructor is
- * <tt>"red|blue|black"</tt>, then the
- * @ref match function returns
- * <tt>true</tt> exactly if the string
- * is either "red" or "blue" or
- * "black". Spaces around the
- * pipe signs do not matter and
- * are eliminated.
- */
- class Selection : public PatternBase
- {
- public:
- /**
- * Constructor. Take the
- * given parameter as the
- * specification of valid
- * strings.
- */
- Selection (const std::string &seq);
-
- /**
- * Return <tt>true</tt> if the
- * string is an element of
- * the description list
- * passed to the constructor.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match. Here, this is the
- * list of valid strings
- * passed to the constructor.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static Selection* create (const std::string& description);
-
- private:
- /**
- * List of valid strings as
- * passed to the
- * constructor. We don't make
- * this string constant, as
- * we process it somewhat in
- * the constructor.
- */
- std::string sequence;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
- };
+ /**
+ * This pattern matches a list of
+ * comma-separated values each of which
+ * has to match a pattern given to the
+ * constructor. With two additional
+ * parameters, the number of elements this
+ * list has to have can be specified. If
+ * none is specified, the list may have
+ * zero or more entries.
+ */
+ class List : public PatternBase
+ {
+ public:
+ /**
+ * Maximal integer value. If
+ * the numeric_limits class
+ * is available use this
+ * information to obtain the
+ * extremal values, otherwise
+ * set it so that this class
+ * understands that all values
+ * are allowed.
+ */
+ static const unsigned int max_int_value;
+
+ /**
+ * Constructor. Take the
+ * given parameter as the
+ * specification of valid
+ * elements of the list.
+ *
+ * The two other arguments can
+ * be used to denote minimal
+ * and maximal allowable
+ * lengths of the list.
+ */
- List (const PatternBase &base_pattern,
++ List (const PatternBase &base_pattern,
+ const unsigned int min_elements = 0,
+ const unsigned int max_elements = max_int_value);
+
+ /**
+ * Destructor.
+ */
+ virtual ~List ();
+
+ /**
+ * Return <tt>true</tt> if the
+ * string is a comma-separated
+ * list of strings each of
+ * which matches the pattern
+ * given to the constructor.
+ */
+ virtual bool match (const std::string &test_string) const;
+
+ /**
+ * Return a description of
+ * the pattern that valid
+ * strings are expected to
+ * match.
+ */
+ virtual std::string description () const;
+
+ /**
+ * Return a copy of the
+ * present object, which is
+ * newly allocated on the
+ * heap. Ownership of that
+ * object is transferred to
+ * the caller of this
+ * function.
+ */
+ virtual PatternBase *clone () const;
+
+ /**
+ * Creates new object if the start of
+ * description matches
+ * description_init. Ownership of that
+ * object is transferred to the caller
+ * of this function.
+ */
+ static List *create (const std::string &description);
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception.
+ */
+ DeclException2 (ExcInvalidRange,
+ int, int,
+ << "The values " << arg1 << " and " << arg2
+ << " do not form a valid range.");
+ //@}
+ private:
+ /**
+ * Copy of the pattern that
+ * each element of the list has
+ * to satisfy.
+ */
+ PatternBase *pattern;
+
+ /**
+ * Minimum number of elements
+ * the list must have.
+ */
+ const unsigned int min_elements;
+
+ /**
+ * Maximum number of elements
+ * the list may have.
+ */
+ const unsigned int max_elements;
- /**
- * This pattern matches a list of
- * comma-separated values each of which
- * have to match a pattern given to the
- * constructor. With two additional
- * parameters, the number of elements this
- * list has to have can be specified. If
- * none is specified, the list may have
- * zero or more entries.
- */
- class List : public PatternBase
- {
- public:
- /**
- * Maximal integer value. If
- * the numeric_limits class
- * is available use this
- * information to obtain the
- * extremal values, otherwise
- * set it so that this class
- * understands that all values
- * are allowed.
- */
- static const unsigned int max_int_value;
-
- /**
- * Constructor. Take the
- * given parameter as the
- * specification of valid
- * elements of the list.
- *
- * The two other arguments can
- * be used to denote minimal
- * and maximal allowable
- * lengths of the list.
- */
- List (const PatternBase &base_pattern,
- const unsigned int min_elements = 0,
- const unsigned int max_elements = max_int_value);
-
- /**
- * Destructor.
- */
- virtual ~List ();
-
- /**
- * Return <tt>true</tt> if the
- * string is a comma-separated
- * list of strings each of
- * which match the pattern
- * given to the constructor.
- */
- virtual bool match (const std::string &test_string) const;
-
- /**
- * Return a description of
- * the pattern that valid
- * strings are expected to
- * match.
- */
- virtual std::string description () const;
-
- /**
- * Return a copy of the
- * present object, which is
- * newly allocated on the
- * heap. Ownership of that
- * object is transferred to
- * the caller of this
- * function.
- */
- virtual PatternBase * clone () const;
-
- /**
- * Creates new object if the start of
- * description matches
- * description_init. Ownership of that
- * object is transferred to the caller
- * of this function.
- */
- static List* create (const std::string& description);
-
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this object.
- */
- std::size_t memory_consumption () const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception.
- */
- DeclException2 (ExcInvalidRange,
- int, int,
- << "The values " << arg1 << " and " << arg2
- << " do not form a valid range.");
- //@}
- private:
- /**
- * Copy of the pattern that
- * each element of the list has
- * to satisfy.
- */
- PatternBase *pattern;
-
- /**
- * Minimum number of elements
- * the list must have.
- */
- const unsigned int min_elements;
-
- /**
- * Maximum number of elements
- * the list must have.
- */
- const unsigned int max_elements;
-
- /**
- * Initial part of description
- */
- static const char* description_init;
+ /**
+ * Initial part of description
+ */
+ static const char *description_init;
};
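A sketch of the List pattern: a 2d point read as exactly two comma-separated doubles (entry name and default are arbitrary):

  #include <deal.II/base/parameter_handler.h>

  void declare_evaluation_point (dealii::ParameterHandler &prm)
  {
    // Accepts strings such as "0.25, 0.75"; fewer or more than two
    // entries, or entries that are not doubles, are rejected.
    prm.declare_entry ("Evaluation point", "0.5, 0.5",
                       dealii::Patterns::List (dealii::Patterns::Double (), 2, 2),
                       "Coordinates of the point at which the solution is evaluated");
  }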
inline
- const IndexSet& Partitioner::ghost_indices() const
- const IndexSet &Partitioner::ghost_indices() const
++ const IndexSet &Partitioner::ghost_indices() const
{
return ghost_indices_data;
}
template <int dim>
class QProjector
{
+ public:
+ /**
+ * Define a typedef for a
+ * quadrature that acts on an
+ * object of one dimension
+ * less. For cells, this would
+ * then be a face quadrature.
+ */
+ typedef Quadrature<dim-1> SubQuadrature;
+
+ /**
+ * Compute the quadrature points
+ * on the cell if the given
+ * quadrature formula is used on
+ * face <tt>face_no</tt>. For further
+ * details, see the general doc
+ * for this class.
+ */
+ static void project_to_face (const SubQuadrature &quadrature,
+ const unsigned int face_no,
+ std::vector<Point<dim> > &q_points);
+
+ /**
+ * Compute the cell quadrature
+ * formula corresponding to using
+ * <tt>quadrature</tt> on face
+ * <tt>face_no</tt>. For further
+ * details, see the general doc
+ * for this class.
+ */
+ static Quadrature<dim>
+ project_to_face (const SubQuadrature &quadrature,
+ const unsigned int face_no);
+
+ /**
+ * Compute the quadrature points on the
+ * cell if the given quadrature formula is
+ * used on face <tt>face_no</tt>, subface
+ * number <tt>subface_no</tt> corresponding
+ * to RefineCase::Type
+ * <tt>ref_case</tt>. The last argument is
+ * only used in 3D.
+ *
+ * @note Only the points are
+ * transformed. The quadrature
+ * weights are the same as those
+ * of the original rule.
+ */
+ static void project_to_subface (const SubQuadrature &quadrature,
+ const unsigned int face_no,
+ const unsigned int subface_no,
- std::vector<Point<dim> > &q_points,
++ std::vector<Point<dim> > &q_points,
+ const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
+
+ /**
+ * Compute the cell quadrature formula
+ * corresponding to using
+ * <tt>quadrature</tt> on subface
+ * <tt>subface_no</tt> of face
+ * <tt>face_no</tt> with
+ * RefinementCase<dim-1>
+ * <tt>ref_case</tt>. The last argument is
+ * only used in 3D.
+ *
+ * @note Only the points are
+ * transformed. The quadrature
+ * weights are the same as those
+ * of the original rule.
+ */
+ static Quadrature<dim>
+ project_to_subface (const SubQuadrature &quadrature,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
+
+ /**
+ * Take a face quadrature formula
+ * and generate a cell quadrature
+ * formula from it where the
+ * quadrature points of the given
+ * argument are projected on all
+ * faces.
+ *
+ * The weights of the new rule
+ * are replications of the
+ * original weights. Thus, the
+ * sum of the weights is not one,
+ * but the number of faces, which
+ * is the surface of the
+ * reference cell.
+ *
+ * This in particular allows us
+ * to extract a subset of points
+ * corresponding to a single face
+ * and use it as a quadrature on
+ * this face, as is done in
+ * FEFaceValues.
+ *
+ * @note In 3D, this function
+ * produces eight sets of
+ * quadrature points for each
+ * face, in order to cope with
+ * possibly different
+ * orientations of the mesh.
+ */
+ static Quadrature<dim>
+ project_to_all_faces (const SubQuadrature &quadrature);
+
+ /**
+ * Take a face quadrature formula
+ * and generate a cell quadrature
+ * formula from it where the
+ * quadrature points of the given
+ * argument are projected on all
+ * subfaces.
+ *
+ * Like in project_to_all_faces(),
+ * the weights of the new rule
+ * sum up to the number of faces
+ * (not subfaces), which
+ * is the surface of the
+ * reference cell.
+ *
+ * This in particular allows us
+ * to extract a subset of points
+ * corresponding to a single subface
+ * and use it as a quadrature on
+ * this face, as is done in
+ * FESubfaceValues.
+ */
+ static Quadrature<dim>
+ project_to_all_subfaces (const SubQuadrature &quadrature);
+
+ /**
+ * Project a given quadrature
+ * formula to a child of a
+ * cell. You may want to use this
+ * function in case you want to
+ * extend an integral only over
+ * the area which a potential
+ * child would occupy. The child
+ * numbering is the same as the
+ * children would be numbered
+ * upon refinement of the cell.
+ *
+ * As integration using this
+ * quadrature formula now only
+ * extends over a fraction of the
+ * cell, the weights of the
+ * resulting object are divided by
+ * GeometryInfo<dim>::children_per_cell.
+ */
+ static
+ Quadrature<dim>
- project_to_child (const Quadrature<dim> &quadrature,
++ project_to_child (const Quadrature<dim> &quadrature,
+ const unsigned int child_no);
+
+ /**
+ * Project a quadrature rule to
+ * all children of a
+ * cell. Similarly to
+ * project_to_all_subfaces(),
+ * this function replicates the
+ * formula generated by
+ * project_to_child() for all
+ * children, such that the
+ * weights sum up to one, the
+ * volume of the total cell
+ * again.
+ *
+ * The child
+ * numbering is the same as the
+ * children would be numbered
+ * upon refinement of the cell.
+ */
+ static
+ Quadrature<dim>
- project_to_all_children (const Quadrature<dim> &quadrature);
++ project_to_all_children (const Quadrature<dim> &quadrature);
+
+ /**
+ * Project the one-dimensional
+ * rule <tt>quadrature</tt> to
+ * the straight line connecting
+ * the points <tt>p1</tt> and
+ * <tt>p2</tt>.
+ */
+ static
+ Quadrature<dim>
+ project_to_line(const Quadrature<1> &quadrature,
+ const Point<dim> &p1,
+ const Point<dim> &p2);
+
+ /**
+ * Since the
+ * project_to_all_faces() and
+ * project_to_all_subfaces()
+ * functions chain together the
+ * quadrature points and weights
+ * of all projections of a face
+ * quadrature formula to the
+ * faces or subfaces of a cell,
+ * we need a way to identify
+ * where the starting index of
+ * the points and weights for a
+ * particular face or subface
+ * is. This class provides this:
+ * there are static member
+ * functions that generate
+ * objects of this type, given
+ * face or subface indices, and
+ * you can then use the generated
+ * object in place of an integer
+ * that denotes the offset of a
+ * given dataset.
+ *
+ * @author Wolfgang Bangerth, 2003
+ */
+ class DataSetDescriptor
+ {
public:
- /**
- * Define a typedef for a
- * quadrature that acts on an
- * object of one dimension
- * less. For cells, this would
- * then be a face quadrature.
- */
- typedef Quadrature<dim-1> SubQuadrature;
-
- /**
- * Compute the quadrature points
- * on the cell if the given
- * quadrature formula is used on
- * face <tt>face_no</tt>. For further
- * details, see the general doc
- * for this class.
- */
- static void project_to_face (const SubQuadrature &quadrature,
- const unsigned int face_no,
- std::vector<Point<dim> > &q_points);
-
- /**
- * Compute the cell quadrature
- * formula corresponding to using
- * <tt>quadrature</tt> on face
- * <tt>face_no</tt>. For further
- * details, see the general doc
- * for this class.
- */
- static Quadrature<dim>
- project_to_face (const SubQuadrature &quadrature,
- const unsigned int face_no);
-
- /**
- * Compute the quadrature points on the
- * cell if the given quadrature formula is
- * used on face <tt>face_no</tt>, subface
- * number <tt>subface_no</tt> corresponding
- * to RefineCase::Type
- * <tt>ref_case</tt>. The last argument is
- * only used in 3D.
- *
- * @note Only the points are
- * transformed. The quadrature
- * weights are the same as those
- * of the original rule.
- */
- static void project_to_subface (const SubQuadrature &quadrature,
- const unsigned int face_no,
- const unsigned int subface_no,
- std::vector<Point<dim> > &q_points,
- const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
-
- /**
- * Compute the cell quadrature formula
- * corresponding to using
- * <tt>quadrature</tt> on subface
- * <tt>subface_no</tt> of face
- * <tt>face_no</tt> with
- * RefinementCase<dim-1>
- * <tt>ref_case</tt>. The last argument is
- * only used in 3D.
- *
- * @note Only the points are
- * transformed. The quadrature
- * weights are the same as those
- * of the original rule.
- */
- static Quadrature<dim>
- project_to_subface (const SubQuadrature &quadrature,
- const unsigned int face_no,
- const unsigned int subface_no,
- const RefinementCase<dim-1> &ref_case=RefinementCase<dim-1>::isotropic_refinement);
-
- /**
- * Take a face quadrature formula
- * and generate a cell quadrature
- * formula from it where the
- * quadrature points of the given
- * argument are projected on all
- * faces.
- *
- * The weights of the new rule
- * are replications of the
- * original weights. Thus, the
- * sum of the weights is not one,
- * but the number of faces, which
- * is the surface of the
- * reference cell.
- *
- * This in particular allows us
- * to extract a subset of points
- * corresponding to a single face
- * and use it as a quadrature on
- * this face, as is done in
- * FEFaceValues.
- *
- * @note In 3D, this function
- * produces eight sets of
- * quadrature points for each
- * face, in order to cope
- * possibly different
- * orientations of the mesh.
- */
- static Quadrature<dim>
- project_to_all_faces (const SubQuadrature &quadrature);
-
- /**
- * Take a face quadrature formula
- * and generate a cell quadrature
- * formula from it where the
- * quadrature points of the given
- * argument are projected on all
- * subfaces.
- *
- * Like in project_to_all_faces(),
- * the weights of the new rule
- * sum up to the number of faces
- * (not subfaces), which
- * is the surface of the
- * reference cell.
- *
- * This in particular allows us
- * to extract a subset of points
- * corresponding to a single subface
- * and use it as a quadrature on
- * this face, as is done in
- * FESubfaceValues.
- */
- static Quadrature<dim>
- project_to_all_subfaces (const SubQuadrature &quadrature);
-
- /**
- * Project a given quadrature
- * formula to a child of a
- * cell. You may want to use this
- * function in case you want to
- * extend an integral only over
- * the area which a potential
- * child would occupy. The child
- * numbering is the same as the
- * children would be numbered
- * upon refinement of the cell.
- *
- * As integration using this
- * quadrature formula now only
- * extends over a fraction of the
- * cell, the weights of the
- * resulting object are divided by
- * GeometryInfo<dim>::children_per_cell.
- */
+ /**
+ * Default constructor. This
+ * doesn't do much except
+ * generating an invalid
+ * index, since you didn't
+ * give a valid descriptor of
+ * the cell, face, or subface
+ * you wanted.
+ */
+ DataSetDescriptor ();
+
+ /**
+ * Static function to
+ * generate the offset of a
+ * cell. Since we only have
+ * one cell per quadrature
+ * object, this offset is of
+ * course zero, but we carry
+ * this function around for
+ * consistency with the other
+ * static functions.
+ */
+ static DataSetDescriptor cell ();
+
+ /**
+ * Static function to generate an
+ * offset object for a given face of a
+ * cell with the given face
+ * orientation, flip and rotation. This
+ * function of course is only allowed
+ * if <tt>dim>=2</tt>, and the face
+ * orientation, flip and rotation are
+ * ignored if the space dimension
+ * equals 2.
+ *
+ * The last argument denotes
+ * the number of quadrature
+ * points the
+ * lower-dimensional face
+ * quadrature formula (the
+ * one that has been
+ * projected onto the faces)
+ * has.
+ */
static
- Quadrature<dim>
- project_to_child (const Quadrature<dim> &quadrature,
- const unsigned int child_no);
-
- /**
- * Project a quadrature rule to
- * all children of a
- * cell. Similarly to
- * project_to_all_subfaces(),
- * this function replicates the
- * formula generated by
- * project_to_child() for all
- * children, such that the
- * weights sum up to one, the
- * volume of the total cell
- * again.
- *
- * The child
- * numbering is the same as the
- * children would be numbered
- * upon refinement of the cell.
- */
+ DataSetDescriptor
+ face (const unsigned int face_no,
+ const bool face_orientation,
+ const bool face_flip,
+ const bool face_rotation,
+ const unsigned int n_quadrature_points);
+
+ /**
+ * Static function to generate an
+ * offset object for a given subface of
+ * a cell with the given face
+ * orientation, flip and rotation. This
+ * function of course is only allowed
+ * if <tt>dim>=2</tt>, and the face
+ * orientation, flip and rotation are
+ * ignored if the space dimension
+ * equals 2.
+ *
+ * The last but one argument denotes
+ * the number of quadrature
+ * points the
+ * lower-dimensional face
+ * quadrature formula (the
+ * one that has been
+ * projected onto the faces)
+ * has.
+ *
+ * Through the last argument
+ * anisotropic refinement can be
+ * respected.
+ */
static
- Quadrature<dim>
- project_to_all_children (const Quadrature<dim> &quadrature);
-
- /**
- * Project the onedimensional
- * rule <tt>quadrature</tt> to
- * the straight line connecting
- * the points <tt>p1</tt> and
- * <tt>p2</tt>.
- */
- static
- Quadrature<dim>
- project_to_line(const Quadrature<1>& quadrature,
- const Point<dim>& p1,
- const Point<dim>& p2);
-
- /**
- * Since the
- * project_to_all_faces() and
- * project_to_all_subfaces()
- * functions chain together the
- * quadrature points and weights
- * of all projections of a face
- * quadrature formula to the
- * faces or subfaces of a cell,
- * we need a way to identify
- * where the starting index of
- * the points and weights for a
- * particular face or subface
- * is. This class provides this:
- * there are static member
- * functions that generate
- * objects of this type, given
- * face or subface indices, and
- * you can then use the generated
- * object in place of an integer
- * that denotes the offset of a
- * given dataset.
- *
- * @author Wolfgang Bangerth, 2003
- */
- class DataSetDescriptor
- {
- public:
- /**
- * Default constructor. This
- * doesn't do much except
- * generating an invalid
- * index, since you didn't
- * give a valid descriptor of
- * the cell, face, or subface
- * you wanted.
- */
- DataSetDescriptor ();
-
- /**
- * Static function to
- * generate the offset of a
- * cell. Since we only have
- * one cell per quadrature
- * object, this offset is of
- * course zero, but we carry
- * this function around for
- * consistency with the other
- * static functions.
- */
- static DataSetDescriptor cell ();
-
- /**
- * Static function to generate an
- * offset object for a given face of a
- * cell with the given face
- * orientation, flip and rotation. This
- * function of course is only allowed
- * if <tt>dim>=2</tt>, and the face
- * orientation, flip and rotation are
- * ignored if the space dimension
- * equals 2.
- *
- * The last argument denotes
- * the number of quadrature
- * points the
- * lower-dimensional face
- * quadrature formula (the
- * one that has been
- * projected onto the faces)
- * has.
- */
- static
- DataSetDescriptor
- face (const unsigned int face_no,
- const bool face_orientation,
- const bool face_flip,
- const bool face_rotation,
- const unsigned int n_quadrature_points);
-
- /**
- * Static function to generate an
- * offset object for a given subface of
- * a cell with the given face
- * orientation, flip and rotation. This
- * function of course is only allowed
- * if <tt>dim>=2</tt>, and the face
- * orientation, flip and rotation are
- * ignored if the space dimension
- * equals 2.
- *
- * The last but one argument denotes
- * the number of quadrature
- * points the
- * lower-dimensional face
- * quadrature formula (the
- * one that has been
- * projected onto the faces)
- * has.
- *
- * Through the last argument
- * anisotropic refinement can be
- * respected.
- */
- static
- DataSetDescriptor
- subface (const unsigned int face_no,
- const unsigned int subface_no,
- const bool face_orientation,
- const bool face_flip,
- const bool face_rotation,
- const unsigned int n_quadrature_points,
- const internal::SubfaceCase<dim> ref_case=internal::SubfaceCase<dim>::case_isotropic);
-
- /**
- * Conversion operator to an
- * integer denoting the
- * offset of the first
- * element of this dataset in
- * the set of quadrature
- * formulas all projected
- * onto faces and
- * subfaces. This conversion
- * operator allows us to use
- * offset descriptor objects
- * in place of integer
- * offsets.
- */
- operator unsigned int () const;
-
- private:
- /**
- * Store the integer offset
- * for a given cell, face, or
- * subface.
- */
- const unsigned int dataset_offset;
-
- /**
- * This is the real
- * constructor, but it is
- * private and thus only
- * available to the static
- * member functions above.
- */
- DataSetDescriptor (const unsigned int dataset_offset);
- };
+ DataSetDescriptor
+ subface (const unsigned int face_no,
+ const unsigned int subface_no,
+ const bool face_orientation,
+ const bool face_flip,
+ const bool face_rotation,
+ const unsigned int n_quadrature_points,
+ const internal::SubfaceCase<dim> ref_case=internal::SubfaceCase<dim>::case_isotropic);
+
+ /**
+ * Conversion operator to an
+ * integer denoting the
+ * offset of the first
+ * element of this dataset in
+ * the set of quadrature
+ * formulas all projected
+ * onto faces and
+ * subfaces. This conversion
+ * operator allows us to use
+ * offset descriptor objects
+ * in place of integer
+ * offsets.
+ */
+ operator unsigned int () const;
private:
- /**
- * Given a quadrature object in
- * 2d, reflect all quadrature
- * points at the main diagonal
- * and return them with their
- * original weights.
- *
- * This function is necessary for
- * projecting a 2d quadrature
- * rule onto the faces of a 3d
- * cube, since there we need both
- * orientations.
- */
- static Quadrature<2> reflect (const Quadrature<2> &q);
-
- /**
- * Given a quadrature object in
- * 2d, rotate all quadrature
- * points by @p n_times * 90 degrees
- * counterclockwise
- * and return them with their
- * original weights.
- *
- * This function is necessary for
- * projecting a 2d quadrature
- * rule onto the faces of a 3d
- * cube, since there we need all
- * rotations to account for
- * face_flip and face_rotation
- * of non-standard faces.
- */
- static Quadrature<2> rotate (const Quadrature<2> &q,
- const unsigned int n_times);
+ /**
+ * Store the integer offset
+ * for a given cell, face, or
+ * subface.
+ */
+ const unsigned int dataset_offset;
+
+ /**
+ * This is the real
+ * constructor, but it is
+ * private and thus only
+ * available to the static
+ * member functions above.
+ */
+ DataSetDescriptor (const unsigned int dataset_offset);
+ };
+
+ private:
+ /**
+ * Given a quadrature object in
+ * 2d, reflect all quadrature
+ * points at the main diagonal
+ * and return them with their
+ * original weights.
+ *
+ * This function is necessary for
+ * projecting a 2d quadrature
+ * rule onto the faces of a 3d
+ * cube, since there we need both
+ * orientations.
+ */
+ static Quadrature<2> reflect (const Quadrature<2> &q);
+
+ /**
+ * Given a quadrature object in
+ * 2d, rotate all quadrature
+ * points by @p n_times * 90 degrees
+ * counterclockwise
+ * and return them with their
+ * original weights.
+ *
+ * This function is necessary for
+ * projecting a 2d quadrature
+ * rule onto the faces of a 3d
+ * cube, since there we need all
+ * rotations to account for
+ * face_flip and face_rotation
+ * of non-standard faces.
+ */
+ static Quadrature<2> rotate (const Quadrature<2> &q,
+ const unsigned int n_times);
};
/*@}*/
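A sketch of the projection functions and the DataSetDescriptor declared above; a two-point Gauss rule on a face is projected onto the 2d reference cell (the header paths are the usual ones but may need adjusting for other library versions):

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/base/qprojector.h>

  void project_face_rule ()
  {
    const dealii::QGauss<1> face_quadrature (2);

    // The two face quadrature points written in cell coordinates of face 1:
    const dealii::Quadrature<2> on_face_1
      = dealii::QProjector<2>::project_to_face (face_quadrature, 1);

    // All four faces chained together; the weights sum to 4, the number of faces.
    const dealii::Quadrature<2> on_all_faces
      = dealii::QProjector<2>::project_to_all_faces (face_quadrature);

    // Offset at which the data of face 3 starts inside on_all_faces; the
    // orientation flags are ignored in 2d, as documented above.
    const unsigned int offset
      = dealii::QProjector<2>::DataSetDescriptor::face (3, true, false, false,
                                                        face_quadrature.size ());
    (void)on_face_1; (void)on_all_faces; (void)offset;
  }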
template<typename T, typename P = void>
class SmartPointer
{
- public:
- /**
- * Standard constructor for null
- * pointer. The id of this
- * pointer is set to the name of
- * the class P.
- */
- SmartPointer ();
-
- /*
- * Copy constructor for
- * SmartPointer. We do now
- * copy the object subscribed to
- * from <tt>tt</tt>, but subscribe
- * ourselves to it again.
- */
- template <class Q>
- SmartPointer (const SmartPointer<T,Q> &tt);
-
- /*
- * Copy constructor for
- * SmartPointer. We do now
- * copy the object subscribed to
- * from <tt>tt</tt>, but subscribe
- * ourselves to it again.
- */
- SmartPointer (const SmartPointer<T,P> &tt);
-
- /**
- * Constructor taking a normal
- * pointer. If possible, i.e. if
- * the pointer is not a null
- * pointer, the constructor
- * subscribes to the given object
- * to lock it, i.e. to prevent
- * its destruction before the end
- * of its use.
- *
- * The <tt>id</tt> is used in the
- * call to
- * Subscriptor::subscribe(id) and
- * by ~SmartPointer() in the call
- * to Subscriptor::unsubscribe().
- */
- SmartPointer (T *t, const char* id);
-
- /**
- * Constructor taking a normal
- * pointer. If possible, i.e. if
- * the pointer is not a null
- * pointer, the constructor
- * subscribes to the given object
- * to lock it, i.e. to prevent
- * its destruction before the end
- * of its use. The id of this
- * pointer is set to the name of
- * the class P.
- */
- SmartPointer (T *t);
-
-
- /**
- * Destructor, removing the
- * subscription.
- */
- ~SmartPointer();
-
- /**
- * Assignment operator for normal
- * pointers. The pointer
- * subscribes to the new object
- * automatically and unsubscribes
- * to an old one if it exists. It
- * will not try to subscribe to a
- * null-pointer, but still
- * delete the old subscription.
- */
- SmartPointer<T,P> & operator= (T *tt);
-
- /**
- * Assignment operator for
- * SmartPointer. The pointer
- * subscribes to the new object
- * automatically and unsubscribes
- * to an old one if it exists.
- */
- template <class Q>
- SmartPointer<T,P> & operator= (const SmartPointer<T,Q> &tt);
-
- /**
- * Assignment operator for
- * SmartPointer. The pointer
- * subscribes to the new object
- * automatically and unsubscribes
- * to an old one if it exists.
- */
- SmartPointer<T,P> & operator= (const SmartPointer<T,P> &tt);
-
- /**
- * Delete the object pointed to
- * and set the pointer to zero.
- */
- void clear ();
-
- /**
- * Conversion to normal pointer.
- */
- operator T* () const;
-
- /**
- * Dereferencing operator. This
- * operator throws an
- * ExcNotInitialized if the
- * pointer is a null pointer.
- */
- T& operator * () const;
-
- /**
- * Dereferencing operator. This
- * operator throws an
- * ExcNotInitialized if the
- * pointer is a null pointer.
- */
- T * operator -> () const;
-
- /**
- * Exchange the pointers of this
- * object and the argument. Since
- * both the objects to which is
- * pointed are subscribed to
- * before and after, we do not
- * have to change their
- * subscription counters.
- *
- * Note that this function (with
- * two arguments) and the
- * respective functions where one
- * of the arguments is a pointer
- * and the other one is a C-style
- * pointer are implemented in
- * global namespace.
- */
- template <class Q>
- void swap (SmartPointer<T,Q> &tt);
-
- /**
- * Swap pointers between this
- * object and the pointer
- * given. As this releases the
- * object pointed to presently,
- * we reduce its subscription
- * count by one, and increase it
- * at the object which we will
- * point to in the future.
- *
- * Note that we indeed need a
- * reference of a pointer, as we
- * want to change the pointer
- * variable which we are given.
- */
- void swap (T *&tt);
-
- /**
- * Return an estimate of the
- * amount of memory (in bytes)
- * used by this class. Note in
- * particular, that this only
- * includes the amount of memory
- * used by <b>this</b> object, not
- * by the object pointed to.
- */
- std::size_t memory_consumption () const;
-
- private:
- /**
- * Pointer to the object we want
- * to subscribt to. Since it is
- * often necessary to follow this
- * pointer when debugging, we
- * have deliberately chosen a
- * short name.
- */
- T * t;
- /**
- * The identification for the
- * subscriptor.
- */
- const char* const id;
+ public:
+ /**
+ * Standard constructor for null
+ * pointer. The id of this
+ * pointer is set to the name of
+ * the class P.
+ */
+ SmartPointer ();
+
+ /*
+ * Copy constructor for
+ * SmartPointer. We do not
+ * copy the object subscribed to
+ * from <tt>tt</tt>, but subscribe
+ * ourselves to it again.
+ */
+ template <class Q>
+ SmartPointer (const SmartPointer<T,Q> &tt);
+
+ /*
+ * Copy constructor for
+ * SmartPointer. We do not
+ * copy the object subscribed to
+ * from <tt>tt</tt>, but subscribe
+ * ourselves to it again.
+ */
+ SmartPointer (const SmartPointer<T,P> &tt);
+
+ /**
+ * Constructor taking a normal
+ * pointer. If possible, i.e. if
+ * the pointer is not a null
+ * pointer, the constructor
+ * subscribes to the given object
+ * to lock it, i.e. to prevent
+ * its destruction before the end
+ * of its use.
+ *
+ * The <tt>id</tt> is used in the
+ * call to
+ * Subscriptor::subscribe(id) and
+ * by ~SmartPointer() in the call
+ * to Subscriptor::unsubscribe().
+ */
+ SmartPointer (T *t, const char *id);
+
+ /**
+ * Constructor taking a normal
+ * pointer. If possible, i.e. if
+ * the pointer is not a null
+ * pointer, the constructor
+ * subscribes to the given object
+ * to lock it, i.e. to prevent
+ * its destruction before the end
+ * of its use. The id of this
+ * pointer is set to the name of
+ * the class P.
+ */
+ SmartPointer (T *t);
+
+
+ /**
+ * Destructor, removing the
+ * subscription.
+ */
+ ~SmartPointer();
+
+ /**
+ * Assignment operator for normal
+ * pointers. The pointer
+ * subscribes to the new object
+ * automatically and unsubscribes
+ * to an old one if it exists. It
+ * will not try to subscribe to a
+ * null-pointer, but still
+ * delete the old subscription.
+ */
+ SmartPointer<T,P> &operator= (T *tt);
+
+ /**
+ * Assignment operator for
+ * SmartPointer. The pointer
+ * subscribes to the new object
+ * automatically and unsubscribes
+ * to an old one if it exists.
+ */
+ template <class Q>
+ SmartPointer<T,P> &operator= (const SmartPointer<T,Q> &tt);
+
+ /**
+ * Assignment operator for
+ * SmartPointer. The pointer
+ * subscribes to the new object
+ * automatically and unsubscribes
+ * to an old one if it exists.
+ */
+ SmartPointer<T,P> &operator= (const SmartPointer<T,P> &tt);
+
+ /**
+ * Delete the object pointed to
+ * and set the pointer to zero.
+ */
+ void clear ();
+
+ /**
+ * Conversion to normal pointer.
+ */
+ operator T *() const;
+
+ /**
+ * Dereferencing operator. This
+ * operator throws an
+ * ExcNotInitialized if the
+ * pointer is a null pointer.
+ */
+ T &operator * () const;
+
+ /**
+ * Dereferencing operator. This
+ * operator throws an
+ * ExcNotInitialized if the
+ * pointer is a null pointer.
+ */
+ T *operator -> () const;
+
+ /**
+ * Exchange the pointers of this
+ * object and the argument. Since
+ * both of the objects pointed
+ * to are subscribed to
+ * before and after, we do not
+ * have to change their
+ * subscription counters.
+ *
+ * Note that this function (with
+ * two arguments) and the
+ * respective functions where one
+ * of the arguments is a SmartPointer
+ * and the other one is a C-style
+ * pointer are implemented in
+ * global namespace.
+ */
+ template <class Q>
+ void swap (SmartPointer<T,Q> &tt);
+
+ /**
+ * Swap pointers between this
+ * object and the pointer
+ * given. As this releases the
+ * object pointed to presently,
+ * we reduce its subscription
+ * count by one, and increase it
+ * at the object which we will
+ * point to in the future.
+ *
+ * Note that we indeed need a
+ * reference of a pointer, as we
+ * want to change the pointer
+ * variable which we are given.
+ */
- void swap (T *&tt);
++ void swap (T *&tt);
+
+ /**
+ * Return an estimate of the
+ * amount of memory (in bytes)
+ * used by this class. Note in
+ * particular, that this only
+ * includes the amount of memory
+ * used by <b>this</b> object, not
+ * by the object pointed to.
+ */
+ std::size_t memory_consumption () const;
+
+ private:
+ /**
+ * Pointer to the object we want
+ * to subscribe to. Since it is
+ * often necessary to follow this
+ * pointer when debugging, we
+ * have deliberately chosen a
+ * short name.
+ */
+ T *t;
+ /**
+ * The identification for the
+ * subscriptor.
+ */
+ const char *const id;
};
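A sketch of typical SmartPointer use; the pointed-to class must derive from Subscriptor, which Triangulation does (the identifier string is arbitrary):

  #include <deal.II/base/smartpointer.h>
  #include <deal.II/grid/tria.h>
  #include <deal.II/grid/grid_generator.h>

  void use_smart_pointer ()
  {
    dealii::Triangulation<2> triangulation;
    dealii::GridGenerator::hyper_cube (triangulation);

    // Subscribes to 'triangulation': the object may not be destroyed while
    // tria_ptr still points to it (checked by an assertion in debug mode).
    dealii::SmartPointer<dealii::Triangulation<2> > tria_ptr (&triangulation,
                                                              "use_smart_pointer");

    // Otherwise the smart pointer behaves like a plain pointer:
    tria_ptr->refine_global (1);
  }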
*/
namespace types
{
- /**
- * The type used to denote
- * subdomain_ids of cells.
- *
- * See the @ref GlossSubdomainId
- * "glossary" for more information.
- *
- * There is a special value,
- * numbers::invalid_subdomain_id
- * that is used to indicate an
- * invalid value of this type.
- */
+ /**
+ * The type used to denote
+ * subdomain_ids of cells.
+ *
+ * See the @ref GlossSubdomainId
+ * "glossary" for more information.
+ *
+ * There is a special value,
+ * numbers::invalid_subdomain_id
+ * that is used to indicate an
+ * invalid value of this type.
+ */
typedef unsigned int subdomain_id;
- /**
- * @deprecated Old name for the typedef above.
- */
+ /**
+ * @deprecated Old name for the typedef above.
+ */
typedef subdomain_id subdomain_id_t;
- /**
- * @deprecated Use numbers::invalid_subdomain_id
- */
+ /**
+ * @deprecated Use numbers::invalid_subdomain_id
+ */
const unsigned int invalid_subdomain_id = static_cast<subdomain_id>(-1);
- /**
- * @deprecated Use numbers::artificial_subdomain_id
- */
+ /**
+ * @deprecated Use numbers::artificial_subdomain_id
+ */
const unsigned int artificial_subdomain_id = static_cast<subdomain_id>(-2);
- /**
- * The type used for global indices of
- * degrees of freedom. While in sequential
- * computations the 4 billion indices of
- * 32-bit unsigned integers is plenty,
- * parallel computations using the
- * parallel::distributed::Triangulation
- * class can overflow this number and we
- * need a bigger index space.
- *
- * The data type always indicates an
- * unsigned integer type.
- */
- // TODO: we should check that unsigned long long int
- // has the same size as uint64_t
+//#define DEAL_II_USE_LARGE_INDEX_TYPE
+#ifdef DEAL_II_USE_LARGE_INDEX_TYPE
- /**
- * An identifier that denotes the MPI type
- * associated with types::global_dof_index.
- */
++ /**
++ * The type used for global indices of
++ * degrees of freedom. While in sequential
++ * computations the 4 billion indices of
++ * 32-bit unsigned integers is plenty,
++ * parallel computations using the
++ * parallel::distributed::Triangulation
++ * class can overflow this number and we
++ * need a bigger index space.
++ *
++ * The data type always indicates an
++ * unsigned integer type.
++ */
++ // TODO: we should check that unsigned long long int
++ // has the same size as uint64_t
+ typedef unsigned long long int global_dof_index;
+
- * The type used to denote global dof
- * indices.
+ /**
- /**
- * The type used for global indices of
- * degrees of freedom. While in sequential
- * computations the 4 billion indices of
- * 32-bit unsigned integers is plenty,
- * parallel computations using the
- * parallel::distributed::Triangulation
- * class can overflow this number and we
- * need a bigger index space.
- *
- * The data type always indicates an
- * unsigned integer type.
- */
++ * An identifier that denotes the MPI type
++ * associated with types::global_dof_index.
++ */
+# define DEAL_II_DOF_INDEX_MPI_TYPE MPI_UNSIGNED_LONG_LONG
+#else
++ /**
++ * The type used for global indices of
++ * degrees of freedom. While in sequential
++ * computations the 4 billion indices of
++ * 32-bit unsigned integers is plenty,
++ * parallel computations using the
++ * parallel::distributed::Triangulation
++ * class can overflow this number and we
++ * need a bigger index space.
++ *
++ * The data type always indicates an
++ * unsigned integer type.
+ */
typedef unsigned int global_dof_index;
- /**
- * An identifier that denotes the MPI type
- * associated with types::global_dof_index.
- */
++ /**
++ * An identifier that denotes the MPI type
++ * associated with types::global_dof_index.
++ */
+# define DEAL_II_DOF_INDEX_MPI_TYPE MPI_UNSIGNED
+#endif
+
- /**
- * @deprecated Use numbers::invalid_dof_index
- */
+ /**
+ * @deprecated Use numbers::invalid_dof_index
+ */
const global_dof_index invalid_dof_index = static_cast<global_dof_index>(-1);
- /**
- * The type used to denote boundary indicators associated with every
- * piece of the boundary and, in the case of meshes that describe
- * manifolds in higher dimensions, associated with every cell.
- *
- * There is a special value, numbers::internal_face_boundary_id
- * that is used to indicate an invalid value of this type and that
- * is used as the boundary indicator for faces that are in the interior
- * of the domain and therefore not part of any addressable boundary
- * component.
- */
+
+ /**
+ * The type used to denote boundary indicators associated with every
+ * piece of the boundary and, in the case of meshes that describe
+ * manifolds in higher dimensions, associated with every cell.
+ *
+ * There is a special value, numbers::internal_face_boundary_id
+ * that is used to indicate an invalid value of this type and that
+ * is used as the boundary indicator for faces that are in the interior
+ * of the domain and therefore not part of any addressable boundary
+ * component.
+ */
typedef unsigned char boundary_id;
- /**
- * @deprecated Old name for the typedef above.
- */
+ /**
+ * @deprecated Old name for the typedef above.
+ */
typedef boundary_id boundary_id_t;
- /**
- * The type used to denote material indicators associated with every
- * cell.
- *
- * There is a special value, numbers::invalid_material_id
- * that is used to indicate an invalid value of this type.
- */
+ /**
+ * The type used to denote material indicators associated with every
+ * cell.
+ *
+ * There is a special value, numbers::invalid_material_id
+ * that is used to indicate an invalid value of this type.
+ */
typedef unsigned char material_id;
- /**
- * @deprecated Old name for the typedef above.
- */
+ /**
+ * @deprecated Old name for the typedef above.
+ */
typedef material_id material_id_t;
}
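A sketch of why the DEAL_II_DOF_INDEX_MPI_TYPE macro exists: when communicating values of type types::global_dof_index over MPI, the macro always names the matching MPI data type (assumes an MPI-enabled build with MPI already initialized; the header path and the values are illustrative):

  #include <deal.II/base/types.h>
  #include <mpi.h>

  void sum_dof_counts ()
  {
    dealii::types::global_dof_index local_dofs  = 42;
    dealii::types::global_dof_index global_dofs = 0;

    // Expands to MPI_UNSIGNED or MPI_UNSIGNED_LONG_LONG, matching whatever
    // global_dof_index happens to be on this build.
    MPI_Allreduce (&local_dofs, &global_dofs, 1,
                   DEAL_II_DOF_INDEX_MPI_TYPE, MPI_SUM, MPI_COMM_WORLD);
  }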
static const unsigned int
invalid_unsigned_int = static_cast<unsigned int> (-1);
- /**
- * An invalid value for indices of degrees
- * of freedom.
- */
+ /**
+ * An invalid value for indices of degrees
+ * of freedom.
+ */
const types::global_dof_index invalid_dof_index = static_cast<types::global_dof_index>(-1);
- /**
- * Invalid material_id which we
- * need in several places as a
- * default value. We assume that
- * all material_ids lie in the
- * range [0, invalid_material_id).
- */
+ /**
+ * Invalid material_id which we
+ * need in several places as a
+ * default value. We assume that
+ * all material_ids lie in the
+ * range [0, invalid_material_id).
+ */
const types::material_id invalid_material_id = static_cast<types::material_id>(-1);
- /**
- * The number which we reserve for
- * internal faces. We assume that
- * all boundary_ids lie in the
- * range [0,
- * internal_face_boundary_id).
- */
+ /**
+ * The number which we reserve for
+ * internal faces. We assume that
+ * all boundary_ids lie in the
+ * range [0,
+ * internal_face_boundary_id).
+ */
const types::boundary_id internal_face_boundary_id = static_cast<types::boundary_id>(-1);
- /**
- * A special id for an invalid
- * subdomain id. This value may not
- * be used as a valid id but is
- * used, for example, for default
- * arguments to indicate a
- * subdomain id that is not to be
- * used.
- *
- * See the @ref GlossSubdomainId
- * "glossary" for more information.
- */
+ /**
+ * A special id for an invalid
+ * subdomain id. This value may not
+ * be used as a valid id but is
+ * used, for example, for default
+ * arguments to indicate a
+ * subdomain id that is not to be
+ * used.
+ *
+ * See the @ref GlossSubdomainId
+ * "glossary" for more information.
+ */
const types::subdomain_id invalid_subdomain_id = static_cast<types::subdomain_id>(-1);
- /**
- * The subdomain id assigned to a
- * cell whose true subdomain id we
- * don't know, for example because
- * it resides on a different
- * processor on a mesh that is kept
- * distributed on many
- * processors. Such cells are
- * called "artificial".
- *
- * See the glossary entries on @ref
- * GlossSubdomainId "subdomain ids"
- * and @ref GlossArtificialCell
- * "artificial cells" as well as
- * the @ref distributed module for
- * more information.
- */
+ /**
+ * The subdomain id assigned to a
+ * cell whose true subdomain id we
+ * don't know, for example because
+ * it resides on a different
+ * processor on a mesh that is kept
+ * distributed on many
+ * processors. Such cells are
+ * called "artificial".
+ *
+ * See the glossary entries on @ref
+ * GlossSubdomainId "subdomain ids"
+ * and @ref GlossArtificialCell
+ * "artificial cells" as well as
+ * the @ref distributed module for
+ * more information.
+ */
const types::subdomain_id artificial_subdomain_id = static_cast<types::subdomain_id>(-2);
-
}
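A short sketch of the "invalid value as marker" idiom these constants support (the surrounding function is made up for illustration):

  #include <deal.II/base/types.h>

  // Return the candidate index if the search succeeded, and the unmistakable
  // marker numbers::invalid_dof_index otherwise.
  dealii::types::global_dof_index
  find_dof_or_invalid (const bool                            found,
                       const dealii::types::global_dof_index candidate)
  {
    if (found == false)
      return dealii::numbers::invalid_dof_index;
    return candidate;
  }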
Iterator
lower_bound (Iterator first,
Iterator last,
- const T &val);
+ const T &val);
- /**
- * The same function as above, but taking
- * an argument that is used to compare
- * individual elements of the sequence of
- * objects pointed to by the iterators.
- */
+ /**
+ * The same function as above, but taking
+ * an argument that is used to compare
+ * individual elements of the sequence of
+ * objects pointed to by the iterators.
+ */
template<typename Iterator, typename T, typename Comp>
Iterator
lower_bound (Iterator first,
void
destroy_communicator (Epetra_Comm &communicator);
- /**
- * Return the number of MPI processes
- * there exist in the given communicator
- * object. If this is a sequential job,
- * it returns 1.
- */
+ /**
+ * Return the number of MPI processes
+ * there exist in the given communicator
+ * object. If this is a sequential job,
+ * it returns 1.
+ */
unsigned int get_n_mpi_processes (const Epetra_Comm &mpi_communicator);
- /**
- * Return the number of the present MPI
- * process in the space of processes
- * described by the given
- * communicator. This will be a unique
- * value for each process between zero
- * and (less than) the number of all
- * processes (given by
- * get_n_mpi_processes()).
- */
+ /**
+ * Return the number of the present MPI
+ * process in the space of processes
+ * described by the given
+ * communicator. This will be a unique
+ * value for each process between zero
+ * and (less than) the number of all
+ * processes (given by
+ * get_n_mpi_processes()).
+ */
unsigned int get_this_mpi_process (const Epetra_Comm &mpi_communicator);
- /**
- * Given a Trilinos Epetra map, create a
- * new map that has the same subdivision
- * of elements to processors but uses the
- * given communicator object instead of
- * the one stored in the first
- * argument. In essence, this means that
- * we create a map that communicates
- * among the same processors in the same
- * way, but using a separate channel.
- *
- * This function is typically used with a
- * communicator that has been obtained by
- * the duplicate_communicator() function.
- */
+ /**
+ * Given a Trilinos Epetra map, create a
+ * new map that has the same subdivision
+ * of elements to processors but uses the
+ * given communicator object instead of
+ * the one stored in the first
+ * argument. In essence, this means that
+ * we create a map that communicates
+ * among the same processors in the same
+ * way, but using a separate channel.
+ *
+ * This function is typically used with a
+ * communicator that has been obtained by
+ * the duplicate_communicator() function.
+ */
Epetra_Map
- duplicate_map (const Epetra_BlockMap &map,
+ duplicate_map (const Epetra_BlockMap &map,
const Epetra_Comm &comm);
}
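A hypothetical usage sketch for the communicator utilities declared above; it only calls the three functions from this hunk. The Epetra_Comm and Epetra_BlockMap arguments are assumed to come from elsewhere (for example, from an existing Trilinos vector), and the free functions may need qualification with their enclosing namespace.

#include <Epetra_Comm.h>
#include <Epetra_BlockMap.h>
#include <Epetra_Map.h>
#include <iostream>

void report_parallel_layout (const Epetra_Comm     &communicator,
                             const Epetra_BlockMap &map)
{
  // Number of processes and rank of this process; a sequential job
  // yields 1 and 0, respectively.
  const unsigned int n_processes  = get_n_mpi_processes (communicator);
  const unsigned int this_process = get_this_mpi_process (communicator);

  std::cout << "Process " << this_process
            << " of " << n_processes << std::endl;

  // Same element-to-processor subdivision as 'map', but communicating
  // over a separate channel, as described in the documentation above.
  const Epetra_Map private_map = duplicate_map (map, communicator);
  (void) private_map;      // the sketch does not use the duplicated map
}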
*/
class BlockInfo : public Subscriptor
{
- public:
- /**
- * @brief Fill the object with values
- * describing block structure
- * of the DoFHandler.
- *
- * This function will also clear
- * the local() indices.
- */
- template <int dim, int spacedim>
- void initialize(const DoFHandler<dim, spacedim>&, bool levels_only = false, bool multigrid = false);
-
- /**
- * @brief Fill the object with values
- * describing level block
- * structure of the
- * MGDoFHandler. If
- * <tt>levels_only</tt> is false,
- * the other initialize() is
- * called as well.
- *
- * This function will also clear
- * the local() indices.
- */
- template <int dim, int spacedim>
- void initialize(const MGDoFHandler<dim, spacedim>&, bool levels_only = false);
-
- /**
- * @brief Initialize block structure
- * on cells and compute
- * renumbering between cell
- * dofs and block cell dofs.
- */
- template <int dim, int spacedim>
- void initialize_local(const DoFHandler<dim, spacedim>&);
-
- /**
- * Access the BlockIndices
- * structure of the global
- * system.
- */
- const BlockIndices& global() const;
-
- /**
- * Access BlockIndices for the
- * local system on a cell.
- */
- const BlockIndices& local() const;
-
- /**
- * Access the BlockIndices
- * structure of a level in the
- * multilevel hierarchy.
- */
- const BlockIndices& level(unsigned int level) const;
-
- /**
- * Return the index after local
- * renumbering.
- *
- * The input of this function is
- * an index between zero and the
- * number of dofs per cell,
- * numbered in local block
- * ordering, that is first all
- * indices of the first system
- * block, then all of the second
- * block and so forth. The
- * function then outputs the index
- * in the standard local
- * numbering of DoFAccessor.
- */
- unsigned int renumber (const unsigned int i) const;
-
- /**
- * The number of base elements.
- */
- unsigned int n_base_elements() const;
-
- /**
- * Return the base element of
- * this index.
- */
- unsigned int base_element (const unsigned int i) const;
-
- /**
- * Write a summary of the block
- * structure to the stream.
- */
- template <class OS>
- void
- print(OS& stream) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Read or write the data of this object to or
- * from a stream for the purpose of serialization
- */
- template <class Archive>
- void serialize (Archive & ar,
- const unsigned int version);
-
- private:
- /**
- * @brief The block structure
- * of the global system.
- */
- BlockIndices bi_global;
- /**
- * @brief The multilevel block structure.
- */
- std::vector<BlockIndices> levels;
-
- /**
- * @brief The block structure
- * of the cell systems.
- */
- BlockIndices bi_local;
-
- /**
- * The base element associated
- * with each block.
- */
- std::vector<unsigned int> base_elements;
-
- /**
- * A vector containing the
- * renumbering from the
- * standard order of degrees of
- * freedom on a cell to a
- * component wise
- * ordering. Filled by
- * initialize().
- */
- std::vector<unsigned int> local_renumbering;
+ public:
+ /**
+ * @brief Fill the object with values
+ * describing block structure
+ * of the DoFHandler.
+ *
+ * This function will also clear
+ * the local() indices.
+ */
+ template <int dim, int spacedim>
- void initialize(const DoFHandler<dim, spacedim> &);
++ void initialize(const DoFHandler<dim, spacedim> &, bool levels_only = false, bool multigrid = false);
+
+ /**
+ * @brief Fill the object with values
+ * describing level block
+ * structure of the
+ * MGDoFHandler. If
+ * <tt>levels_only</tt> is false,
+ * the other initialize() is
+ * called as well.
+ *
+ * This function will also clear
+ * the local() indices.
+ */
+ template <int dim, int spacedim>
+ void initialize(const MGDoFHandler<dim, spacedim> &, bool levels_only = false);
+
+ /**
+ * @brief Initialize block structure
+ * on cells and compute
+ * renumbering between cell
+ * dofs and block cell dofs.
+ */
+ template <int dim, int spacedim>
+ void initialize_local(const DoFHandler<dim, spacedim> &);
+
+ /**
+ * Access the BlockIndices
+ * structure of the global
+ * system.
+ */
+ const BlockIndices &global() const;
+
+ /**
+ * Access BlockIndices for the
+ * local system on a cell.
+ */
+ const BlockIndices &local() const;
+
+ /**
+ * Access the BlockIndices
+ * structure of a level in the
+ * multilevel hierarchy.
+ */
+ const BlockIndices &level(unsigned int level) const;
+
+ /**
+ * Return the index after local
+ * renumbering.
+ *
+ * The input of this function is
+ * an index between zero and the
+ * number of dofs per cell,
+ * numbered in local block
+ * ordering, that is first all
+ * indices of the first system
+ * block, then all of the second
+ * block and so forth. The
+ * function then outputs the index
+ * in the standard local
+ * numbering of DoFAccessor.
+ */
+ unsigned int renumber (const unsigned int i) const;
+
+ /**
+ * The number of base elements.
+ */
+ unsigned int n_base_elements() const;
+
+ /**
+ * Return the base element of
+ * this index.
+ */
+ unsigned int base_element (const unsigned int i) const;
+
+ /**
+ * Write a summary of the block
+ * structure to the stream.
+ */
+ template <class OS>
+ void
+ print(OS &stream) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Read or write the data of this object to or
+ * from a stream for the purpose of serialization.
+ */
+ template <class Archive>
+ void serialize (Archive &ar,
+ const unsigned int version);
+
+ private:
+ /**
+ * @brief The block structure
+ * of the global system.
+ */
+ BlockIndices bi_global;
+ /**
+ * @brief The multilevel block structure.
+ */
+ std::vector<BlockIndices> levels;
+
+ /**
+ * @brief The block structure
+ * of the cell systems.
+ */
+ BlockIndices bi_local;
+
+ /**
+ * The base element associated
+ * with each block.
+ */
+ std::vector<unsigned int> base_elements;
+
+ /**
+ * A vector containing the
+ * renumbering from the
+ * standard order of degrees of
+ * freedom on a cell to a
+ * component wise
+ * ordering. Filled by
+ * initialize().
+ */
+ std::vector<unsigned int> local_renumbering;
};
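A rough sketch, under the assumption that dof_handler already has degrees of freedom distributed for a block-structured finite element, of how the BlockInfo interface above is meant to be used; deallog and BlockIndices::size() are taken from the surrounding library.

template <int dim>
void inspect_block_structure (const DoFHandler<dim> &dof_handler)
{
  BlockInfo block_info;

  // Fill the global block structure (multigrid data stays off by
  // default), then the cell-wise structure and the local renumbering.
  block_info.initialize (dof_handler);
  block_info.initialize_local (dof_handler);

  // global() and local() describe how many blocks the full system and
  // a single cell have, respectively.
  const unsigned int n_global_blocks = block_info.global().size();
  const unsigned int n_local_blocks  = block_info.local().size();

  // renumber(i) maps an index given in local block ordering back to
  // the standard local numbering used by DoFAccessor.
  const unsigned int first_standard_index = block_info.renumber (0);

  deallog << n_global_blocks << " global blocks, "
          << n_local_blocks << " local blocks, renumber(0) = "
          << first_standard_index << std::endl;
}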
template <int structdim, class DH>
class DoFAccessor : public dealii::internal::DoFAccessor::Inheritance<structdim, DH::dimension, DH::space_dimension>::BaseClass
{
- public:
-
- /**
- * A static variable that allows users of
- * this class to discover the value of
- * the second template argument.
- */
- static const unsigned int dimension=DH::dimension;
-
- /**
- * A static variable that allows users of
- * this class to discover the value of
- * the third template argument.
- */
- static const unsigned int space_dimension=DH::space_dimension;
-
- /**
- * Declare a typedef to the base
- * class to make accessing some
- * of the exception classes
- * simpler.
- */
- typedef
- typename dealii::internal::DoFAccessor::Inheritance<structdim, dimension, space_dimension>::BaseClass
- BaseClass;
-
- /**
- * Data type passed by the iterator class.
- */
- typedef DH AccessorData;
-
- /**
- * @name Constructors
- */
- /**
- * @{
- */
-
- /**
- * Default constructor. Provides
- * an accessor that can't be
- * used.
- */
- DoFAccessor ();
-
- /**
- * Constructor
- */
- DoFAccessor (const Triangulation<DH::dimension,DH::space_dimension> *tria,
- const int level,
- const int index,
- const DH *local_data);
-
- /**
- * Conversion constructor. This
- * constructor exists to make certain
- * constructs simpler to write in
- * dimension independent code. For
- * example, it allows assigning a face
- * iterator to a line iterator, an
- * operation that is useful in 2d but
- * doesn't make any sense in 3d. The
- * constructor here exists for the
- * purpose of making the code conform to
- * C++ but it will unconditionally abort;
- * in other words, assigning a face
- * iterator to a line iterator is better
- * put into an if-statement that checks
- * that the dimension is two, and assign
- * to a quad iterator in 3d (an operator
- * that, without this constructor would
- * be illegal if we happen to compile for
- * 2d).
- */
- template <int structdim2, int dim2, int spacedim2>
- DoFAccessor (const InvalidAccessor<structdim2,dim2,spacedim2> &);
-
- /**
- * Another conversion operator
- * between objects that don't
- * make sense, just like the
- * previous one.
- */
- template <int dim2, class DH2>
- DoFAccessor (const DoFAccessor<dim2, DH2> &);
-
- /**
- * @}
- */
-
- /**
- * Return a handle on the
- * DoFHandler object which we
- * are using.
- */
- const DH &
- get_dof_handler () const;
-
- /**
- * Implement the copy operator needed
- * for the iterator classes.
- */
- void copy_from (const DoFAccessor<structdim, DH> &a);
-
- /**
- * Copy operator used by the
- * iterator class. Keeps the
- * previously set dof handler,
- * but sets the object
- * coordinates of the TriaAccessor.
- */
- void copy_from (const TriaAccessorBase<structdim, DH::dimension, DH::space_dimension> &da);
-
- /**
- * Return an iterator pointing to
- * the the parent.
- */
- TriaIterator<DoFAccessor<structdim,DH> >
- parent () const;
-
- /**
- * @name Accessing sub-objects
- */
- /**
- * @{
- */
-
- /**
- * Return an iterator pointing to
- * the the @p c-th child.
- */
- TriaIterator<DoFAccessor<structdim,DH> >
- child (const unsigned int c) const;
-
- /**
- * Pointer to the @p ith line
- * bounding this object. If the
- * current object is a line itself,
- * then the only valid index is
- * @p i equals to zero, and the
- * function returns an iterator
- * to itself.
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::line_iterator
- line (const unsigned int i) const;
-
- /**
- * Pointer to the @p ith quad
- * bounding this object. If the
- * current object is a quad itself,
- * then the only valid index is
- * @p i equals to zero, and the
- * function returns an iterator
- * to itself.
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::quad_iterator
- quad (const unsigned int i) const;
-
- /**
- * @}
- */
-
- /**
- * @name Accessing the DoF indices of this object
- */
- /**
- * @{
- */
-
- /**
- * Return the indices of the dofs of this
- * object in the standard ordering: dofs
- * on vertex 0, dofs on vertex 1, etc,
- * dofs on line 0, dofs on line 1, etc,
- * dofs on quad 0, etc.
- *
- * The vector has to have the
- * right size before being passed
- * to this function.
- *
- * This function is most often
- * used on active objects (edges,
- * faces, cells). It can be used
- * on non-active objects as well
- * (i.e. objects that have
- * children), but only if the
- * finite element under
- * consideration has degrees of
- * freedom exclusively on
- * vertices. Otherwise, the
- * function doesn't make much
- * sense, since for example
- * inactive edges do not have
- * degrees of freedom associated
- * with them at all.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- *
- * For cells, there is only a
- * single possible finite element
- * index (namely the one for that
- * cell, returned by
- * <code>cell-@>active_fe_index</code>. Consequently,
- * the derived DoFCellAccessor
- * class has an overloaded
- * version of this function that
- * calls the present function
- * with
- * <code>cell-@>active_fe_index</code>
- * as last argument.
- */
- void get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index = DH::default_fe_index) const;
-
- void get_mg_dof_indices (const int level, std::vector<types::global_dof_index>& dof_indices, const unsigned int fe_index = DH::default_fe_index) const;
-
- /**
- * Global DoF index of the <i>i</i>
- * degree associated with the @p vertexth
- * vertex of the present cell.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- */
- types::global_dof_index vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
-
- types::global_dof_index mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index = DH::default_fe_index) const;
-
- /**
- * Index of the <i>i</i>th degree
- * of freedom of this object.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- *
- * @note While the get_dof_indices()
- * function returns an array that
- * contains the indices of all degrees of
- * freedom that somehow live on this
- * object (i.e. on the vertices, edges or
- * interior of this object), the current
- * dof_index() function only considers
- * the DoFs that really belong to this
- * particular object's interior. In other
- * words, as an example, if the current
- * object refers to a quad (a cell in 2d,
- * a face in 3d) and the finite element
- * associated with it is a bilinear one,
- * then the get_dof_indices() will return
- * an array of size 4 while dof_index()
- * will produce an exception because no
- * degrees are defined in the interior of
- * the face.
- */
- types::global_dof_index dof_index (const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
-
- types::global_dof_index mg_dof_index (const int level, const unsigned int i) const;
-
- /**
- * @}
- */
-
- /**
- * @name Accessing the finite element associated with this object
- */
- /**
- * @{
- */
-
- /**
- * Return the number of finite
- * elements that are active on a
- * given object.
- *
- * For non-hp DoFHandler objects,
- * the answer is of course always
- * one. However, for
- * hp::DoFHandler objects, this
- * isn't the case: If this is a
- * cell, the answer is of course
- * one. If it is a face, the
- * answer may be one or two,
- * depending on whether the two
- * adjacent cells use the same
- * finite element or not. If it
- * is an edge in 3d, the possible
- * return value may be one or any
- * other value larger than that.
- */
- unsigned int
- n_active_fe_indices () const;
-
- /**
- * Return the @p n-th active fe
- * index on this object. For
- * cells and all non-hp objects,
- * there is only a single active
- * fe index, so the argument must
- * be equal to zero. For
- * lower-dimensional hp objects,
- * there are
- * n_active_fe_indices() active
- * finite elements, and this
- * function can be queried for
- * their indices.
- */
- unsigned int
- nth_active_fe_index (const unsigned int n) const;
-
- /**
- * Return true if the finite
- * element with given index is
- * active on the present
- * object. For non-hp DoF
- * accessors, this is of course
- * the case only if @p fe_index
- * equals zero. For cells, it is
- * the case if @p fe_index equals
- * active_fe_index() of this
- * cell. For faces and other
- * lower-dimensional objects,
- * there may be more than one @p
- * fe_index that are active on
- * any given object (see
- * n_active_fe_indices()).
- */
- bool
- fe_index_is_active (const unsigned int fe_index) const;
-
- /**
- * Return a reference to the finite
- * element used on this object with the
- * given @p fe_index. @p fe_index must be
- * used on this object,
- * i.e. <code>fe_index_is_active(fe_index)</code>
- * must return true.
- */
- const FiniteElement<DH::dimension,DH::space_dimension> &
- get_fe (const unsigned int fe_index) const;
-
- /**
- * @}
- */
-
- /**
- * Exceptions for child classes
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcInvalidObject);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcVectorNotEmpty);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcVectorDoesNotMatch);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcMatrixDoesNotMatch);
- /**
- * A function has been called for
- * a cell which should be active,
- * but is refined. @ref GlossActive
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcNotActive);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcCantCompareIterators);
-
- protected:
-
- /**
- * Store the address of the DoFHandler object
- * to be accessed.
- */
- DH *dof_handler;
-
- /**
- * Compare for equality.
- */
- bool operator == (const DoFAccessor &) const;
-
- /**
- * Compare for inequality.
- */
- bool operator != (const DoFAccessor &) const;
-
- /**
- * Reset the DoF handler pointer.
- */
- void set_dof_handler (DH *dh);
-
- /**
- * Set the index of the
- * <i>i</i>th degree of freedom
- * of this object to @p index.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- */
- void set_dof_index (const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index = DH::default_fe_index) const;
-
- void set_mg_dof_index (const int level, const unsigned int i, const types::global_dof_index index) const;
-
- /**
- * Set the global index of the <i>i</i>
- * degree on the @p vertex-th vertex of
- * the present cell to @p index.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- */
- void set_vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index = DH::default_fe_index) const;
-
- void set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const types::global_dof_index index, const unsigned int fe_index = DH::default_fe_index) const;
-
- /**
- * Iterator classes need to be friends
- * because they need to access operator==
- * and operator!=.
- */
- template <typename> friend class TriaRawIterator;
-
-
- private:
- /**
- * Copy operator. This is normally used
- * in a context like <tt>iterator a,b;
- * *a=*b;</tt>. Presumably, the intent
- * here is to copy the object pointed to
- * by @p b to the object pointed to by
- * @p a. However, the result of
- * dereferencing an iterator is not an
- * object but an accessor; consequently,
- * this operation is not useful for
- * iterators on triangulations. We
- * declare this function here private,
- * thus it may not be used from outside.
- * Furthermore it is not implemented and
- * will give a linker error if used
- * anyway.
- */
- DoFAccessor<structdim,DH> &
- operator = (const DoFAccessor<structdim,DH> &da);
-
- /**
- * Make the DoFHandler class a friend so
- * that it can call the set_xxx()
- * functions.
- */
- template <int dim, int spacedim> friend class DoFHandler;
- template <int dim, int spacedim> friend class hp::DoFHandler;
-
- friend struct dealii::internal::DoFHandler::Policy::Implementation;
- friend struct dealii::internal::DoFHandler::Implementation;
- friend struct dealii::internal::hp::DoFHandler::Implementation;
- friend struct dealii::internal::DoFCellAccessor::Implementation;
+ public:
+
+ /**
+ * A static variable that allows users of
+ * this class to discover the value of
+ * the second template argument.
+ */
+ static const unsigned int dimension=DH::dimension;
+
+ /**
+ * A static variable that allows users of
+ * this class to discover the value of
+ * the third template argument.
+ */
+ static const unsigned int space_dimension=DH::space_dimension;
+
+ /**
+ * Declare a typedef to the base
+ * class to make accessing some
+ * of the exception classes
+ * simpler.
+ */
+ typedef
+ typename dealii::internal::DoFAccessor::Inheritance<structdim, dimension, space_dimension>::BaseClass
+ BaseClass;
+
+ /**
+ * Data type passed by the iterator class.
+ */
+ typedef DH AccessorData;
+
+ /**
+ * @name Constructors
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Default constructor. Provides
+ * an accessor that can't be
+ * used.
+ */
+ DoFAccessor ();
+
+ /**
+ * Constructor
+ */
+ DoFAccessor (const Triangulation<DH::dimension,DH::space_dimension> *tria,
+ const int level,
+ const int index,
+ const DH *local_data);
+
+ /**
+ * Conversion constructor. This
+ * constructor exists to make certain
+ * constructs simpler to write in
+ * dimension independent code. For
+ * example, it allows assigning a face
+ * iterator to a line iterator, an
+ * operation that is useful in 2d but
+ * doesn't make any sense in 3d. The
+ * constructor here exists for the
+ * purpose of making the code conform to
+ * C++ but it will unconditionally abort;
+ * in other words, assigning a face
+ * iterator to a line iterator is better
+ * put into an if-statement that checks
+ * that the dimension is two, and assign
+ * to a quad iterator in 3d (an operation
+ * that, without this constructor, would
+ * be illegal if we happen to compile for
+ * 2d).
+ */
+ template <int structdim2, int dim2, int spacedim2>
+ DoFAccessor (const InvalidAccessor<structdim2,dim2,spacedim2> &);
+
+ /**
+ * Another conversion operator
+ * between objects that don't
+ * make sense, just like the
+ * previous one.
+ */
+ template <int dim2, class DH2>
+ DoFAccessor (const DoFAccessor<dim2, DH2> &);
+
+ /**
+ * @}
+ */
+
+ /**
+ * Return a handle on the
+ * DoFHandler object which we
+ * are using.
+ */
+ const DH &
+ get_dof_handler () const;
+
+ /**
+ * Implement the copy operator needed
+ * for the iterator classes.
+ */
+ void copy_from (const DoFAccessor<structdim, DH> &a);
+
+ /**
+ * Copy operator used by the
+ * iterator class. Keeps the
+ * previously set dof handler,
+ * but sets the object
+ * coordinates of the TriaAccessor.
+ */
+ void copy_from (const TriaAccessorBase<structdim, DH::dimension, DH::space_dimension> &da);
+
+ /**
+ * Return an iterator pointing to
+ * the parent.
+ */
+ TriaIterator<DoFAccessor<structdim,DH> >
+ parent () const;
+
+ /**
+ * @name Accessing sub-objects
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return an iterator pointing to
+ * the @p c-th child.
+ */
+ TriaIterator<DoFAccessor<structdim,DH> >
+ child (const unsigned int c) const;
+
+ /**
+ * Pointer to the @p ith line
+ * bounding this object. If the
+ * current object is a line itself,
+ * then the only valid index
+ * @p i is zero, and the
+ * function returns an iterator
+ * to itself.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::line_iterator
+ line (const unsigned int i) const;
+
+ /**
+ * Pointer to the @p ith quad
+ * bounding this object. If the
+ * current object is a quad itself,
+ * then the only valid index
+ * @p i is zero, and the
+ * function returns an iterator
+ * to itself.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::quad_iterator
+ quad (const unsigned int i) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Accessing the DoF indices of this object
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the indices of the dofs of this
+ * object in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, etc,
+ * dofs on line 0, dofs on line 1, etc,
+ * dofs on quad 0, etc.
+ *
+ * The vector has to have the
+ * right size before being passed
+ * to this function.
+ *
+ * This function is most often
+ * used on active objects (edges,
+ * faces, cells). It can be used
+ * on non-active objects as well
+ * (i.e. objects that have
+ * children), but only if the
+ * finite element under
+ * consideration has degrees of
+ * freedom exclusively on
+ * vertices. Otherwise, the
+ * function doesn't make much
+ * sense, since for example
+ * inactive edges do not have
+ * degrees of freedom associated
+ * with them at all.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ *
+ * For cells, there is only a
+ * single possible finite element
+ * index (namely the one for that
+ * cell, returned by
+ * <code>cell-@>active_fe_index()</code>). Consequently,
+ * the derived DoFCellAccessor
+ * class has an overloaded
+ * version of this function that
+ * calls the present function
+ * with
+ * <code>cell-@>active_fe_index</code>
+ * as last argument.
+ */
- void get_dof_indices (std::vector<unsigned int> &dof_indices,
++ void get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index = DH::default_fe_index) const;
+
++ void get_mg_dof_indices (const int level, std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index = DH::default_fe_index) const;
++
+ /**
+ * Global DoF index of the <i>i</i>th
+ * degree of freedom associated with the
+ * @p vertex-th vertex of the present cell.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ */
- unsigned int vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
++ types::global_dof_index vertex_dof_index (const unsigned int vertex,
++ const unsigned int i,
++ const unsigned int fe_index = DH::default_fe_index) const;
++
++ types::global_dof_index mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index = DH::default_fe_index) const;
+
+ /**
+ * Index of the <i>i</i>th degree
+ * of freedom of this object.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ *
+ * @note While the get_dof_indices()
+ * function returns an array that
+ * contains the indices of all degrees of
+ * freedom that somehow live on this
+ * object (i.e. on the vertices, edges or
+ * interior of this object), the current
+ * dof_index() function only considers
+ * the DoFs that really belong to this
+ * particular object's interior. In other
+ * words, as an example, if the current
+ * object refers to a quad (a cell in 2d,
+ * a face in 3d) and the finite element
+ * associated with it is a bilinear one,
+ * then get_dof_indices() will return
+ * an array of size 4 while dof_index()
+ * will produce an exception because no
+ * degrees are defined in the interior of
+ * the face.
+ */
- unsigned int dof_index (const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
++ types::global_dof_index dof_index (const unsigned int i,
++ const unsigned int fe_index = DH::default_fe_index) const;
++
++ types::global_dof_index mg_dof_index (const int level, const unsigned int i) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Accessing the finite element associated with this object
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the number of finite
+ * elements that are active on a
+ * given object.
+ *
+ * For non-hp DoFHandler objects,
+ * the answer is of course always
+ * one. However, for
+ * hp::DoFHandler objects, this
+ * isn't the case: If this is a
+ * cell, the answer is of course
+ * one. If it is a face, the
+ * answer may be one or two,
+ * depending on whether the two
+ * adjacent cells use the same
+ * finite element or not. If it
+ * is an edge in 3d, the possible
+ * return value may be one or any
+ * other value larger than that.
+ */
+ unsigned int
+ n_active_fe_indices () const;
+
+ /**
+ * Return the @p n-th active fe
+ * index on this object. For
+ * cells and all non-hp objects,
+ * there is only a single active
+ * fe index, so the argument must
+ * be equal to zero. For
+ * lower-dimensional hp objects,
+ * there are
+ * n_active_fe_indices() active
+ * finite elements, and this
+ * function can be queried for
+ * their indices.
+ */
+ unsigned int
+ nth_active_fe_index (const unsigned int n) const;
+
+ /**
+ * Return true if the finite
+ * element with given index is
+ * active on the present
+ * object. For non-hp DoF
+ * accessors, this is of course
+ * the case only if @p fe_index
+ * equals zero. For cells, it is
+ * the case if @p fe_index equals
+ * active_fe_index() of this
+ * cell. For faces and other
+ * lower-dimensional objects,
+ * there may be more than one @p
+ * fe_index that is active on
+ * any given object (see
+ * n_active_fe_indices()).
+ */
+ bool
+ fe_index_is_active (const unsigned int fe_index) const;
+
+ /**
+ * Return a reference to the finite
+ * element used on this object with the
+ * given @p fe_index. @p fe_index must be
+ * used on this object,
+ * i.e. <code>fe_index_is_active(fe_index)</code>
+ * must return true.
+ */
+ const FiniteElement<DH::dimension,DH::space_dimension> &
+ get_fe (const unsigned int fe_index) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * Exceptions for child classes
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcInvalidObject);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcVectorNotEmpty);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcVectorDoesNotMatch);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcMatrixDoesNotMatch);
+ /**
+ * A function has been called for
+ * a cell which should be active,
+ * but is refined. @ref GlossActive
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcNotActive);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcCantCompareIterators);
+
+ protected:
+
+ /**
+ * Store the address of the DoFHandler object
+ * to be accessed.
+ */
+ DH *dof_handler;
+
+ /**
+ * Compare for equality.
+ */
+ bool operator == (const DoFAccessor &) const;
+
+ /**
+ * Compare for inequality.
+ */
+ bool operator != (const DoFAccessor &) const;
+
+ /**
+ * Reset the DoF handler pointer.
+ */
+ void set_dof_handler (DH *dh);
+
+ /**
+ * Set the index of the
+ * <i>i</i>th degree of freedom
+ * of this object to @p index.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ */
+ void set_dof_index (const unsigned int i,
- const unsigned int index,
++ const types::global_dof_index index,
+ const unsigned int fe_index = DH::default_fe_index) const;
+
++ void set_mg_dof_index (const int level, const unsigned int i, const types::global_dof_index index) const;
++
+ /**
+ * Set the global index of the <i>i</i>th
+ * degree of freedom on the @p vertex-th
+ * vertex of the present cell to @p index.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ */
+ void set_vertex_dof_index (const unsigned int vertex,
+ const unsigned int i,
- const unsigned int index,
++ const types::global_dof_index index,
+ const unsigned int fe_index = DH::default_fe_index) const;
+
++ void set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const types::global_dof_index index, const unsigned int fe_index = DH::default_fe_index) const;
++
+ /**
+ * Iterator classes need to be friends
+ * because they need to access operator==
+ * and operator!=.
+ */
+ template <typename> friend class TriaRawIterator;
+
+
+ private:
+ /**
+ * Copy operator. This is normally used
+ * in a context like <tt>iterator a,b;
+ * *a=*b;</tt>. Presumably, the intent
+ * here is to copy the object pointed to
+ * by @p b to the object pointed to by
+ * @p a. However, the result of
+ * dereferencing an iterator is not an
+ * object but an accessor; consequently,
+ * this operation is not useful for
+ * iterators on triangulations. We
+ * declare this function private, so
+ * that it may not be used from outside.
+ * Furthermore, it is not implemented and
+ * will give a linker error if used
+ * anyway.
+ */
+ DoFAccessor<structdim,DH> &
+ operator = (const DoFAccessor<structdim,DH> &da);
+
+ /**
+ * Make the DoFHandler class a friend so
+ * that it can call the set_xxx()
+ * functions.
+ */
+ template <int dim, int spacedim> friend class DoFHandler;
+ template <int dim, int spacedim> friend class hp::DoFHandler;
+
+ friend struct dealii::internal::DoFHandler::Policy::Implementation;
+ friend struct dealii::internal::DoFHandler::Implementation;
+ friend struct dealii::internal::hp::DoFHandler::Implementation;
+ friend struct dealii::internal::DoFCellAccessor::Implementation;
};
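Finally, a short sketch of the index queries whose return types change to types::global_dof_index in this patch. It assumes a DoFHandler on which distribute_dofs() has already been called; for a non-hp DoFHandler the fe_index arguments keep their default values.

template <int dim>
void print_first_cell_indices (const DoFHandler<dim> &dof_handler)
{
  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active();

  // get_dof_indices() requires the vector to have the right size
  // before the call.
  std::vector<types::global_dof_index>
    dof_indices (dof_handler.get_fe().dofs_per_cell);
  cell->get_dof_indices (dof_indices);

  // Index of the first degree of freedom on vertex 0 of this cell,
  // now also returned as types::global_dof_index.
  const types::global_dof_index v0 = cell->vertex_dof_index (0, 0);

  deallog << "first cell carries " << dof_indices.size()
          << " dofs; vertex 0, dof 0 = " << v0 << std::endl;
}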
template <template <int, int> class DH, int spacedim>
class DoFAccessor<0,DH<1,spacedim> > : public TriaAccessor<0,1,spacedim>
{
- public:
-
- /**
- * A static variable that allows users of
- * this class to discover the value of
- * the second template argument.
- */
- static const unsigned int dimension=1;
-
- /**
- * A static variable that allows users of
- * this class to discover the value of
- * the third template argument.
- */
- static const unsigned int space_dimension=spacedim;
-
- /**
- * Declare a typedef to the base
- * class to make accessing some
- * of the exception classes
- * simpler.
- */
- typedef TriaAccessor<0,1,spacedim> BaseClass;
-
- /**
- * Data type passed by the iterator class.
- */
- typedef DH<1,spacedim> AccessorData;
-
- /**
- * @name Constructors
- */
- /**
- * @{
- */
-
- /**
- * Default constructor. Provides
- * an accessor that can't be
- * used.
- */
- DoFAccessor ();
-
- /**
- * Constructor to be used if the
- * object here refers to a vertex
- * of a one-dimensional
- * triangulation, i.e. a face of
- * the triangulation.
- *
- * Since there is no mapping from
- * vertices to cells, an accessor
- * object for a point has no way
- * to figure out whether it is at
- * the boundary of the domain or
- * not. Consequently, the second
- * argument must be passed by the
- * object that generates this
- * accessor -- e.g. a 1d cell
- * that can figure out whether
- * its left or right vertex are
- * at the boundary.
- *
- * The third argument is the
- * global index of the vertex we
- * point to.
- *
- * The fourth argument is a
- * pointer to the DoFHandler
- * object.
- *
- * This iterator can only be
- * called for one-dimensional
- * triangulations.
- */
- DoFAccessor (const Triangulation<1,spacedim> * tria,
- const typename TriaAccessor<0,1,spacedim>::VertexKind vertex_kind,
- const unsigned int vertex_index,
- const DH<1,spacedim> * dof_handler);
-
- /**
- * Constructor. This constructor
- * exists in order to maintain
- * interface compatibility with
- * the other accessor
- * classes. However, it doesn't
- * do anything useful here and so
- * may not actually be called.
- */
- DoFAccessor (const Triangulation<1,spacedim> *,
- const int = 0,
- const int = 0,
- const DH<1,spacedim> * = 0);
-
- /**
- * Conversion constructor. This
- * constructor exists to make certain
- * constructs simpler to write in
- * dimension independent code. For
- * example, it allows assigning a face
- * iterator to a line iterator, an
- * operation that is useful in 2d but
- * doesn't make any sense in 3d. The
- * constructor here exists for the
- * purpose of making the code conform to
- * C++ but it will unconditionally abort;
- * in other words, assigning a face
- * iterator to a line iterator is better
- * put into an if-statement that checks
- * that the dimension is two, and assign
- * to a quad iterator in 3d (an operator
- * that, without this constructor would
- * be illegal if we happen to compile for
- * 2d).
- */
- template <int structdim2, int dim2, int spacedim2>
- DoFAccessor (const InvalidAccessor<structdim2,dim2,spacedim2> &);
-
- /**
- * Another conversion operator
- * between objects that don't
- * make sense, just like the
- * previous one.
- */
- template <int dim2, class DH2>
- DoFAccessor (const DoFAccessor<dim2, DH2> &);
-
- /**
- * @}
- */
-
- /**
- * Return a handle on the
- * DoFHandler object which we
- * are using.
- */
- const DH<1,spacedim> &
- get_dof_handler () const;
-
- /**
- * Copy operator.
- */
- DoFAccessor<0,DH<1,spacedim> > &
- operator = (const DoFAccessor<0,DH<1,spacedim> > &da);
-
- /**
- * Implement the copy operator needed
- * for the iterator classes.
- */
- void copy_from (const DoFAccessor<0, DH<1,spacedim> > &a);
-
- /**
- * Copy operator used by the
- * iterator class. Keeps the
- * previously set dof handler,
- * but sets the object
- * coordinates of the TriaAccessor.
- */
- void copy_from (const TriaAccessorBase<0, 1, spacedim> &da);
-
- /**
- * Return an iterator pointing to
- * the the parent.
- */
- TriaIterator<DoFAccessor<0,DH<1,spacedim> > >
- parent () const;
-
- /**
- * @name Accessing sub-objects
- */
- /**
- * @{
- */
-
- /**
- * Return an iterator pointing to
- * the the @p c-th child.
- */
- TriaIterator<DoFAccessor<0,DH<1,spacedim> > >
- child (const unsigned int c) const;
-
- /**
- * Pointer to the @p ith line
- * bounding this object. If the
- * current object is a line itself,
- * then the only valid index is
- * @p i equals to zero, and the
- * function returns an iterator
- * to itself.
- */
- typename dealii::internal::DoFHandler::Iterators<DH<1,spacedim> >::line_iterator
- line (const unsigned int i) const;
-
- /**
- * Pointer to the @p ith quad
- * bounding this object. If the
- * current object is a quad itself,
- * then the only valid index is
- * @p i equals to zero, and the
- * function returns an iterator
- * to itself.
- */
- typename dealii::internal::DoFHandler::Iterators<DH<1,spacedim> >::quad_iterator
- quad (const unsigned int i) const;
-
- /**
- * @}
- */
-
- /**
- * @name Accessing the DoF indices of this object
- */
- /**
- * @{
- */
-
- /**
- * Return the indices of the dofs of this
- * object in the standard ordering: dofs
- * on vertex 0, dofs on vertex 1, etc,
- * dofs on line 0, dofs on line 1, etc,
- * dofs on quad 0, etc.
- *
- * The vector has to have the
- * right size before being passed
- * to this function.
- *
- * This function is most often
- * used on active objects (edges,
- * faces, cells). It can be used
- * on non-active objects as well
- * (i.e. objects that have
- * children), but only if the
- * finite element under
- * consideration has degrees of
- * freedom exclusively on
- * vertices. Otherwise, the
- * function doesn't make much
- * sense, since for example
- * inactive edges do not have
- * degrees of freedom associated
- * with them at all.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- *
- * For cells, there is only a
- * single possible finite element
- * index (namely the one for that
- * cell, returned by
- * <code>cell-@>active_fe_index</code>. Consequently,
- * the derived DoFCellAccessor
- * class has an overloaded
- * version of this function that
- * calls the present function
- * with
- * <code>cell-@>active_fe_index</code>
- * as last argument.
- */
- void get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
-
- /**
- * Global DoF index of the <i>i</i>
- * degree associated with the @p vertexth
- * vertex of the present cell.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- */
- types::global_dof_index vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
-
- /**
- * Index of the <i>i</i>th degree
- * of freedom of this object.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- *
- * @note While the get_dof_indices()
- * function returns an array that
- * contains the indices of all degrees of
- * freedom that somehow live on this
- * object (i.e. on the vertices, edges or
- * interior of this object), the current
- * dof_index() function only considers
- * the DoFs that really belong to this
- * particular object's interior. In other
- * words, as an example, if the current
- * object refers to a quad (a cell in 2d,
- * a face in 3d) and the finite element
- * associated with it is a bilinear one,
- * then the get_dof_indices() will return
- * an array of size 4 while dof_index()
- * will produce an exception because no
- * degrees are defined in the interior of
- * the face.
- */
- types::global_dof_index dof_index (const unsigned int i,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
-
- /**
- * @}
- */
-
- /**
- * @name Accessing the finite element associated with this object
- */
- /**
- * @{
- */
-
- /**
- * Return the number of finite
- * elements that are active on a
- * given object.
- *
- * For non-hp DoFHandler objects,
- * the answer is of course always
- * one. However, for
- * hp::DoFHandler objects, this
- * isn't the case: If this is a
- * cell, the answer is of course
- * one. If it is a face, the
- * answer may be one or two,
- * depending on whether the two
- * adjacent cells use the same
- * finite element or not. If it
- * is an edge in 3d, the possible
- * return value may be one or any
- * other value larger than that.
- */
- unsigned int
- n_active_fe_indices () const;
-
- /**
- * Return the @p n-th active fe
- * index on this object. For
- * cells and all non-hp objects,
- * there is only a single active
- * fe index, so the argument must
- * be equal to zero. For
- * lower-dimensional hp objects,
- * there are
- * n_active_fe_indices() active
- * finite elements, and this
- * function can be queried for
- * their indices.
- */
- unsigned int
- nth_active_fe_index (const unsigned int n) const;
-
- /**
- * Return true if the finite
- * element with given index is
- * active on the present
- * object. For non-hp DoF
- * accessors, this is of course
- * the case only if @p fe_index
- * equals zero. For cells, it is
- * the case if @p fe_index equals
- * active_fe_index() of this
- * cell. For faces and other
- * lower-dimensional objects,
- * there may be more than one @p
- * fe_index that are active on
- * any given object (see
- * n_active_fe_indices()).
- */
- bool
- fe_index_is_active (const unsigned int fe_index) const;
-
- /**
- * Return a reference to the finite
- * element used on this object with the
- * given @p fe_index. @p fe_index must be
- * used on this object,
- * i.e. <code>fe_index_is_active(fe_index)</code>
- * must return true.
- */
- const FiniteElement<DH<1,spacedim>::dimension,DH<1,spacedim>::space_dimension> &
- get_fe (const unsigned int fe_index) const;
-
- /**
- * @}
- */
-
- /**
- * Exceptions for child classes
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcInvalidObject);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcVectorNotEmpty);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcVectorDoesNotMatch);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcMatrixDoesNotMatch);
- /**
- * A function has been called for
- * a cell which should be active,
- * but is refined. @ref GlossActive
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcNotActive);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcCantCompareIterators);
-
- protected:
-
- /**
- * Store the address of the DoFHandler object
- * to be accessed.
- */
- DH<1,spacedim> *dof_handler;
-
- /**
- * Compare for equality.
- */
- bool operator == (const DoFAccessor &) const;
-
- /**
- * Compare for inequality.
- */
- bool operator != (const DoFAccessor &) const;
-
- /**
- * Reset the DoF handler pointer.
- */
- void set_dof_handler (DH<1,spacedim> *dh);
-
- /**
- * Set the index of the
- * <i>i</i>th degree of freedom
- * of this object to @p index.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- */
- void set_dof_index (const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
-
- /**
- * Set the global index of the <i>i</i>
- * degree on the @p vertex-th vertex of
- * the present cell to @p index.
- *
- * The last argument denotes the
- * finite element index. For the
- * standard ::DoFHandler class,
- * this value must be equal to
- * its default value since that
- * class only supports the same
- * finite element on all cells
- * anyway.
- *
- * However, for hp objects
- * (i.e. the hp::DoFHandler
- * class), different finite
- * element objects may be used on
- * different cells. On faces
- * between two cells, as well as
- * vertices, there may therefore
- * be two sets of degrees of
- * freedom, one for each of the
- * finite elements used on the
- * adjacent cells. In order to
- * specify which set of degrees
- * of freedom to work on, the
- * last argument is used to
- * disambiguate. Finally, if this
- * function is called for a cell
- * object, there can only be a
- * single set of degrees of
- * freedom, and fe_index has to
- * match the result of
- * active_fe_index().
- */
- void set_vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
-
- /**
- * Iterator classes need to be friends
- * because they need to access operator==
- * and operator!=.
- */
- template <typename> friend class TriaRawIterator;
-
-
- /**
- * Make the DoFHandler class a friend so
- * that it can call the set_xxx()
- * functions.
- */
- template <int, int> friend class DoFHandler;
- template <int, int> friend class hp::DoFHandler;
-
- friend struct dealii::internal::DoFHandler::Policy::Implementation;
- friend struct dealii::internal::DoFHandler::Implementation;
- friend struct dealii::internal::hp::DoFHandler::Implementation;
- friend struct dealii::internal::DoFCellAccessor::Implementation;
+ public:
+
+ /**
+ * A static variable that allows users of
+ * this class to discover the value of
+ * the second template argument.
+ */
+ static const unsigned int dimension=1;
+
+ /**
+ * A static variable that allows users of
+ * this class to discover the value of
+ * the third template argument.
+ */
+ static const unsigned int space_dimension=spacedim;
+
+ /**
+ * Declare a typedef to the base
+ * class to make accessing some
+ * of the exception classes
+ * simpler.
+ */
+ typedef TriaAccessor<0,1,spacedim> BaseClass;
+
+ /**
+ * Data type passed by the iterator class.
+ */
+ typedef DH<1,spacedim> AccessorData;
+
+ /**
+ * @name Constructors
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Default constructor. Provides
+ * an accessor that can't be
+ * used.
+ */
+ DoFAccessor ();
+
+ /**
+ * Constructor to be used if the
+ * object here refers to a vertex
+ * of a one-dimensional
+ * triangulation, i.e. a face of
+ * the triangulation.
+ *
+ * Since there is no mapping from
+ * vertices to cells, an accessor
+ * object for a point has no way
+ * to figure out whether it is at
+ * the boundary of the domain or
+ * not. Consequently, the second
+ * argument must be passed by the
+ * object that generates this
+ * accessor -- e.g. a 1d cell
+ * that can figure out whether
+                                        * its left or right vertex is
+ * at the boundary.
+ *
+ * The third argument is the
+ * global index of the vertex we
+ * point to.
+ *
+ * The fourth argument is a
+ * pointer to the DoFHandler
+ * object.
+ *
+                                        * This constructor can only be
+ * called for one-dimensional
+ * triangulations.
+ */
+ DoFAccessor (const Triangulation<1,spacedim> *tria,
+ const typename TriaAccessor<0,1,spacedim>::VertexKind vertex_kind,
+ const unsigned int vertex_index,
+ const DH<1,spacedim> *dof_handler);
+
+ /**
+ * Constructor. This constructor
+ * exists in order to maintain
+ * interface compatibility with
+ * the other accessor
+ * classes. However, it doesn't
+ * do anything useful here and so
+ * may not actually be called.
+ */
+ DoFAccessor (const Triangulation<1,spacedim> *,
+ const int = 0,
+ const int = 0,
+                                                     const DH<1,spacedim> * = 0);
+
+ /**
+ * Conversion constructor. This
+ * constructor exists to make certain
+ * constructs simpler to write in
+ * dimension independent code. For
+ * example, it allows assigning a face
+ * iterator to a line iterator, an
+ * operation that is useful in 2d but
+ * doesn't make any sense in 3d. The
+ * constructor here exists for the
+ * purpose of making the code conform to
+                                        * C++, but it will unconditionally abort;
+                                        * in other words, assigning a face
+                                        * iterator to a line iterator is better
+                                        * put into an if-statement that checks
+                                        * that the dimension is two, and to assign
+                                        * to a quad iterator in 3d (an operation
+                                        * that, without this constructor, would
+                                        * be illegal if we happened to compile
+                                        * for 2d).
+ */
+ template <int structdim2, int dim2, int spacedim2>
+ DoFAccessor (const InvalidAccessor<structdim2,dim2,spacedim2> &);
+
+ /**
+ * Another conversion operator
+ * between objects that don't
+ * make sense, just like the
+ * previous one.
+ */
+ template <int dim2, class DH2>
+ DoFAccessor (const DoFAccessor<dim2, DH2> &);
+
+ /**
+ * @}
+ */
+
+ /**
+ * Return a handle on the
+ * DoFHandler object which we
+ * are using.
+ */
+ const DH<1,spacedim> &
+ get_dof_handler () const;
+
+ /**
+ * Copy operator.
+ */
+ DoFAccessor<0,DH<1,spacedim> > &
+ operator = (const DoFAccessor<0,DH<1,spacedim> > &da);
+
+ /**
+ * Implement the copy operator needed
+ * for the iterator classes.
+ */
+ void copy_from (const DoFAccessor<0, DH<1,spacedim> > &a);
+
+ /**
+ * Copy operator used by the
+ * iterator class. Keeps the
+ * previously set dof handler,
+ * but sets the object
+ * coordinates of the TriaAccessor.
+ */
+ void copy_from (const TriaAccessorBase<0, 1, spacedim> &da);
+
+ /**
+ * Return an iterator pointing to
+                                        * the parent.
+ */
+ TriaIterator<DoFAccessor<0,DH<1,spacedim> > >
+ parent () const;
+
+ /**
+ * @name Accessing sub-objects
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return an iterator pointing to
+                                        * the @p c-th child.
+ */
+ TriaIterator<DoFAccessor<0,DH<1,spacedim> > >
+ child (const unsigned int c) const;
+
+ /**
+ * Pointer to the @p ith line
+ * bounding this object. If the
+ * current object is a line itself,
+ * then the only valid index is
+                                        * @p i equal to zero, and the
+ * function returns an iterator
+ * to itself.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH<1,spacedim> >::line_iterator
+ line (const unsigned int i) const;
+
+ /**
+ * Pointer to the @p ith quad
+ * bounding this object. If the
+ * current object is a quad itself,
+ * then the only valid index is
+                                        * @p i equal to zero, and the
+ * function returns an iterator
+ * to itself.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH<1,spacedim> >::quad_iterator
+ quad (const unsigned int i) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Accessing the DoF indices of this object
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the indices of the dofs of this
+ * object in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, etc,
+ * dofs on line 0, dofs on line 1, etc,
+ * dofs on quad 0, etc.
+ *
+ * The vector has to have the
+ * right size before being passed
+ * to this function.
+ *
+ * This function is most often
+ * used on active objects (edges,
+ * faces, cells). It can be used
+ * on non-active objects as well
+ * (i.e. objects that have
+ * children), but only if the
+ * finite element under
+ * consideration has degrees of
+ * freedom exclusively on
+ * vertices. Otherwise, the
+ * function doesn't make much
+ * sense, since for example
+ * inactive edges do not have
+ * degrees of freedom associated
+ * with them at all.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ *
+ * For cells, there is only a
+ * single possible finite element
+ * index (namely the one for that
+ * cell, returned by
+                                        * <code>cell-@>active_fe_index</code>). Consequently,
+ * the derived DoFCellAccessor
+ * class has an overloaded
+ * version of this function that
+ * calls the present function
+ * with
+ * <code>cell-@>active_fe_index</code>
+ * as last argument.
+ */
- void get_dof_indices (std::vector<unsigned int> &dof_indices,
++ void get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index = AccessorData::default_fe_index) const;
+
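
As a minimal editorial sketch of how this interface is used (not part of the patch), assume the usual deal.II headers are included and that dof_handler is a non-hp DoFHandler<dim> with dim >= 2, already distributed with a finite element fe; both names are placeholders. The index vector must be sized beforehand, as the comment above requires, and now uses types::global_dof_index:

template <int dim>
void collect_face_dofs (const DoFHandler<dim>    &dof_handler,
                        const FiniteElement<dim> &fe)
{
  // Size the vector up front; get_dof_indices() does not resize it.
  std::vector<types::global_dof_index> face_dof_indices (fe.dofs_per_face);

  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
      // The default fe_index is the only legal value for a non-hp DoFHandler.
      cell->face(f)->get_dof_indices (face_dof_indices);
}
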
+ /**
+                                        * Global DoF index of the <i>i</i>th
+                                        * degree of freedom associated with the
+                                        * @p vertex-th vertex of the present cell.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ */
- unsigned int vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
++ types::global_dof_index vertex_dof_index (const unsigned int vertex,
++ const unsigned int i,
++ const unsigned int fe_index = AccessorData::default_fe_index) const;
+
+ /**
+ * Index of the <i>i</i>th degree
+ * of freedom of this object.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ *
+ * @note While the get_dof_indices()
+ * function returns an array that
+ * contains the indices of all degrees of
+ * freedom that somehow live on this
+ * object (i.e. on the vertices, edges or
+ * interior of this object), the current
+ * dof_index() function only considers
+ * the DoFs that really belong to this
+ * particular object's interior. In other
+ * words, as an example, if the current
+ * object refers to a quad (a cell in 2d,
+ * a face in 3d) and the finite element
+ * associated with it is a bilinear one,
+ * then the get_dof_indices() will return
+ * an array of size 4 while dof_index()
+                                        * will produce an exception because no
+                                        * degrees of freedom are defined in the
+                                        * interior of the face.
+ */
- unsigned int dof_index (const unsigned int i,
- const unsigned int fe_index = AccessorData::default_fe_index) const;
++ types::global_dof_index dof_index (const unsigned int i,
++ const unsigned int fe_index = AccessorData::default_fe_index) const;
+
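
To make the distinction drawn in the note above concrete, here is a hedged sketch (editorial, not part of the patch); it assumes a DoFHandler<dim> named dof_handler that was distributed with a biquadratic FE_Q element, so the cell interior carries exactly one degree of freedom and dof_index() is legal:

template <int dim>
void contrast_dof_queries (const DoFHandler<dim> &dof_handler)
{
  const FiniteElement<dim> &fe = dof_handler.get_fe ();

  // All DoFs that live anywhere on the cell, in the standard
  // vertex/line/quad ordering.
  std::vector<types::global_dof_index> all_dofs (fe.dofs_per_cell);

  typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active();
  cell->get_dof_indices (all_dofs);

  // Only the DoFs in the cell's interior are reachable through dof_index();
  // a biquadratic FE_Q element has exactly one such DoF.
  const types::global_dof_index interior_dof = cell->dof_index (0);
  (void)interior_dof;
}
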
+ /**
+ * @}
+ */
+
+ /**
+ * @name Accessing the finite element associated with this object
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the number of finite
+ * elements that are active on a
+ * given object.
+ *
+ * For non-hp DoFHandler objects,
+ * the answer is of course always
+ * one. However, for
+ * hp::DoFHandler objects, this
+ * isn't the case: If this is a
+ * cell, the answer is of course
+ * one. If it is a face, the
+ * answer may be one or two,
+ * depending on whether the two
+ * adjacent cells use the same
+ * finite element or not. If it
+ * is an edge in 3d, the possible
+ * return value may be one or any
+ * other value larger than that.
+ */
+ unsigned int
+ n_active_fe_indices () const;
+
+ /**
+ * Return the @p n-th active fe
+ * index on this object. For
+ * cells and all non-hp objects,
+ * there is only a single active
+ * fe index, so the argument must
+ * be equal to zero. For
+ * lower-dimensional hp objects,
+ * there are
+ * n_active_fe_indices() active
+ * finite elements, and this
+ * function can be queried for
+ * their indices.
+ */
+ unsigned int
+ nth_active_fe_index (const unsigned int n) const;
+
+ /**
+ * Return true if the finite
+ * element with given index is
+ * active on the present
+ * object. For non-hp DoF
+ * accessors, this is of course
+ * the case only if @p fe_index
+ * equals zero. For cells, it is
+ * the case if @p fe_index equals
+ * active_fe_index() of this
+ * cell. For faces and other
+ * lower-dimensional objects,
+ * there may be more than one @p
+ * fe_index that are active on
+ * any given object (see
+ * n_active_fe_indices()).
+ */
+ bool
+ fe_index_is_active (const unsigned int fe_index) const;
+
+ /**
+ * Return a reference to the finite
+ * element used on this object with the
+ * given @p fe_index. @p fe_index must be
+ * used on this object,
+ * i.e. <code>fe_index_is_active(fe_index)</code>
+ * must return true.
+ */
+ const FiniteElement<DH<1,spacedim>::dimension,DH<1,spacedim>::space_dimension> &
+ get_fe (const unsigned int fe_index) const;
+
+ /**
+ * @}
+ */
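
A short sketch of how the functions in this group are typically combined (editorial; dof_handler is an assumed, already distributed hp::DoFHandler<dim> with dim >= 2 so that cell->face() is available):

template <int dim>
void report_active_fe_indices (const hp::DoFHandler<dim> &dof_handler)
{
  typename hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
      for (unsigned int n=0; n<cell->face(f)->n_active_fe_indices(); ++n)
        {
          // One or two indices for a face, depending on whether the two
          // adjacent cells use the same finite element.
          const unsigned int fe_index = cell->face(f)->nth_active_fe_index (n);
          const FiniteElement<dim> &fe = cell->face(f)->get_fe (fe_index);
          (void)fe; // e.g. query fe.dofs_per_face here
        }
}
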
+
+ /**
+ * Exceptions for child classes
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcInvalidObject);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcVectorNotEmpty);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcVectorDoesNotMatch);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcMatrixDoesNotMatch);
+ /**
+ * A function has been called for
+ * a cell which should be active,
+ * but is refined. @ref GlossActive
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcNotActive);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcCantCompareIterators);
+
+ protected:
+
+ /**
+ * Store the address of the DoFHandler object
+ * to be accessed.
+ */
+ DH<1,spacedim> *dof_handler;
+
+ /**
+ * Compare for equality.
+ */
+ bool operator == (const DoFAccessor &) const;
+
+ /**
+ * Compare for inequality.
+ */
+ bool operator != (const DoFAccessor &) const;
+
+ /**
+ * Reset the DoF handler pointer.
+ */
+ void set_dof_handler (DH<1,spacedim> *dh);
+
+ /**
+ * Set the index of the
+ * <i>i</i>th degree of freedom
+ * of this object to @p index.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ */
+                                        void set_dof_index (const unsigned int i,
-                                                            const unsigned int index,
++                                                           const types::global_dof_index index,
+ const unsigned int fe_index = AccessorData::default_fe_index) const;
+
+ /**
+                                        * Set the global index of the <i>i</i>th
+                                        * degree of freedom on the @p vertex-th vertex of
+ * the present cell to @p index.
+ *
+ * The last argument denotes the
+ * finite element index. For the
+ * standard ::DoFHandler class,
+ * this value must be equal to
+ * its default value since that
+ * class only supports the same
+ * finite element on all cells
+ * anyway.
+ *
+ * However, for hp objects
+ * (i.e. the hp::DoFHandler
+ * class), different finite
+ * element objects may be used on
+ * different cells. On faces
+ * between two cells, as well as
+ * vertices, there may therefore
+ * be two sets of degrees of
+ * freedom, one for each of the
+ * finite elements used on the
+ * adjacent cells. In order to
+ * specify which set of degrees
+ * of freedom to work on, the
+ * last argument is used to
+ * disambiguate. Finally, if this
+ * function is called for a cell
+ * object, there can only be a
+ * single set of degrees of
+ * freedom, and fe_index has to
+ * match the result of
+ * active_fe_index().
+ */
+ void set_vertex_dof_index (const unsigned int vertex,
+ const unsigned int i,
- const unsigned int index,
++ const types::global_dof_index index,
+ const unsigned int fe_index = AccessorData::default_fe_index) const;
+
+ /**
+ * Iterator classes need to be friends
+ * because they need to access operator==
+ * and operator!=.
+ */
+ template <typename> friend class TriaRawIterator;
+
+
+ /**
+ * Make the DoFHandler class a friend so
+ * that it can call the set_xxx()
+ * functions.
+ */
+ template <int, int> friend class DoFHandler;
+ template <int, int> friend class hp::DoFHandler;
+
+ friend struct dealii::internal::DoFHandler::Policy::Implementation;
+ friend struct dealii::internal::DoFHandler::Implementation;
+ friend struct dealii::internal::hp::DoFHandler::Implementation;
+ friend struct dealii::internal::DoFCellAccessor::Implementation;
};
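
The hunk above changes the return and argument types of dof_index(), vertex_dof_index(), set_dof_index() and set_vertex_dof_index() from unsigned int to types::global_dof_index. A minimal sketch of caller code written against the new signatures (editorial; dof_handler and solution are placeholder names for an already distributed non-hp DoFHandler<dim> and a matching solution vector, and the element is assumed to have at least one DoF per vertex, e.g. FE_Q):

template <int dim>
void peek_vertex_values (const DoFHandler<dim> &dof_handler,
                         const Vector<double>  &solution)
{
  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    {
      // Global index of the first DoF sitting on vertex 0 of this cell,
      // stored in the new types::global_dof_index type.
      const types::global_dof_index i = cell->vertex_dof_index (0, 0);
      const double value = solution (i);
      (void)value; // use the value, e.g. for point evaluation
    }
}
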
template <class DH>
class DoFCellAccessor : public DoFAccessor<DH::dimension,DH>
{
- public:
- /**
- * Extract dimension from DH.
- */
- static const unsigned int dim = DH::dimension;
-
- /**
- * Extract space dimension from DH.
- */
- static const unsigned int spacedim = DH::space_dimension;
-
- /**
- * Declare the data type that
- * this accessor class expects to
- * get passed from the iterator
- * classes.
- */
- typedef typename DoFAccessor<DH::dimension,DH>::AccessorData AccessorData;
-
- /**
- * Declare a typedef to the base
- * class to make accessing some
- * of the exception classes
- * simpler.
- */
- typedef DoFAccessor<DH::dimension,DH> BaseClass;
-
- /**
- * Define the type of the
- * container this is part of.
- */
- typedef DH Container;
-
- /**
- * @name Constructors
- */
- /**
- * @{
- */
-
- /**
- * Constructor
- */
- DoFCellAccessor (const Triangulation<DH::dimension,DH::space_dimension> *tria,
- const int level,
- const int index,
- const AccessorData *local_data);
-
- /**
- * Conversion constructor. This
- * constructor exists to make certain
- * constructs simpler to write in
- * dimension independent code. For
- * example, it allows assigning a face
- * iterator to a line iterator, an
- * operation that is useful in 2d but
- * doesn't make any sense in 3d. The
- * constructor here exists for the
- * purpose of making the code conform to
- * C++ but it will unconditionally abort;
- * in other words, assigning a face
- * iterator to a line iterator is better
- * put into an if-statement that checks
- * that the dimension is two, and assign
- * to a quad iterator in 3d (an operator
- * that, without this constructor would
- * be illegal if we happen to compile for
- * 2d).
- */
- template <int structdim2, int dim2, int spacedim2>
- DoFCellAccessor (const InvalidAccessor<structdim2,dim2,spacedim2> &);
-
- /**
- * Another conversion operator
- * between objects that don't
- * make sense, just like the
- * previous one.
- */
- template <int dim2, class DH2>
- DoFCellAccessor (const DoFAccessor<dim2, DH2> &);
-
- /**
- * @}
- */
-
- /**
- * Return the parent as a DoF
- * cell iterator. This
- * function is needed since the
- * parent function of the base
- * class returns a cell accessor
- * without access to the DoF
- * data.
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
- parent () const;
-
- /**
- * @name Accessing sub-objects and neighbors
- */
- /**
- * @{
- */
-
- /**
- * Return the @p ith neighbor as
- * a DoF cell iterator. This
- * function is needed since the
- * neighbor function of the base
- * class returns a cell accessor
- * without access to the DoF
- * data.
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
- neighbor (const unsigned int) const;
-
- /**
- * Return the @p ith child as a
- * DoF cell iterator. This
- * function is needed since the
- * child function of the base
- * class returns a cell accessor
- * without access to the DoF
- * data.
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
- child (const unsigned int) const;
-
- /**
- * Return an iterator to the @p ith face
- * of this cell.
- *
- * This function is not implemented in
- * 1D, and maps to DoFAccessor::line
- * in 2D.
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::face_iterator
- face (const unsigned int i) const;
-
- /**
- * Return the result of the
- * @p neighbor_child_on_subface
- * function of the base class,
- * but convert it so that one can
- * also access the DoF data (the
- * function in the base class
- * only returns an iterator with
- * access to the triangulation
- * data).
- */
- typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
- neighbor_child_on_subface (const unsigned int face_no,
- const unsigned int subface_no) const;
-
- /**
- * @}
- */
-
- /**
- * @name Extracting values from global vectors
- */
- /**
- * @{
- */
-
- /**
- * Return the values of the given vector
- * restricted to the dofs of this
- * cell in the standard ordering: dofs
- * on vertex 0, dofs on vertex 1, etc,
- * dofs on line 0, dofs on line 1, etc,
- * dofs on quad 0, etc.
- *
- * The vector has to have the
- * right size before being passed
- * to this function. This
- * function is only callable for
- * active cells.
- *
- * The input vector may be either
- * a <tt>Vector<float></tt>,
- * Vector<double>, or a
- * BlockVector<double>, or a
- * PETSc or Trilinos vector if
- * deal.II is compiled to support
- * these libraries. It is in the
- * responsibility of the caller
- * to assure that the types of
- * the numbers stored in input
- * and output vectors are
- * compatible and with similar
- * accuracy.
- */
- template <class InputVector, typename number>
- void get_dof_values (const InputVector &values,
- Vector<number> &local_values) const;
-
- /**
- * Return the values of the given vector
- * restricted to the dofs of this
- * cell in the standard ordering: dofs
- * on vertex 0, dofs on vertex 1, etc,
- * dofs on line 0, dofs on line 1, etc,
- * dofs on quad 0, etc.
- *
- * The vector has to have the
- * right size before being passed
- * to this function. This
- * function is only callable for
- * active cells.
- *
- * The input vector may be either
- * a <tt>Vector<float></tt>,
- * Vector<double>, or a
- * BlockVector<double>, or a
- * PETSc or Trilinos vector if
- * deal.II is compiled to support
- * these libraries. It is in the
- * responsibility of the caller
- * to assure that the types of
- * the numbers stored in input
- * and output vectors are
- * compatible and with similar
- * accuracy.
- */
- template <class InputVector, typename ForwardIterator>
- void get_dof_values (const InputVector &values,
- ForwardIterator local_values_begin,
- ForwardIterator local_values_end) const;
-
- /**
- * Return the values of the given vector
- * restricted to the dofs of this
- * cell in the standard ordering: dofs
- * on vertex 0, dofs on vertex 1, etc,
- * dofs on line 0, dofs on line 1, etc,
- * dofs on quad 0, etc.
- *
- * The vector has to have the
- * right size before being passed
- * to this function. This
- * function is only callable for
- * active cells.
- *
- * The input vector may be either a
- * <tt>Vector<float></tt>,
- * Vector<double>, or a
- * BlockVector<double>, or a PETSc or
- * Trilinos vector if deal.II is
- * compiled to support these
- * libraries. It is in the
- * responsibility of the caller to
- * assure that the types of the numbers
- * stored in input and output vectors
- * are compatible and with similar
- * accuracy. The ConstraintMatrix
- * passed as an argument to this
- * function makes sure that constraints
- * are correctly distributed when the
- * dof values are calculated.
- */
- template <class InputVector, typename ForwardIterator>
- void get_dof_values (const ConstraintMatrix &constraints,
- const InputVector &values,
- ForwardIterator local_values_begin,
- ForwardIterator local_values_end) const;
-
- /**
- * This function is the counterpart to
- * get_dof_values(): it takes a vector
- * of values for the degrees of freedom
- * of the cell pointed to by this iterator
- * and writes these values into the global
- * data vector @p values. This function
- * is only callable for active cells.
- *
- * Note that for continuous finite
- * elements, calling this function affects
- * the dof values on neighboring cells as
- * well. It may also violate continuity
- * requirements for hanging nodes, if
- * neighboring cells are less refined than
- * the present one. These requirements
- * are not taken care of and must be
- * enforced by the user afterwards.
- *
- * The vector has to have the
- * right size before being passed
- * to this function.
- *
- * The output vector may be either a
- * Vector<float>,
- * Vector<double>, or a
- * BlockVector<double>, or a
- * PETSc vector if deal.II is compiled to
- * support these libraries. It is in the
- * responsibility of the caller to assure
- * that the types of the numbers stored
- * in input and output vectors are
- * compatible and with similar accuracy.
- */
- template <class OutputVector, typename number>
- void set_dof_values (const Vector<number> &local_values,
- OutputVector &values) const;
-
- /**
- * Return the interpolation of
- * the given finite element
- * function to the present
- * cell. In the simplest case,
- * the cell is a terminal one,
- * i.e. has no children; then,
- * the returned value is the
- * vector of nodal values on that
- * cell. You could then as well
- * get the desired values through
- * the @p get_dof_values
- * function. In the other case,
- * when the cell has children, we
- * use the restriction matrices
- * provided by the finite element
- * class to compute the
- * interpolation from the
- * children to the present cell.
- *
- * It is assumed that both
- * vectors already have the right
- * size beforehand.
- *
- * Unlike the get_dof_values()
- * function, this function works
- * on cells rather than to lines,
- * quads, and hexes, since
- * interpolation is presently
- * only provided for cells by the
- * finite element classes.
- */
- template <class InputVector, typename number>
- void get_interpolated_dof_values (const InputVector &values,
- Vector<number> &interpolated_values) const;
-
- /**
- * This, again, is the
- * counterpart to
- * get_interpolated_dof_values():
- * you specify the dof values on
- * a cell and these are
- * interpolated to the children
- * of the present cell and set on
- * the terminal cells.
- *
- * In principle, it works as
- * follows: if the cell pointed
- * to by this object is terminal,
- * then the dof values are set in
- * the global data vector by
- * calling the set_dof_values()
- * function; otherwise, the
- * values are prolonged to each
- * of the children and this
- * function is called for each of
- * them.
- *
- * Using the
- * get_interpolated_dof_values()
- * and this function, you can
- * compute the interpolation of a
- * finite element function to a
- * coarser grid by first getting
- * the interpolated solution on a
- * cell of the coarse grid and
- * afterwards redistributing it
- * using this function.
- *
- * Note that for continuous
- * finite elements, calling this
- * function affects the dof
- * values on neighboring cells as
- * well. It may also violate
- * continuity requirements for
- * hanging nodes, if neighboring
- * cells are less refined than
- * the present one, or if their
- * children are less refined than
- * the children of this
- * cell. These requirements are
- * not taken care of and must be
- * enforced by the user
- * afterward.
- *
- * It is assumed that both
- * vectors already have the right
- * size beforehand. This function
- * relies on the existence of a
- * natural interpolation property
- * of finite element spaces of a
- * cell to its children, denoted
- * by the prolongation matrices
- * of finite element classes. For
- * some elements, the spaces on
- * coarse and fine grids are not
- * nested, in which case the
- * interpolation to a child is
- * not the identity; refer to the
- * documentation of the
- * respective finite element
- * class for a description of
- * what the prolongation matrices
- * represent in this case.
- *
- * Unlike the set_dof_values()
- * function, this function is
- * associated to cells rather
- * than to lines, quads, and
- * hexes, since interpolation is
- * presently only provided for
- * cells by the finite element
- * objects.
- *
- * The output vector may be either a
- * Vector<float>,
- * Vector<double>, or a
- * BlockVector<double>, or a
- * PETSc vector if deal.II is compiled to
- * support these libraries. It is in the
- * responsibility of the caller to assure
- * that the types of the numbers stored
- * in input and output vectors are
- * compatible and with similar accuracy.
- */
- template <class OutputVector, typename number>
- void set_dof_values_by_interpolation (const Vector<number> &local_values,
- OutputVector &values) const;
-
- /**
- * Distribute a local (cell
- * based) vector to a global one
- * by mapping the local numbering
- * of the degrees of freedom to
- * the global one and entering
- * the local values into the
- * global vector.
- *
- * The elements are
- * <em>added</em> up to the
- * elements in the global vector,
- * rather than just set, since
- * this is usually what one
- * wants.
- */
- template <typename number, typename OutputVector>
- void
- distribute_local_to_global (const Vector<number> &local_source,
- OutputVector &global_destination) const;
-
- /**
- * Distribute a local (cell based)
- * vector in iterator format to a
- * global one by mapping the local
- * numbering of the degrees of freedom
- * to the global one and entering the
- * local values into the global vector.
- *
- * The elements are <em>added</em> up
- * to the elements in the global
- * vector, rather than just set, since
- * this is usually what one wants.
- */
- template <typename ForwardIterator, typename OutputVector>
- void
- distribute_local_to_global (ForwardIterator local_source_begin,
- ForwardIterator local_source_end,
- OutputVector &global_destination) const;
-
- /**
- * Distribute a local (cell based)
- * vector in iterator format to a
- * global one by mapping the local
- * numbering of the degrees of freedom
- * to the global one and entering the
- * local values into the global vector.
- *
- * The elements are <em>added</em> up
- * to the elements in the global
- * vector, rather than just set, since
- * this is usually what one
- * wants. Moreover, the
- * ConstraintMatrix passed to this
- * function makes sure that also
- * constraints are eliminated in this
- * process.
- */
- template <typename ForwardIterator, typename OutputVector>
- void
- distribute_local_to_global (const ConstraintMatrix &constraints,
- ForwardIterator local_source_begin,
- ForwardIterator local_source_end,
- OutputVector &global_destination) const;
-
- /**
- * This function does much the
- * same as the
- * <tt>distribute_local_to_global(Vector,Vector)</tt>
- * function, but operates on
- * matrices instead of
- * vectors. If the matrix type is
- * a sparse matrix then it is
- * supposed to have non-zero
- * entry slots where required.
- */
- template <typename number, typename OutputMatrix>
- void
- distribute_local_to_global (const FullMatrix<number> &local_source,
- OutputMatrix &global_destination) const;
-
- /**
- * This function does what the two
- * <tt>distribute_local_to_global</tt>
- * functions with vector and matrix
- * argument do, but all at once.
- */
- template <typename number, typename OutputMatrix, typename OutputVector>
- void
- distribute_local_to_global (const FullMatrix<number> &local_matrix,
- const Vector<number> &local_vector,
- OutputMatrix &global_matrix,
- OutputVector &global_vector) const;
-
- /**
- * @}
- */
-
- /**
- * @name Accessing the DoF indices of this object
- */
- /**
- * @{
- */
-
- /**
- * Return the indices of the dofs of this
- * quad in the standard ordering: dofs
- * on vertex 0, dofs on vertex 1, etc,
- * dofs on line 0, dofs on line 1, etc,
- * dofs on quad 0, etc.
- *
- * It is assumed that the vector already
- * has the right size beforehand.
- *
- * This function reimplements the
- * same function in the base
- * class. The functions in the
- * base classes are available for
- * all geometric objects,
- * i.e. even in 3d they can be
- * used to access the dof indices
- * of edges, for example. On the
- * other hand, the most common
- * case is clearly the use on
- * cells, which is why we cache
- * the array for each cell, but
- * not edge. To retrieve the
- * cached values, rather than
- * collect the necessary
- * information every time, this
- * function overwrites the one in
- * the base class.
- *
- * This function is most often
- * used on active objects (edges,
- * faces, cells). It can be used
- * on non-active objects as well
- * (i.e. objects that have
- * children), but only if the
- * finite element under
- * consideration has degrees of
- * freedom exclusively on
- * vertices. Otherwise, the
- * function doesn't make much
- * sense, since for example
- * inactive edges do not have
- * degrees of freedom associated
- * with them at all.
- */
- void get_dof_indices (std::vector<types::global_dof_index> &dof_indices) const;
-
- void get_mg_dof_indices (std::vector<types::global_dof_index>& dof_indices) const;
-
- /**
- * @}
- */
-
- /**
- * @name Accessing the finite element associated with this object
- */
- /**
- * @{
- */
-
- /**
- * Return the finite element that
- * is used on the cell pointed to
- * by this iterator. For non-hp
- * DoF handlers, this is of
- * course always the same
- * element, independent of the
- * cell we are presently on, but
- * for hp DoF handlers, this may
- * change from cell to cell.
- */
- const FiniteElement<DH::dimension,DH::space_dimension> &
- get_fe () const;
-
- /**
- * Returns the index inside the
- * hp::FECollection of the FiniteElement
- * used for this cell.
- */
- unsigned int active_fe_index () const;
-
- /**
- * Sets the index of the FiniteElement used for
- * this cell.
- */
- void set_active_fe_index (const unsigned int i);
- /**
- * @}
- */
-
- /**
- * Set the DoF indices of this
- * cell to the given values. This
- * function bypasses the DoF
- * cache, if one exists for the
- * given DoF handler class.
- */
- void set_dof_indices (const std::vector<types::global_dof_index> &dof_indices);
-
- /**
- * Update the cache in which we
- * store the dof indices of this
- * cell.
- */
- void update_cell_dof_indices_cache () const;
-
- private:
- /**
- * Copy operator. This is normally used
- * in a context like <tt>iterator a,b;
- * *a=*b;</tt>. Presumably, the intent
- * here is to copy the object pointed to
- * by @p b to the object pointed to by
- * @p a. However, the result of
- * dereferencing an iterator is not an
- * object but an accessor; consequently,
- * this operation is not useful for
- * iterators on triangulations. We
- * declare this function here private,
- * thus it may not be used from outside.
- * Furthermore it is not implemented and
- * will give a linker error if used
- * anyway.
- */
- DoFCellAccessor<DH> &
- operator = (const DoFCellAccessor<DH> &da);
-
- /**
- * Make the DoFHandler class a
- * friend so that it can call the
- * update_cell_dof_indices_cache()
- * function
- */
- template <int dim, int spacedim> friend class DoFHandler;
- friend struct dealii::internal::DoFCellAccessor::Implementation;
+ public:
+ /**
+ * Extract dimension from DH.
+ */
+ static const unsigned int dim = DH::dimension;
+
+ /**
+ * Extract space dimension from DH.
+ */
+ static const unsigned int spacedim = DH::space_dimension;
+
+ /**
+ * Declare the data type that
+ * this accessor class expects to
+ * get passed from the iterator
+ * classes.
+ */
+ typedef typename DoFAccessor<DH::dimension,DH>::AccessorData AccessorData;
+
+ /**
+ * Declare a typedef to the base
+ * class to make accessing some
+ * of the exception classes
+ * simpler.
+ */
+ typedef DoFAccessor<DH::dimension,DH> BaseClass;
+
+ /**
+ * Define the type of the
+ * container this is part of.
+ */
+ typedef DH Container;
+
+ /**
+ * @name Constructors
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Constructor
+ */
+ DoFCellAccessor (const Triangulation<DH::dimension,DH::space_dimension> *tria,
+ const int level,
+ const int index,
+ const AccessorData *local_data);
+
+ /**
+ * Conversion constructor. This
+ * constructor exists to make certain
+ * constructs simpler to write in
+ * dimension independent code. For
+ * example, it allows assigning a face
+ * iterator to a line iterator, an
+ * operation that is useful in 2d but
+ * doesn't make any sense in 3d. The
+ * constructor here exists for the
+ * purpose of making the code conform to
+                                        * C++, but it will unconditionally abort;
+                                        * in other words, assigning a face
+                                        * iterator to a line iterator is better
+                                        * put into an if-statement that checks
+                                        * that the dimension is two, and to assign
+                                        * to a quad iterator in 3d (an operation
+                                        * that, without this constructor, would
+                                        * be illegal if we happened to compile
+                                        * for 2d).
+ */
+ template <int structdim2, int dim2, int spacedim2>
+ DoFCellAccessor (const InvalidAccessor<structdim2,dim2,spacedim2> &);
+
+ /**
+ * Another conversion operator
+ * between objects that don't
+ * make sense, just like the
+ * previous one.
+ */
+ template <int dim2, class DH2>
+ DoFCellAccessor (const DoFAccessor<dim2, DH2> &);
+
+ /**
+ * @}
+ */
+
+ /**
- * Return the parent as a DoF
- * cell iterator. This
- * function is needed since the
- * parent function of the base
- * class returns a cell accessor
- * without access to the DoF
- * data.
- */
++ * Return the parent as a DoF
++ * cell iterator. This
++ * function is needed since the
++ * parent function of the base
++ * class returns a cell accessor
++ * without access to the DoF
++ * data.
++ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
+ parent () const;
+
+ /**
+ * @name Accessing sub-objects and neighbors
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the @p ith neighbor as
+ * a DoF cell iterator. This
+ * function is needed since the
+ * neighbor function of the base
+ * class returns a cell accessor
+ * without access to the DoF
+ * data.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
+ neighbor (const unsigned int) const;
+
+ /**
+ * Return the @p ith child as a
+ * DoF cell iterator. This
+ * function is needed since the
+ * child function of the base
+ * class returns a cell accessor
+ * without access to the DoF
+ * data.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
+ child (const unsigned int) const;
+
+ /**
+ * Return an iterator to the @p ith face
+ * of this cell.
+ *
+ * This function is not implemented in
+ * 1D, and maps to DoFAccessor::line
+ * in 2D.
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::face_iterator
+ face (const unsigned int i) const;
+
+ /**
+ * Return the result of the
+ * @p neighbor_child_on_subface
+ * function of the base class,
+ * but convert it so that one can
+ * also access the DoF data (the
+ * function in the base class
+ * only returns an iterator with
+ * access to the triangulation
+ * data).
+ */
+ typename dealii::internal::DoFHandler::Iterators<DH>::cell_iterator
+ neighbor_child_on_subface (const unsigned int face_no,
+ const unsigned int subface_no) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Extracting values from global vectors
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the values of the given vector
+ * restricted to the dofs of this
+ * cell in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, etc,
+ * dofs on line 0, dofs on line 1, etc,
+ * dofs on quad 0, etc.
+ *
+ * The vector has to have the
+ * right size before being passed
+ * to this function. This
+ * function is only callable for
+ * active cells.
+ *
+ * The input vector may be either
+ * a <tt>Vector<float></tt>,
+ * Vector<double>, or a
+ * BlockVector<double>, or a
+ * PETSc or Trilinos vector if
+ * deal.II is compiled to support
+                                        * these libraries. It is the
+                                        * responsibility of the caller
+                                        * to ensure that the types of
+                                        * the numbers stored in the input
+                                        * and output vectors are
+                                        * compatible and of similar
+ * accuracy.
+ */
+ template <class InputVector, typename number>
+ void get_dof_values (const InputVector &values,
+ Vector<number> &local_values) const;
+
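
A hedged sketch of the extraction described above (editorial, not part of the patch; dof_handler and solution are assumed to exist and to match each other):

template <int dim>
void gather_cell_values (const DoFHandler<dim> &dof_handler,
                         const Vector<double>  &solution)
{
  // The local vector must be sized before the call, as stated above.
  Vector<double> local_values (dof_handler.get_fe().dofs_per_cell);

  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    // Values of `solution` restricted to this cell, in the standard
    // vertex/line/quad ordering.
    cell->get_dof_values (solution, local_values);
}
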
+ /**
+ * Return the values of the given vector
+ * restricted to the dofs of this
+ * cell in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, etc,
+ * dofs on line 0, dofs on line 1, etc,
+ * dofs on quad 0, etc.
+ *
+ * The vector has to have the
+ * right size before being passed
+ * to this function. This
+ * function is only callable for
+ * active cells.
+ *
+ * The input vector may be either
+ * a <tt>Vector<float></tt>,
+ * Vector<double>, or a
+ * BlockVector<double>, or a
+ * PETSc or Trilinos vector if
+ * deal.II is compiled to support
+ * these libraries. It is in the
+                                        * these libraries. It is the
+                                        * responsibility of the caller
+                                        * to ensure that the types of
+                                        * the numbers stored in the input
+                                        * and output vectors are
+                                        * compatible and of similar
+ */
+ template <class InputVector, typename ForwardIterator>
+ void get_dof_values (const InputVector &values,
+ ForwardIterator local_values_begin,
+ ForwardIterator local_values_end) const;
+
+ /**
+ * Return the values of the given vector
+ * restricted to the dofs of this
+ * cell in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, etc,
+ * dofs on line 0, dofs on line 1, etc,
+ * dofs on quad 0, etc.
+ *
+ * The vector has to have the
+ * right size before being passed
+ * to this function. This
+ * function is only callable for
+ * active cells.
+ *
+ * The input vector may be either a
+ * <tt>Vector<float></tt>,
+ * Vector<double>, or a
+ * BlockVector<double>, or a PETSc or
+ * Trilinos vector if deal.II is
+ * compiled to support these
+                                        * libraries. It is the
+                                        * responsibility of the caller to
+                                        * ensure that the types of the numbers
+                                        * stored in the input and output vectors
+                                        * are compatible and of similar
+                                        * accuracy. The ConstraintMatrix
+ * passed as an argument to this
+ * function makes sure that constraints
+ * are correctly distributed when the
+ * dof values are calculated.
+ */
+ template <class InputVector, typename ForwardIterator>
+ void get_dof_values (const ConstraintMatrix &constraints,
+ const InputVector &values,
+ ForwardIterator local_values_begin,
+ ForwardIterator local_values_end) const;
+
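
The ConstraintMatrix variant can be sketched the same way, now writing into an iterator range; constraints is assumed to be a closed ConstraintMatrix built for dof_handler (all names are placeholders):

template <int dim>
void extract_constrained_values (const DoFHandler<dim>  &dof_handler,
                                 const ConstraintMatrix &constraints,
                                 const Vector<double>   &solution)
{
  std::vector<double> local_values (dof_handler.get_fe().dofs_per_cell);

  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    // Constraints are resolved while the local values are extracted.
    cell->get_dof_values (constraints, solution,
                          local_values.begin(), local_values.end());
}
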
+ /**
+ * This function is the counterpart to
+ * get_dof_values(): it takes a vector
+ * of values for the degrees of freedom
+ * of the cell pointed to by this iterator
+ * and writes these values into the global
+ * data vector @p values. This function
+ * is only callable for active cells.
+ *
+ * Note that for continuous finite
+ * elements, calling this function affects
+ * the dof values on neighboring cells as
+ * well. It may also violate continuity
+ * requirements for hanging nodes, if
+ * neighboring cells are less refined than
+ * the present one. These requirements
+ * are not taken care of and must be
+ * enforced by the user afterwards.
+ *
+ * The vector has to have the
+ * right size before being passed
+ * to this function.
+ *
+ * The output vector may be either a
+ * Vector<float>,
+ * Vector<double>, or a
+ * BlockVector<double>, or a
+ * PETSc vector if deal.II is compiled to
+                                        * support these libraries. It is the
+                                        * responsibility of the caller to ensure
+                                        * that the types of the numbers stored
+                                        * in the input and output vectors are
+                                        * compatible and of similar accuracy.
+ */
+ template <class OutputVector, typename number>
+ void set_dof_values (const Vector<number> &local_values,
+ OutputVector &values) const;
+
+ /**
+ * Return the interpolation of
+ * the given finite element
+ * function to the present
+ * cell. In the simplest case,
+ * the cell is a terminal one,
+ * i.e. has no children; then,
+ * the returned value is the
+ * vector of nodal values on that
+ * cell. You could then as well
+ * get the desired values through
+ * the @p get_dof_values
+ * function. In the other case,
+ * when the cell has children, we
+ * use the restriction matrices
+ * provided by the finite element
+ * class to compute the
+ * interpolation from the
+ * children to the present cell.
+ *
+ * It is assumed that both
+ * vectors already have the right
+ * size beforehand.
+ *
+ * Unlike the get_dof_values()
+ * function, this function works
+                                        * on cells rather than on lines,
+ * quads, and hexes, since
+ * interpolation is presently
+ * only provided for cells by the
+ * finite element classes.
+ */
+ template <class InputVector, typename number>
+ void get_interpolated_dof_values (const InputVector &values,
+ Vector<number> &interpolated_values) const;
+
+ /**
+ * This, again, is the
+ * counterpart to
+ * get_interpolated_dof_values():
+ * you specify the dof values on
+ * a cell and these are
+ * interpolated to the children
+ * of the present cell and set on
+ * the terminal cells.
+ *
+ * In principle, it works as
+ * follows: if the cell pointed
+ * to by this object is terminal,
+ * then the dof values are set in
+ * the global data vector by
+ * calling the set_dof_values()
+ * function; otherwise, the
+ * values are prolonged to each
+ * of the children and this
+ * function is called for each of
+ * them.
+ *
+ * Using the
+ * get_interpolated_dof_values()
+ * and this function, you can
+ * compute the interpolation of a
+ * finite element function to a
+ * coarser grid by first getting
+ * the interpolated solution on a
+ * cell of the coarse grid and
+ * afterwards redistributing it
+ * using this function.
+ *
+ * Note that for continuous
+ * finite elements, calling this
+ * function affects the dof
+ * values on neighboring cells as
+ * well. It may also violate
+ * continuity requirements for
+ * hanging nodes, if neighboring
+ * cells are less refined than
+ * the present one, or if their
+ * children are less refined than
+ * the children of this
+ * cell. These requirements are
+ * not taken care of and must be
+ * enforced by the user
+ * afterward.
+ *
+ * It is assumed that both
+ * vectors already have the right
+ * size beforehand. This function
+ * relies on the existence of a
+ * natural interpolation property
+ * of finite element spaces of a
+ * cell to its children, denoted
+ * by the prolongation matrices
+ * of finite element classes. For
+ * some elements, the spaces on
+ * coarse and fine grids are not
+ * nested, in which case the
+ * interpolation to a child is
+ * not the identity; refer to the
+ * documentation of the
+ * respective finite element
+ * class for a description of
+ * what the prolongation matrices
+ * represent in this case.
+ *
+ * Unlike the set_dof_values()
+ * function, this function is
+                                        * associated with cells rather
+ * than to lines, quads, and
+ * hexes, since interpolation is
+ * presently only provided for
+ * cells by the finite element
+ * objects.
+ *
+ * The output vector may be either a
+ * Vector<float>,
+ * Vector<double>, or a
+ * BlockVector<double>, or a
+ * PETSc vector if deal.II is compiled to
+                                        * support these libraries. It is the
+                                        * responsibility of the caller to ensure
+                                        * that the types of the numbers stored
+                                        * in the input and output vectors are
+                                        * compatible and of similar accuracy.
+ */
+ template <class OutputVector, typename number>
+ void set_dof_values_by_interpolation (const Vector<number> &local_values,
+ OutputVector &values) const;
+
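
The coarse-grid interpolation pattern described above can be sketched as follows (editorial; dof_handler, solution and interpolated are placeholder names, and interpolated is assumed to be sized like solution):

template <int dim>
void interpolate_to_coarse_cells (const DoFHandler<dim> &dof_handler,
                                  const Vector<double>  &solution,
                                  Vector<double>        &interpolated)
{
  Vector<double> local (dof_handler.get_fe().dofs_per_cell);

  typename DoFHandler<dim>::cell_iterator
    cell = dof_handler.begin(0),
    endc = dof_handler.end(0);
  for (; cell!=endc; ++cell)
    {
      // Restrict the function living on the children to this coarse cell...
      cell->get_interpolated_dof_values (solution, local);
      // ...and prolong it back down to the terminal cells of `interpolated`.
      cell->set_dof_values_by_interpolation (local, interpolated);
    }
}
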
+ /**
+ * Distribute a local (cell
+ * based) vector to a global one
+ * by mapping the local numbering
+ * of the degrees of freedom to
+ * the global one and entering
+ * the local values into the
+ * global vector.
+ *
+ * The elements are
+ * <em>added</em> up to the
+ * elements in the global vector,
+ * rather than just set, since
+ * this is usually what one
+ * wants.
+ */
+ template <typename number, typename OutputVector>
+ void
+ distribute_local_to_global (const Vector<number> &local_source,
+ OutputVector &global_destination) const;
+
+ /**
+ * Distribute a local (cell based)
+ * vector in iterator format to a
+ * global one by mapping the local
+ * numbering of the degrees of freedom
+ * to the global one and entering the
+ * local values into the global vector.
+ *
+ * The elements are <em>added</em> up
+ * to the elements in the global
+ * vector, rather than just set, since
+ * this is usually what one wants.
+ */
+ template <typename ForwardIterator, typename OutputVector>
+ void
+ distribute_local_to_global (ForwardIterator local_source_begin,
+ ForwardIterator local_source_end,
+ OutputVector &global_destination) const;
+
+ /**
+ * Distribute a local (cell based)
+ * vector in iterator format to a
+ * global one by mapping the local
+ * numbering of the degrees of freedom
+ * to the global one and entering the
+ * local values into the global vector.
+ *
+ * The elements are <em>added</em> up
+ * to the elements in the global
+ * vector, rather than just set, since
+ * this is usually what one
+ * wants. Moreover, the
+ * ConstraintMatrix passed to this
+                                        * function makes sure that constraints
+                                        * are also eliminated in this
+ * process.
+ */
+ template <typename ForwardIterator, typename OutputVector>
+ void
+ distribute_local_to_global (const ConstraintMatrix &constraints,
+ ForwardIterator local_source_begin,
+ ForwardIterator local_source_end,
+ OutputVector &global_destination) const;
+
+ /**
+ * This function does much the
+ * same as the
+ * <tt>distribute_local_to_global(Vector,Vector)</tt>
+ * function, but operates on
+ * matrices instead of
+ * vectors. If the matrix type is
+ * a sparse matrix then it is
+ * supposed to have non-zero
+ * entry slots where required.
+ */
+ template <typename number, typename OutputMatrix>
+ void
+ distribute_local_to_global (const FullMatrix<number> &local_source,
+ OutputMatrix &global_destination) const;
+
+ /**
+ * This function does what the two
+ * <tt>distribute_local_to_global</tt>
+ * functions with vector and matrix
+ * argument do, but all at once.
+ */
+ template <typename number, typename OutputMatrix, typename OutputVector>
+ void
+ distribute_local_to_global (const FullMatrix<number> &local_matrix,
+ const Vector<number> &local_vector,
+ OutputMatrix &global_matrix,
+ OutputVector &global_vector) const;
+
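
A hedged sketch of the combined matrix-and-vector form in a typical assembly step (editorial; the sparsity pattern of system_matrix is assumed to already contain the required entries, and all names are placeholders):

template <int dim>
void add_cell_contribution (const typename DoFHandler<dim>::active_cell_iterator &cell,
                            const FullMatrix<double> &cell_matrix,
                            const Vector<double>     &cell_rhs,
                            SparseMatrix<double>     &system_matrix,
                            Vector<double>           &system_rhs)
{
  // Entries are *added* to the global objects, not overwritten.
  cell->distribute_local_to_global (cell_matrix, cell_rhs,
                                    system_matrix, system_rhs);
}
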
+ /**
+ * @}
+ */
+
+ /**
+ * @name Accessing the DoF indices of this object
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the indices of the dofs of this
+                                        * cell in the standard ordering: dofs
+ * on vertex 0, dofs on vertex 1, etc,
+ * dofs on line 0, dofs on line 1, etc,
+ * dofs on quad 0, etc.
+ *
+ * It is assumed that the vector already
+ * has the right size beforehand.
+ *
+ * This function reimplements the
+ * same function in the base
+ * class. The functions in the
+ * base classes are available for
+ * all geometric objects,
+ * i.e. even in 3d they can be
+ * used to access the dof indices
+ * of edges, for example. On the
+ * other hand, the most common
+ * case is clearly the use on
+ * cells, which is why we cache
+ * the array for each cell, but
+                                        * not for each edge. To retrieve the
+ * cached values, rather than
+ * collect the necessary
+ * information every time, this
+                                        * function overrides the one in
+ * the base class.
+ *
+ * This function is most often
+ * used on active objects (edges,
+ * faces, cells). It can be used
+ * on non-active objects as well
+ * (i.e. objects that have
+ * children), but only if the
+ * finite element under
+ * consideration has degrees of
+ * freedom exclusively on
+ * vertices. Otherwise, the
+ * function doesn't make much
+ * sense, since for example
+ * inactive edges do not have
+ * degrees of freedom associated
+ * with them at all.
+ */
- void get_dof_indices (std::vector<unsigned int> &dof_indices) const;
++ void get_dof_indices (std::vector<types::global_dof_index> &dof_indices) const;
++
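++                                       /**
++                                        * Return, in @p dof_indices, the indices of the
++                                        * degrees of freedom of this cell in the multigrid
++                                        * (level) numbering, in the same standard ordering.
++                                        * As for get_dof_indices(), the vector has to have
++                                        * the right size before being passed to this
++                                        * function.
++                                        */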
++ void get_mg_dof_indices (std::vector<types::global_dof_index> &dof_indices) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Accessing the finite element associated with this object
+ */
+ /**
+ * @{
+ */
+
+ /**
+ * Return the finite element that
+ * is used on the cell pointed to
+ * by this iterator. For non-hp
+ * DoF handlers, this is of
+ * course always the same
+ * element, independent of the
+ * cell we are presently on, but
+ * for hp DoF handlers, this may
+ * change from cell to cell.
+ */
+ const FiniteElement<DH::dimension,DH::space_dimension> &
+ get_fe () const;
+
+ /**
+                                        * Return the index within the
+ * hp::FECollection of the FiniteElement
+ * used for this cell.
+ */
+ unsigned int active_fe_index () const;
+
+ /**
+                                        * Set the index of the FiniteElement used for
+ * this cell.
+ */
+ void set_active_fe_index (const unsigned int i);
+ /**
+ * @}
+ */
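
A short sketch of the usual workflow around set_active_fe_index(): the element is chosen per cell before degrees of freedom are distributed (editorial; dof_handler and fe_collection are placeholder names, and picking index 1 for boundary cells is only an example):

template <int dim>
void assign_elements (hp::DoFHandler<dim>         &dof_handler,
                      const hp::FECollection<dim> &fe_collection)
{
  // fe_collection is assumed to contain at least two elements,
  // e.g. FE_Q<dim>(1) and FE_Q<dim>(2).
  typename hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    // Use element 1 on boundary cells, element 0 elsewhere.
    cell->set_active_fe_index (cell->at_boundary() ? 1 : 0);

  // Only now are the degrees of freedom enumerated.
  dof_handler.distribute_dofs (fe_collection);
}
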
+
+ /**
+ * Set the DoF indices of this
+ * cell to the given values. This
+ * function bypasses the DoF
+ * cache, if one exists for the
+ * given DoF handler class.
+ */
- void set_dof_indices (const std::vector<unsigned int> &dof_indices);
++ void set_dof_indices (const std::vector<types::global_dof_index> &dof_indices);
+
+ /**
+ * Update the cache in which we
+ * store the dof indices of this
+ * cell.
+ */
+ void update_cell_dof_indices_cache () const;
+
+ private:
+ /**
+ * Copy operator. This is normally used
+ * in a context like <tt>iterator a,b;
+ * *a=*b;</tt>. Presumably, the intent
+ * here is to copy the object pointed to
+ * by @p b to the object pointed to by
+ * @p a. However, the result of
+ * dereferencing an iterator is not an
+ * object but an accessor; consequently,
+ * this operation is not useful for
+ * iterators on triangulations. We
+                                        * declare this function private so
+                                        * that it cannot be used from outside.
+ * Furthermore it is not implemented and
+ * will give a linker error if used
+ * anyway.
+ */
+ DoFCellAccessor<DH> &
+ operator = (const DoFCellAccessor<DH> &da);
+
+ /**
+ * Make the DoFHandler class a
+ * friend so that it can call the
+ * update_cell_dof_indices_cache()
+ * function
+ */
+ template <int dim, int spacedim> friend class DoFHandler;
+ friend struct dealii::internal::DoFCellAccessor::Implementation;
};
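
The internal Implementation functions that follow dispatch on the structural dimension of the object through the empty int2type tag. A generic sketch of that idiom (editorial, not deal.II code; the locate() overloads only mirror where the functions below look for their data):

// An empty struct templated on an integer lets the compiler pick an
// overload at compile time instead of branching at run time.
template <int N> struct int2type {};

void locate (int2type<1>) { /* would look in dof_handler.faces->lines        */ }
void locate (int2type<2>) { /* would look in dof_handler.faces->quads        */ }
void locate (int2type<3>) { /* would look in dof_handler.levels[...] storage */ }

// locate (int2type<2>()) resolves at compile time to the quad overload.
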
namespace internal
{
namespace DoFAccessor
{
- /**
- * A class like the one with same
- * name in tria.cc. See there for
- * more information.
- */
+ /**
+     * A class like the one with the same
+ * name in tria.cc. See there for
+ * more information.
+ */
struct Implementation
{
- /**
- * Implementations of the
- * get_dof_index/set_dof_index functions.
- */
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<1>)
- {
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<1>,
- const types::global_dof_index global_index)
- {
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<1>)
- {
- // faces have no levels
- Assert (obj_level == 0, ExcInternalError());
- return dof_handler.faces->lines.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<1>,
- const types::global_dof_index global_index)
- {
- // faces have no levels
- Assert (obj_level == 0, ExcInternalError());
- dof_handler.faces->lines.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<2>)
- {
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<2>,
- const types::global_dof_index global_index)
- {
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<1>)
- {
- // faces have no levels
- Assert (obj_level == 0, ExcInternalError());
- return dof_handler.faces->lines.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<1>,
- const types::global_dof_index global_index)
- {
- // faces have no levels
- Assert (obj_level == 0, ExcInternalError());
- dof_handler.faces->lines.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index);
- }
-
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<2>)
- {
- // faces have no levels
- Assert (obj_level == 0, ExcInternalError());
- return dof_handler.faces->quads.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<2>,
- const types::global_dof_index global_index)
- {
- // faces have no levels
- Assert (obj_level == 0, ExcInternalError());
- dof_handler.faces->quads.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index);
- }
-
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<3>)
- {
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- dealii::internal::int2type<3>,
- const types::global_dof_index global_index)
- {
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<1> &,
- const types::global_dof_index global_index)
- {
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<1> &,
- const types::global_dof_index global_index)
- {
- dof_handler.faces->lines.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<2> &,
- const types::global_dof_index global_index)
- {
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<1> &,
- const types::global_dof_index global_index)
- {
- dof_handler.faces->lines.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.faces->quads.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<2> &,
- const types::global_dof_index global_index)
- {
- dof_handler.faces->quads.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<3> &)
- {
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- void
- set_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const dealii::internal::int2type<3> &,
- const types::global_dof_index global_index)
- {
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
- fe_index,
- local_index,
- global_index,
- obj_level);
- }
-
-
- template <int structdim, int dim, int spacedim>
- static
- bool
- fe_index_is_active (const dealii::DoFHandler<dim,spacedim> &,
- const unsigned int,
- const unsigned int,
- const unsigned int fe_index,
- const dealii::internal::int2type<structdim> &)
- {
- return (fe_index == 0);
- }
-
-
-
- template <int structdim, int dim, int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const dealii::internal::int2type<structdim> &)
- {
- // check that the object we look
- // at is in fact active. the
- // problem is that we have
- // templatized on the
- // dimensionality of the object,
- // so it may be a cell, a face,
- // or a line. we have a bit of
- // trouble doing this all in the
- // generic case, so only check if
- // it is either a cell or a
- // line. the only case this
- // leaves out is faces in 3d --
- // let's hope that this never is
- // a problem
- Assert ((dim==structdim
- ?
- typename
- dealii::internal::DoFHandler::
- Iterators<dealii::DoFHandler<dim,spacedim> >::
- raw_cell_iterator (&dof_handler.get_tria(),
- obj_level,
- obj_index,
- &dof_handler)->used()
- :
- (structdim==1
- ?
- typename
- dealii::internal::DoFHandler::
- Iterators<dealii::DoFHandler<dim,spacedim> >::
- raw_line_iterator (&dof_handler.get_tria(),
- obj_level,
- obj_index,
- &dof_handler)->used()
- :
- true))
- == true,
- ExcMessage ("This cell is not active and therefore can't be "
- "queried for its active FE indices"));
- return 1;
- }
-
-
-
- template <int structdim, int dim, int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<structdim> &)
- {
- // check that the object we look
- // at is in fact active. the
- // problem is that we have
- // templatized on the
- // dimensionality of the object,
- // so it may be a cell, a face,
- // or a line. we have a bit of
- // trouble doing this all in the
- // generic case, so only check if
- // it is either a cell or a
- // line. the only case this
- // leaves out is faces in 3d --
- // let's hope that this never is
- // a problem
- Assert ((dim==structdim
- ?
- typename
- dealii::internal::DoFHandler::
- Iterators<dealii::DoFHandler<dim,spacedim> >::
- raw_cell_iterator (&dof_handler.get_tria(),
- obj_level,
- obj_index,
- &dof_handler)->used()
- :
- (structdim==1
- ?
- typename
- dealii::internal::DoFHandler::
- Iterators<dealii::DoFHandler<dim,spacedim> >::
- raw_line_iterator (&dof_handler.get_tria(),
- obj_level,
- obj_index,
- &dof_handler)->used()
- :
- true))
- == true,
- ExcMessage ("This cell is not active and therefore can't be "
- "queried for its active FE indices"));
- Assert (n == 0, ExcIndexRange (n, 0, 1));
-
- return dealii::DoFHandler<dim,spacedim>::default_fe_index;
- }
-
-
- template <int spacedim>
- static
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
- obj_index);
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
- }
-
-
- template <int spacedim>
- static
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int ,
- const unsigned int obj_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.n_active_fe_indices (dof_handler,
- obj_index);
- }
-
-
- template <int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
- }
-
-
-
- template <int spacedim>
- static
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
- obj_index);
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
- }
-
-
-
- template <int spacedim>
- static
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int,
- const unsigned int obj_index,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.n_active_fe_indices (dof_handler,
- obj_index);
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<1> &)
- {
- return dof_handler.faces->lines.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
- }
-
-
-
- template <int spacedim>
- static
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.faces->quads.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
- }
-
- template <int spacedim>
- static
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const dealii::internal::int2type<3> &)
- {
- return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
- }
-
-
- template <int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int ,
- const unsigned int obj_index,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.faces->quads.n_active_fe_indices (dof_handler,
- obj_index);
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<2> &)
- {
- return dof_handler.faces->quads.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const dealii::internal::int2type<3> &)
- {
- return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
- obj_index);
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n,
- const dealii::internal::int2type<3> &)
- {
- return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
- }
-
- /**
- * Set the @p local_index-th
- * degree of freedom
- * corresponding to the finite
- * element specified by @p
- * fe_index on the vertex with
- * global number @p
- * vertex_index to @p
- * global_index.
- */
- template <int dim, int spacedim>
- static
- void
- set_vertex_dof_index (dealii::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const types::global_dof_index global_index)
- {
- Assert ((fe_index == dealii::DoFHandler<dim,spacedim>::default_fe_index),
- ExcMessage ("Only the default FE index is allowed for non-hp DoFHandler objects"));
- Assert (dof_handler.selected_fe != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- Assert (local_index < dof_handler.selected_fe->dofs_per_vertex,
- ExcIndexRange(local_index, 0,
- dof_handler.selected_fe->dofs_per_vertex));
-
- dof_handler.vertex_dofs[vertex_index *
- dof_handler.selected_fe->dofs_per_vertex
- + local_index]
- = global_index;
- }
-
-
- template <int dim, int spacedim>
- static
- void
- set_vertex_dof_index (dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const types::global_dof_index global_index)
- {
- Assert ( (fe_index != dealii::hp::DoFHandler<dim,spacedim>::default_fe_index),
- ExcMessage ("You need to specify a FE index when working "
- "with hp DoFHandlers"));
- Assert (dof_handler.finite_elements != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- Assert (local_index < (*dof_handler.finite_elements)[fe_index].dofs_per_vertex,
- ExcIndexRange(local_index, 0,
- (*dof_handler.finite_elements)[fe_index].dofs_per_vertex));
- Assert (fe_index < dof_handler.finite_elements->size(),
- ExcInternalError());
- Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
- ExcMessage ("This vertex is unused and has no DoFs associated with it"));
-
- // hop along the list of index
- // sets until we find the one
- // with the correct fe_index, and
- // then poke into that
- // part. trigger an exception if
- // we can't find a set for this
- // particular fe_index
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
- while (true)
- {
- Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
-
- // a fe index is always small
- Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
- const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
-
- Assert (this_fe_index != numbers::invalid_unsigned_int,
- ExcInternalError());
- Assert (this_fe_index < dof_handler.finite_elements->size(),
- ExcInternalError());
-
- if (this_fe_index == fe_index)
- {
- *(pointer + 1 + local_index) = global_index;
- return;
- }
- else
- pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
- }
- }
-
-
- /**
- * Get the @p local_index-th
- * degree of freedom
- * corresponding to the finite
- * element specified by @p
- * fe_index on the vertex with
- * global number @p
- * vertex_index to @p
- * global_index.
- */
-
- template <int dim, int spacedim>
- static
- unsigned int
- get_vertex_dof_index (const dealii::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index,
- const unsigned int fe_index,
- const unsigned int local_index)
- {
- Assert ((fe_index == dealii::DoFHandler<dim,spacedim>::default_fe_index),
- ExcMessage ("Only the default FE index is allowed for non-hp DoFHandler objects"));
- Assert (dof_handler.selected_fe != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- Assert (local_index < dof_handler.selected_fe->dofs_per_vertex,
- ExcIndexRange(local_index, 0,
- dof_handler.selected_fe->dofs_per_vertex));
-
- return
- dof_handler.vertex_dofs[vertex_index *
- dof_handler.selected_fe->dofs_per_vertex
- + local_index];
- }
-
-
- template<int dim, int spacedim>
- static
- unsigned int
- get_vertex_dof_index (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index,
- const unsigned int fe_index,
- const unsigned int local_index)
- {
- Assert ( (fe_index != dealii::hp::DoFHandler<dim,spacedim>::default_fe_index),
- ExcMessage ("You need to specify a FE index when working "
- "with hp DoFHandlers"));
- Assert (dof_handler.finite_elements != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- Assert (local_index < (*dof_handler.finite_elements)[fe_index].dofs_per_vertex,
- ExcIndexRange(local_index, 0,
- (*dof_handler.finite_elements)[fe_index].dofs_per_vertex));
- Assert (vertex_index < dof_handler.vertex_dofs_offsets.size(),
- ExcIndexRange (vertex_index, 0,
- dof_handler.vertex_dofs_offsets.size()));
- Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
- ExcMessage ("This vertex is unused and has no DoFs associated with it"));
-
- // hop along the list of index
- // sets until we find the one
- // with the correct fe_index, and
- // then poke into that
- // part. trigger an exception if
- // we can't find a set for this
- // particular fe_index
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
- while (true)
- {
- Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
-
- Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
- const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
-
- Assert (this_fe_index != numbers::invalid_unsigned_int,
- ExcInternalError());
- Assert (this_fe_index < dof_handler.finite_elements->size(),
- ExcInternalError());
-
- if (this_fe_index == fe_index)
- return *(pointer + 1 + local_index);
- else
- pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
- }
- }
-
-
- /**
- * Return the number of
- * different finite elements
- * that are active on a given
- * vertex.
- */
- template<int dim, int spacedim>
- static
- unsigned int
- n_active_vertex_fe_indices (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index)
- {
- Assert (dof_handler.finite_elements != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
-
- // if this vertex is unused, return 0
- if (dof_handler.vertex_dofs_offsets[vertex_index] == numbers::invalid_unsigned_int)
- return 0;
-
- // hop along the list of index
- // sets and count the number of
- // hops
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
-
- Assert (*pointer != numbers::invalid_unsigned_int,
- ExcInternalError());
-
- unsigned int counter = 0;
- while (true)
- {
- Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
-
- Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
- const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
-
- if (this_fe_index == numbers::invalid_unsigned_int)
- return counter;
- else
- {
- pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
- ++counter;
- }
- }
- }
-
-
-
- /**
- * Return the fe index of the
- * n-th finite element active
- * on a given vertex.
- */
- template<int dim, int spacedim>
- static
- unsigned int
- nth_active_vertex_fe_index (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index,
- const unsigned int n)
- {
- Assert (dof_handler.finite_elements != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- Assert (n < n_active_vertex_fe_indices(dof_handler, vertex_index),
- ExcIndexRange (n, 0, n_active_vertex_fe_indices(dof_handler,
- vertex_index)));
- // make sure we don't ask on
- // unused vertices
- Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
- ExcInternalError());
-
- // hop along the list of index
- // sets and count the number of
- // hops
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
-
- Assert (*pointer != numbers::invalid_unsigned_int,
- ExcInternalError());
-
- unsigned int counter = 0;
- while (true)
- {
- Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
-
- Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
- const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
-
- Assert (this_fe_index < dof_handler.finite_elements->size(),
- ExcInternalError());
-
- if (counter == n)
- return this_fe_index;
-
- Assert (this_fe_index != numbers::invalid_unsigned_int,
- ExcInternalError());
-
- pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
- ++counter;
- }
- }
-
-
-
- /**
- * Return whether a particular
- * finite element index is
- * active on the specified
- * vertex.
- */
- template<int dim, int spacedim>
- static
- bool
- fe_is_active_on_vertex (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
- const unsigned int vertex_index,
- const unsigned int fe_index)
- {
- Assert ( (fe_index != dealii::hp::DoFHandler<dim,spacedim>::default_fe_index),
- ExcMessage ("You need to specify a FE index when working "
- "with hp DoFHandlers"));
- Assert (dof_handler.finite_elements != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- Assert (fe_index < dof_handler.finite_elements->size(),
- ExcInternalError());
-
- // make sure we don't ask on
- // unused vertices
- Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
- numbers::invalid_unsigned_int,
- ExcInternalError());
-
- // hop along the list of index
- // sets and see whether we find
- // the given index
- const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
-
- Assert (*pointer != numbers::invalid_unsigned_int,
- ExcInternalError());
-
- while (true)
- {
- Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
-
- Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
- const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
-
- Assert (this_fe_index < dof_handler.finite_elements->size(),
- ExcInternalError());
-
- if (this_fe_index == numbers::invalid_unsigned_int)
- return false;
- else
- if (this_fe_index == fe_index)
- return true;
- else
- pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
- }
- }
+ /**
+ * Implementations of the
+ * get_dof_index/set_dof_index functions.
+ */
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<1>)
+ {
+ return dof_handler.levels[obj_level]->dof_object.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<1>,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.levels[obj_level]->dof_object.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index);
+ }
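// The overloads above are selected by the dimension tag int2type<structdim>:
// the caller encodes the structural dimension of the object it works on
// (line = 1, quad = 2, hex = 3) in the type of an otherwise empty argument,
// and overload resolution then picks the matching implementation at compile
// time. The following stand-alone sketch re-implements the idiom with a local
// int2type struct purely for illustration; it is not deal.II's definition.

#include <iostream>

namespace sketch
{
  template <int N> struct int2type {};

  // one overload per structural dimension, mirroring get_dof_index above
  void describe (int2type<1>) { std::cout << "line storage" << std::endl; }
  void describe (int2type<2>) { std::cout << "quad storage" << std::endl; }
  void describe (int2type<3>) { std::cout << "hex storage"  << std::endl; }

  template <int structdim>
  void access_object ()
  {
    // the tag carries no data; it only steers overload resolution
    describe (int2type<structdim>());
  }
}

int main ()
{
  sketch::access_object<1> ();  // prints "line storage"
  sketch::access_object<2> ();  // prints "quad storage"
  sketch::access_object<3> ();  // prints "hex storage"
  return 0;
}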
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<1>)
+ {
+ // faces have no levels
+ Assert (obj_level == 0, ExcInternalError());
+ return dof_handler.faces->lines.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<1>,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ // faces have no levels
+ Assert (obj_level == 0, ExcInternalError());
+ dof_handler.faces->lines.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<2>)
+ {
+ return dof_handler.levels[obj_level]->dof_object.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<2>,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.levels[obj_level]->dof_object.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<1>)
+ {
+ // faces have no levels
+ Assert (obj_level == 0, ExcInternalError());
+ return dof_handler.faces->lines.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<1>,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ // faces have no levels
+ Assert (obj_level == 0, ExcInternalError());
+ dof_handler.faces->lines.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index);
+ }
+
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<2>)
+ {
+ // faces have no levels
+ Assert (obj_level == 0, ExcInternalError());
+ return dof_handler.faces->quads.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<2>,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ // faces have no levels
+ Assert (obj_level == 0, ExcInternalError());
+ dof_handler.faces->quads.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index);
+ }
+
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<3>)
+ {
+ return dof_handler.levels[obj_level]->dof_object.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ dealii::internal::int2type<3>,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.levels[obj_level]->dof_object.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<1> &,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.levels[obj_level]->dof_object.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<1> &,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.faces->lines.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<2> &,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.levels[obj_level]->dof_object.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<1> &,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.faces->lines.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.faces->quads.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<2> &,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.faces->quads.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<3> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.
+ get_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const dealii::internal::int2type<3> &,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ dof_handler.levels[obj_level]->dof_object.
+ set_dof_index (dof_handler,
+ obj_index,
+ fe_index,
+ local_index,
+ global_index,
+ obj_level);
+ }
+
+
+ template <int structdim, int dim, int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::DoFHandler<dim,spacedim> &,
+ const unsigned int,
+ const unsigned int,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<structdim> &)
+ {
+ return (fe_index == 0);
+ }
+
+
+
+ template <int structdim, int dim, int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<structdim> &)
+ {
+ // check that the object we look
+ // at is in fact active. the
+ // problem is that we have
+ // templatized on the
+ // dimensionality of the object,
+ // so it may be a cell, a face,
+ // or a line. we have a bit of
+ // trouble doing this all in the
+ // generic case, so only check if
+ // it is either a cell or a
+ // line. the only case this
+ // leaves out is faces in 3d --
+ // let's hope that this never is
+ // a problem
+ Assert ((dim==structdim
+ ?
+ typename
+ dealii::internal::DoFHandler::
+ Iterators<dealii::DoFHandler<dim,spacedim> >::
+ raw_cell_iterator (&dof_handler.get_tria(),
+ obj_level,
+ obj_index,
+ &dof_handler)->used()
+ :
+ (structdim==1
+ ?
+ typename
+ dealii::internal::DoFHandler::
+ Iterators<dealii::DoFHandler<dim,spacedim> >::
+ raw_line_iterator (&dof_handler.get_tria(),
+ obj_level,
+ obj_index,
+ &dof_handler)->used()
+ :
+ true))
+ == true,
+ ExcMessage ("This cell is not active and therefore can't be "
+ "queried for its active FE indices"));
+ return 1;
+ }
+
+
+
+ template <int structdim, int dim, int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<structdim> &)
+ {
+ // check that the object we look
+ // at is in fact active. the
+ // problem is that we have
+ // templatized on the
+ // dimensionality of the object,
+ // so it may be a cell, a face,
+ // or a line. we have a bit of
+ // trouble doing this all in the
+ // generic case, so only check if
+ // it is either a cell or a
+ // line. the only case this
+ // leaves out is faces in 3d --
+ // let's hope that this never is
+ // a problem
+ Assert ((dim==structdim
+ ?
+ typename
+ dealii::internal::DoFHandler::
+ Iterators<dealii::DoFHandler<dim,spacedim> >::
+ raw_cell_iterator (&dof_handler.get_tria(),
+ obj_level,
+ obj_index,
+ &dof_handler)->used()
+ :
+ (structdim==1
+ ?
+ typename
+ dealii::internal::DoFHandler::
+ Iterators<dealii::DoFHandler<dim,spacedim> >::
+ raw_line_iterator (&dof_handler.get_tria(),
+ obj_level,
+ obj_index,
+ &dof_handler)->used()
+ :
+ true))
+ == true,
+ ExcMessage ("This cell is not active and therefore can't be "
+ "queried for its active FE indices"));
+ Assert (n == 0, ExcIndexRange (n, 0, 1));
+
+ return dealii::DoFHandler<dim,spacedim>::default_fe_index;
+ }
+
+
+ template <int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
+ obj_index,
+ fe_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
+ obj_index);
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
+ obj_level,
+ obj_index,
+ n);
+ }
+
+
+ template <int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.fe_index_is_active(dof_handler,
+ obj_index,
+ fe_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int ,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.n_active_fe_indices (dof_handler,
+ obj_index);
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.nth_active_fe_index (dof_handler,
+ obj_level,
+ obj_index,
+ n);
+ }
+
+
+
+ template <int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
+ obj_index,
+ fe_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
+ obj_index);
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
+ obj_level,
+ obj_index,
+ n);
+ }
+
+
+
+ template <int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.fe_index_is_active(dof_handler,
+ obj_index,
+ fe_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.n_active_fe_indices (dof_handler,
+ obj_index);
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<1> &)
+ {
+ return dof_handler.faces->lines.nth_active_fe_index (dof_handler,
+ obj_level,
+ obj_index,
+ n);
+ }
+
+
+
+ template <int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.faces->quads.fe_index_is_active(dof_handler,
+ obj_index,
+ fe_index,
+ obj_level);
+ }
+
+ template <int spacedim>
+ static
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const dealii::internal::int2type<3> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
+ obj_index,
+ fe_index,
+ obj_level);
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int ,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.faces->quads.n_active_fe_indices (dof_handler,
+ obj_index);
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<2> &)
+ {
+ return dof_handler.faces->quads.nth_active_fe_index (dof_handler,
+ obj_level,
+ obj_index,
+ n);
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const dealii::internal::int2type<3> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
+ obj_index);
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n,
+ const dealii::internal::int2type<3> &)
+ {
+ return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
+ obj_level,
+ obj_index,
+ n);
+ }
+
+ /**
+ * Set the @p local_index-th
+ * degree of freedom
+ * corresponding to the finite
+ * element specified by @p
+ * fe_index on the vertex with
+ * global number @p
+ * vertex_index to @p
+ * global_index.
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ set_vertex_dof_index (dealii::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ Assert ((fe_index == dealii::DoFHandler<dim,spacedim>::default_fe_index),
+ ExcMessage ("Only the default FE index is allowed for non-hp DoFHandler objects"));
+ Assert (dof_handler.selected_fe != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+ Assert (local_index < dof_handler.selected_fe->dofs_per_vertex,
+ ExcIndexRange(local_index, 0,
+ dof_handler.selected_fe->dofs_per_vertex));
+
+ dof_handler.vertex_dofs[vertex_index *
+ dof_handler.selected_fe->dofs_per_vertex
+ + local_index]
+ = global_index;
+ }
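// For the non-hp DoFHandler, all vertices carry the same number of degrees of
// freedom, so vertex DoFs live in one flat array with stride dofs_per_vertex
// and the lookup above is plain index arithmetic. A stand-alone sketch with
// invented numbers (dofs_per_vertex = 2, three vertices); unsigned long long
// stands in for types::global_dof_index here:

#include <cassert>

int main ()
{
  const unsigned int dofs_per_vertex = 2;

  // global dof indices of vertices 0, 1, 2, stored contiguously
  const unsigned long long vertex_dofs[] = { 0, 1,  4, 5,  8, 9 };

  const unsigned int vertex_index = 2;
  const unsigned int local_index  = 1;

  // same index arithmetic as set_vertex_dof_index()/get_vertex_dof_index() above
  assert (vertex_dofs[vertex_index * dofs_per_vertex + local_index] == 9);
  return 0;
}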
+
+
+ template <int dim, int spacedim>
+ static
+ void
+ set_vertex_dof_index (dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
- const unsigned int global_index)
++ const types::global_dof_index global_index)
+ {
+ Assert ( (fe_index != dealii::hp::DoFHandler<dim,spacedim>::default_fe_index),
+ ExcMessage ("You need to specify a FE index when working "
+ "with hp DoFHandlers"));
+ Assert (dof_handler.finite_elements != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+ Assert (local_index < (*dof_handler.finite_elements)[fe_index].dofs_per_vertex,
+ ExcIndexRange(local_index, 0,
+ (*dof_handler.finite_elements)[fe_index].dofs_per_vertex));
+ Assert (fe_index < dof_handler.finite_elements->size(),
+ ExcInternalError());
+ Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
+ numbers::invalid_unsigned_int,
+ ExcMessage ("This vertex is unused and has no DoFs associated with it"));
+
+ // hop along the list of index
+ // sets until we find the one
+ // with the correct fe_index, and
+ // then poke into that
+ // part. trigger an exception if
+ // we can't find a set for this
+ // particular fe_index
+ const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- unsigned int *pointer = &dof_handler.vertex_dofs[starting_offset];
++ types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
+ while (true)
+ {
+ Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
+
- const unsigned int this_fe_index = *pointer;
++ // a fe index is always small
++ Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
++ const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
+
+ Assert (this_fe_index != numbers::invalid_unsigned_int,
+ ExcInternalError());
+ Assert (this_fe_index < dof_handler.finite_elements->size(),
+ ExcInternalError());
+
+ if (this_fe_index == fe_index)
+ {
+ *(pointer + 1 + local_index) = global_index;
+ return;
+ }
+ else
+ pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
+ }
+ }
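// In the hp case a vertex can carry DoFs for several finite elements, so
// vertex_dofs stores, starting at vertex_dofs_offsets[vertex_index], a
// sequence of records of the form [fe_index, dof_0, ..., dof_{n-1}] followed
// by an end marker (an invalid index). The loop above "hops" from record to
// record until it finds the requested fe_index. A stand-alone sketch of that
// walk with invented numbers (fe 0 has 1 dof per vertex, fe 1 has 2), where
// unsigned long long again stands in for types::global_dof_index:

#include <iostream>

int main ()
{
  const unsigned long long invalid = static_cast<unsigned long long>(-1);

  // number of dofs per vertex for fe_index 0 and 1, respectively
  const unsigned int dofs_per_vertex[2] = { 1, 2 };

  // record for fe 0: [0, 7]; record for fe 1: [1, 11, 12]; then the end marker
  const unsigned long long vertex_dofs[] = { 0, 7,  1, 11, 12,  invalid };

  const unsigned int requested_fe = 1;
  const unsigned int local_index  = 0;

  unsigned int p = 0;                    // would be vertex_dofs_offsets[vertex_index]
  while (vertex_dofs[p] != invalid)
    {
      const unsigned int this_fe = static_cast<unsigned int>(vertex_dofs[p]);
      if (this_fe == requested_fe)
        {
          std::cout << "dof = " << vertex_dofs[p + 1 + local_index]
                    << std::endl;        // prints "dof = 11"
          return 0;
        }
      p += dofs_per_vertex[this_fe] + 1; // hop over this record to the next one
    }
  return 1;                              // requested fe_index not active here
}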
+
+
+ /**
+ * Return the index of the
+ * @p local_index-th degree of
+ * freedom corresponding to the
+ * finite element specified by
+ * @p fe_index on the vertex with
+ * global number @p vertex_index.
+ */
+
+ template <int dim, int spacedim>
+ static
+ unsigned int
+ get_vertex_dof_index (const dealii::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index,
+ const unsigned int fe_index,
+ const unsigned int local_index)
+ {
+ Assert ((fe_index == dealii::DoFHandler<dim,spacedim>::default_fe_index),
+ ExcMessage ("Only the default FE index is allowed for non-hp DoFHandler objects"));
+ Assert (dof_handler.selected_fe != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+ Assert (local_index < dof_handler.selected_fe->dofs_per_vertex,
+ ExcIndexRange(local_index, 0,
+ dof_handler.selected_fe->dofs_per_vertex));
+
+ return
+ dof_handler.vertex_dofs[vertex_index *
+ dof_handler.selected_fe->dofs_per_vertex
+ + local_index];
+ }
+
+
+ template<int dim, int spacedim>
+ static
+ unsigned int
+ get_vertex_dof_index (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index,
+ const unsigned int fe_index,
+ const unsigned int local_index)
+ {
+ Assert ( (fe_index != dealii::hp::DoFHandler<dim,spacedim>::default_fe_index),
+ ExcMessage ("You need to specify a FE index when working "
+ "with hp DoFHandlers"));
+ Assert (dof_handler.finite_elements != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+ Assert (local_index < (*dof_handler.finite_elements)[fe_index].dofs_per_vertex,
+ ExcIndexRange(local_index, 0,
+ (*dof_handler.finite_elements)[fe_index].dofs_per_vertex));
+ Assert (vertex_index < dof_handler.vertex_dofs_offsets.size(),
+ ExcIndexRange (vertex_index, 0,
+ dof_handler.vertex_dofs_offsets.size()));
+ Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
+ numbers::invalid_unsigned_int,
+ ExcMessage ("This vertex is unused and has no DoFs associated with it"));
+
+ // hop along the list of index
+ // sets until we find the one
+ // with the correct fe_index, and
+ // then poke into that
+ // part. trigger an exception if
+ // we can't find a set for this
+ // particular fe_index
+ const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const unsigned int *pointer = &dof_handler.vertex_dofs[starting_offset];
++ const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
+ while (true)
+ {
+ Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
+
- const unsigned int this_fe_index = *pointer;
++ Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
++ const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
+
+ Assert (this_fe_index != numbers::invalid_unsigned_int,
+ ExcInternalError());
+ Assert (this_fe_index < dof_handler.finite_elements->size(),
+ ExcInternalError());
+
+ if (this_fe_index == fe_index)
+ return *(pointer + 1 + local_index);
+ else
+ pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
+ }
+ }
+
+
+ /**
+ * Return the number of
+ * different finite elements
+ * that are active on a given
+ * vertex.
+ */
+ template<int dim, int spacedim>
+ static
+ unsigned int
+ n_active_vertex_fe_indices (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index)
+ {
+ Assert (dof_handler.finite_elements != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+
+ // if this vertex is unused, return 0
+ if (dof_handler.vertex_dofs_offsets[vertex_index] == numbers::invalid_unsigned_int)
+ return 0;
+
+ // hop along the list of index
+ // sets and count the number of
+ // hops
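++ // (e.g. a vertex shared between a
++ // cell using fe index 0 and one
++ // using fe index 1 makes this
++ // function return two)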
+ const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const unsigned int *pointer = &dof_handler.vertex_dofs[starting_offset];
++ const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
+
+ Assert (*pointer != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ unsigned int counter = 0;
+ while (true)
+ {
+ Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
+
- const unsigned int this_fe_index = *pointer;
++ Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
++ const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
+
+ if (this_fe_index == numbers::invalid_unsigned_int)
+ return counter;
+ else
+ {
+ pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
+ ++counter;
+ }
+ }
+ }
+
+
+
+ /**
+ * Return the fe index of the
+ * n-th finite element active
+ * on a given vertex.
+ */
+ template<int dim, int spacedim>
+ static
+ unsigned int
+ nth_active_vertex_fe_index (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index,
+ const unsigned int n)
+ {
+ Assert (dof_handler.finite_elements != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+ Assert (n < n_active_vertex_fe_indices(dof_handler, vertex_index),
+ ExcIndexRange (n, 0, n_active_vertex_fe_indices(dof_handler,
+ vertex_index)));
+ // make sure we don't ask on
+ // unused vertices
+ Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
+ numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ // hop along the list of index
+ // sets and count the number of
+ // hops
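++ // (the n-th index is simply the
++ // fe index stored at the start of
++ // the n-th sub-block, i.e. the
++ // indices are reported in storage
++ // order)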
+ const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const unsigned int *pointer = &dof_handler.vertex_dofs[starting_offset];
++ const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
+
+ Assert (*pointer != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ unsigned int counter = 0;
+ while (true)
+ {
+ Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
+
- const unsigned int this_fe_index = *pointer;
++ Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
++ const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
+
+ Assert (this_fe_index < dof_handler.finite_elements->size(),
+ ExcInternalError());
+
+ if (counter == n)
+ return this_fe_index;
+
+ Assert (this_fe_index != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
+ ++counter;
+ }
+ }
+
+
+
+ /**
+ * Return whether a particular
+ * finite element index is
+ * active on the specified
+ * vertex.
+ */
+ template<int dim, int spacedim>
+ static
+ bool
+ fe_is_active_on_vertex (const dealii::hp::DoFHandler<dim,spacedim> &dof_handler,
+ const unsigned int vertex_index,
+ const unsigned int fe_index)
+ {
+ Assert ( (fe_index != dealii::hp::DoFHandler<dim,spacedim>::default_fe_index),
+ ExcMessage ("You need to specify a FE index when working "
+ "with hp DoFHandlers"));
+ Assert (dof_handler.finite_elements != 0,
+ ExcMessage ("No finite element collection is associated with "
+ "this DoFHandler"));
+ Assert (fe_index < dof_handler.finite_elements->size(),
+ ExcInternalError());
+
+ // make sure we don't ask on
+ // unused vertices
+ Assert (dof_handler.vertex_dofs_offsets[vertex_index] !=
+ numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ // hop along the list of index
+ // sets and see whether we find
+ // the given index
+ const unsigned int starting_offset = dof_handler.vertex_dofs_offsets[vertex_index];
- const unsigned int *pointer = &dof_handler.vertex_dofs[starting_offset];
++ const types::global_dof_index *pointer = &dof_handler.vertex_dofs[starting_offset];
+
+ Assert (*pointer != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ while (true)
+ {
+ Assert (pointer <= &dof_handler.vertex_dofs.back(), ExcInternalError());
+
- const unsigned int this_fe_index = *pointer;
++ Assert((*pointer)<std::numeric_limits<unsigned int>::max(), ExcInternalError());
++ const unsigned int this_fe_index = static_cast<unsigned int>(*pointer);
+
+ Assert (this_fe_index < dof_handler.finite_elements->size(),
+ ExcInternalError());
+
+ if (this_fe_index == numbers::invalid_unsigned_int)
+ return false;
+ else if (this_fe_index == fe_index)
+ return true;
+ else
+ pointer += (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1;
+ }
+ }
};
}
template <int dim, class DH>
inline
-unsigned int
+types::global_dof_index
DoFAccessor<dim,DH>::dof_index (const unsigned int i,
- const unsigned int fe_index) const
+ const unsigned int fe_index) const
{
- // access the respective DoF
+ // access the respective DoF
return dealii::internal::DoFAccessor::Implementation::get_dof_index (*this->dof_handler,
- this->level(),
- this->present_index,
- fe_index,
- i,
- dealii::internal::int2type<dim>());
+ this->level(),
+ this->present_index,
+ fe_index,
+ i,
+ dealii::internal::int2type<dim>());
}
- types::global_dof_index DoFAccessor<structdim, DH>::mg_dof_index (const int level, const unsigned int i) const {
+template<int structdim, class DH>
+inline
++types::global_dof_index DoFAccessor<structdim, DH>::mg_dof_index (const int level, const unsigned int i) const
++{
+ return this->dof_handler->template get_dof_index<structdim> (level, this->present_index, 0, i);
+}
+
template <int dim, class DH>
inline
void
DoFAccessor<dim,DH>::set_dof_index (const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index) const
- const unsigned int index,
++ const types::global_dof_index index,
+ const unsigned int fe_index) const
{
- // access the respective DoF
+ // access the respective DoF
dealii::internal::DoFAccessor::Implementation::set_dof_index (*this->dof_handler,
- this->level(),
- this->present_index,
- fe_index,
- i,
- dealii::internal::int2type<dim>(),
- index);
+ this->level(),
+ this->present_index,
+ fe_index,
+ i,
+ dealii::internal::int2type<dim>(),
+ index);
}
template <int structdim, class DH>
inline
-unsigned int
+types::global_dof_index
DoFAccessor<structdim, DH>::vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index) const
+ const unsigned int i,
+ const unsigned int fe_index) const
{
return
dealii::internal::DoFAccessor::Implementation::get_vertex_dof_index
(*this->dof_handler,
this->vertex_index(vertex),
fe_index,
i);
}
- DoFAccessor<structdim, DH>::mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index) const {
+template<int structdim, class DH>
+inline
+types::global_dof_index
++DoFAccessor<structdim, DH>::mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index) const
++{
+ Assert (this->dof_handler != 0, ExcInvalidObject ());
+ Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ());
+ Assert (vertex < GeometryInfo<structdim>::vertices_per_cell, ExcIndexRange (vertex, 0, GeometryInfo<structdim>::vertices_per_cell));
+ Assert (i < this->dof_handler->get_fe ()[fe_index].dofs_per_vertex, ExcIndexRange (i, 0, this->dof_handler->get_fe ()[fe_index].dofs_per_vertex));
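++ // the per-vertex multigrid indices
++ // are stored separately for each
++ // level and are looked up here by
++ // (level, local index)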
+ return this->dof_handler->mg_vertex_dofs[this->vertex_index (vertex)].get_index (level, i);
+}
+
template <int structdim, class DH>
inline
void
DoFAccessor<structdim, DH>::set_vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index) const
+ const unsigned int i,
- const unsigned int index,
++ const types::global_dof_index index,
+ const unsigned int fe_index) const
{
dealii::internal::DoFAccessor::Implementation::set_vertex_dof_index
- (*this->dof_handler,
- this->vertex_index(vertex),
- fe_index,
- i,
- index);
+ (*this->dof_handler,
+ this->vertex_index(vertex),
+ fe_index,
+ i,
+ index);
}
- DoFAccessor<structdim, DH>::set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const types::global_dof_index index, const unsigned int fe_index) const {
+template<int structdim, class DH>
+inline
+void
++DoFAccessor<structdim, DH>::set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const types::global_dof_index index, const unsigned int fe_index) const
++{
+ Assert (this->dof_handler != 0, ExcInvalidObject ());
+ Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ());
+ Assert (vertex < GeometryInfo<structdim>::vertices_per_cell, ExcIndexRange (vertex, 0, GeometryInfo<structdim>::vertices_per_cell));
+ Assert (i < this->dof_handler->get_fe ()[fe_index].dofs_per_vertex, ExcIndexRange (i, 0, this->dof_handler->get_fe ()[fe_index].dofs_per_vertex));
+ this->dof_handler->mg_vertex_dofs[this->vertex_index (vertex)].set_index (level, i, index);
+}
- DoFAccessor<structdim, DH>::set_mg_dof_index (const int level, const unsigned int i, const types::global_dof_index index) const {
+template<int structdim, class DH>
+inline
+void
++DoFAccessor<structdim, DH>::set_mg_dof_index (const int level, const unsigned int i, const types::global_dof_index index) const
++{
+ this->dof_handler->template set_dof_index<structdim> (level, this->present_index, 0, i, index);
+}
namespace internal
{
template <class DH>
void get_dof_indices (const dealii::DoFAccessor<1,DH> &accessor,
- std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index)
- std::vector<unsigned int> &dof_indices,
++ std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index)
{
const unsigned int dofs_per_vertex = accessor.get_fe(fe_index).dofs_per_vertex,
- dofs_per_line = accessor.get_fe(fe_index).dofs_per_line;
+ dofs_per_line = accessor.get_fe(fe_index).dofs_per_line;
- std::vector<unsigned int>::iterator next = dof_indices.begin();
+ std::vector<types::global_dof_index>::iterator next = dof_indices.begin();
for (unsigned int vertex=0; vertex<2; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d)
- *next++ = accessor.vertex_dof_index(vertex,d,fe_index);
+ for (unsigned int d=0; d<dofs_per_vertex; ++d)
+ *next++ = accessor.vertex_dof_index(vertex,d,fe_index);
for (unsigned int d=0; d<dofs_per_line; ++d)
- *next++ = accessor.dof_index(d,fe_index);
+ *next++ = accessor.dof_index(d,fe_index);
}
template <class DH>
void get_dof_indices (const dealii::DoFAccessor<2,DH> &accessor,
- std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index)
- std::vector<unsigned int> &dof_indices,
++ std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index)
{
const unsigned int dofs_per_vertex = accessor.get_fe(fe_index).dofs_per_vertex,
- dofs_per_line = accessor.get_fe(fe_index).dofs_per_line,
- dofs_per_quad = accessor.get_fe(fe_index).dofs_per_quad;
+ dofs_per_line = accessor.get_fe(fe_index).dofs_per_line,
+ dofs_per_quad = accessor.get_fe(fe_index).dofs_per_quad;
- std::vector<unsigned int>::iterator next = dof_indices.begin();
+ std::vector<types::global_dof_index>::iterator next = dof_indices.begin();
for (unsigned int vertex=0; vertex<4; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d)
- *next++ = accessor.vertex_dof_index(vertex,d,fe_index);
- // now copy dof numbers from the line. for
- // lines with the wrong orientation (which
- // might occur in 3d), we have already made
- // sure that we're ok by picking the correct
- // vertices (this happens automatically in
- // the vertex() function). however, if the
- // line is in wrong orientation, we look at
- // it in flipped orientation and we will have
- // to adjust the shape function indices that
- // we see to correspond to the correct
- // (face-local) ordering.
+ for (unsigned int d=0; d<dofs_per_vertex; ++d)
+ *next++ = accessor.vertex_dof_index(vertex,d,fe_index);
+ // now copy dof numbers from the line. for
+ // lines with the wrong orientation (which
+ // might occur in 3d), we have already made
+ // sure that we're ok by picking the correct
+ // vertices (this happens automatically in
+ // the vertex() function). however, if the
+ // line is in wrong orientation, we look at
+ // it in flipped orientation and we will have
+ // to adjust the shape function indices that
+ // we see to correspond to the correct
+ // (face-local) ordering.
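++ // (for a typical nodal element
++ // this adjustment amounts to
++ // enumerating the line dofs in
++ // reverse order when the line is
++ // seen in flipped orientation)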
for (unsigned int line=0; line<4; ++line)
- for (unsigned int d=0; d<dofs_per_line; ++d)
- *next++ = accessor.line(line)->dof_index(accessor.get_fe(fe_index).
- adjust_line_dof_index_for_line_orientation(d,
- accessor.line_orientation(line)),
- fe_index);
+ for (unsigned int d=0; d<dofs_per_line; ++d)
+ *next++ = accessor.line(line)->dof_index(accessor.get_fe(fe_index).
+ adjust_line_dof_index_for_line_orientation(d,
+ accessor.line_orientation(line)),
+ fe_index);
for (unsigned int d=0; d<dofs_per_quad; ++d)
- *next++ = accessor.dof_index(d,fe_index);
+ *next++ = accessor.dof_index(d,fe_index);
}
template <class DH>
void get_dof_indices (const dealii::DoFAccessor<3,DH> &accessor,
- std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index)
- std::vector<unsigned int> &dof_indices,
++ std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index)
{
const unsigned int dofs_per_vertex = accessor.get_fe(fe_index).dofs_per_vertex,
- dofs_per_line = accessor.get_fe(fe_index).dofs_per_line,
- dofs_per_quad = accessor.get_fe(fe_index).dofs_per_quad,
- dofs_per_hex = accessor.get_fe(fe_index).dofs_per_hex;
+ dofs_per_line = accessor.get_fe(fe_index).dofs_per_line,
+ dofs_per_quad = accessor.get_fe(fe_index).dofs_per_quad,
+ dofs_per_hex = accessor.get_fe(fe_index).dofs_per_hex;
- std::vector<unsigned int>::iterator next = dof_indices.begin();
+ std::vector<types::global_dof_index>::iterator next = dof_indices.begin();
for (unsigned int vertex=0; vertex<8; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d)
- *next++ = accessor.vertex_dof_index(vertex,d,fe_index);
- // now copy dof numbers from the line. for
- // lines with the wrong orientation, we have
- // already made sure that we're ok by picking
- // the correct vertices (this happens
- // automatically in the vertex()
- // function). however, if the line is in
- // wrong orientation, we look at it in
- // flipped orientation and we will have to
- // adjust the shape function indices that we
- // see to correspond to the correct
- // (cell-local) ordering.
+ for (unsigned int d=0; d<dofs_per_vertex; ++d)
+ *next++ = accessor.vertex_dof_index(vertex,d,fe_index);
+ // now copy dof numbers from the line. for
+ // lines with the wrong orientation, we have
+ // already made sure that we're ok by picking
+ // the correct vertices (this happens
+ // automatically in the vertex()
+ // function). however, if the line is in
+ // wrong orientation, we look at it in
+ // flipped orientation and we will have to
+ // adjust the shape function indices that we
+ // see to correspond to the correct
+ // (cell-local) ordering.
for (unsigned int line=0; line<12; ++line)
- for (unsigned int d=0; d<dofs_per_line; ++d)
- *next++ = accessor.line(line)->dof_index(accessor.get_fe(fe_index).
- adjust_line_dof_index_for_line_orientation(d,
- accessor.line_orientation(line)),fe_index);
- // now copy dof numbers from the face. for
- // faces with the wrong orientation, we
- // have already made sure that we're ok by
- // picking the correct lines and vertices
- // (this happens automatically in the
- // line() and vertex() functions). however,
- // if the face is in wrong orientation, we
- // look at it in flipped orientation and we
- // will have to adjust the shape function
- // indices that we see to correspond to the
- // correct (cell-local) ordering. The same
- // applies, if the face_rotation or
- // face_orientation is non-standard
+ for (unsigned int d=0; d<dofs_per_line; ++d)
+ *next++ = accessor.line(line)->dof_index(accessor.get_fe(fe_index).
+ adjust_line_dof_index_for_line_orientation(d,
+ accessor.line_orientation(line)),fe_index);
+ // now copy dof numbers from the face. for
+ // faces with the wrong orientation, we
+ // have already made sure that we're ok by
+ // picking the correct lines and vertices
+ // (this happens automatically in the
+ // line() and vertex() functions). however,
+ // if the face is in wrong orientation, we
+ // look at it in flipped orientation and we
+ // will have to adjust the shape function
+ // indices that we see to correspond to the
+ // correct (cell-local) ordering. The same
+ // applies, if the face_rotation or
+ // face_orientation is non-standard
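++ // (the flags face_orientation,
++ // face_flip and face_rotation
++ // together distinguish the eight
++ // possible ways a quad face can be
++ // oriented relative to the cell)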
for (unsigned int quad=0; quad<6; ++quad)
- for (unsigned int d=0; d<dofs_per_quad; ++d)
- *next++ = accessor.quad(quad)->dof_index(accessor.get_fe(fe_index).
- adjust_quad_dof_index_for_face_orientation(d,
- accessor.face_orientation(quad),
- accessor.face_flip(quad),
- accessor.face_rotation(quad)),
- fe_index);
+ for (unsigned int d=0; d<dofs_per_quad; ++d)
+ *next++ = accessor.quad(quad)->dof_index(accessor.get_fe(fe_index).
+ adjust_quad_dof_index_for_face_orientation(d,
+ accessor.face_orientation(quad),
+ accessor.face_flip(quad),
+ accessor.face_rotation(quad)),
+ fe_index);
for (unsigned int d=0; d<dofs_per_hex; ++d)
- *next++ = accessor.dof_index(d,fe_index);
+ *next++ = accessor.dof_index(d,fe_index);
}
- void get_mg_dof_indices (const dealii::DoFAccessor<1, DH>& accessor, const int level, std::vector<types::global_dof_index>& dof_indices, const unsigned int fe_index) {
- const FiniteElement<DH::dimension, DH::space_dimension>& fe = accessor.get_dof_handler ().get_fe ()[fe_index];
+
+ template<class DH>
- void get_mg_dof_indices (const dealii::DoFAccessor<2, DH>& accessor, const int level, std::vector<types::global_dof_index>& dof_indices, const unsigned int fe_index) {
- const FiniteElement<DH::dimension, DH::space_dimension>& fe = accessor.get_dof_handler ().get_fe ()[fe_index];
++ void get_mg_dof_indices (const dealii::DoFAccessor<1, DH> &accessor, const int level, std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index)
++ {
++ const FiniteElement<DH::dimension, DH::space_dimension> &fe = accessor.get_dof_handler ().get_fe ()[fe_index];
+ std::vector<types::global_dof_index>::iterator next = dof_indices.begin ();
+
+ for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex)
+ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
+ *next++ = accessor.mg_vertex_dof_index (level, vertex, dof);
+
+ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
+ *next++ = accessor.mg_dof_index (level, dof);
+
+ Assert (next == dof_indices.end (), ExcInternalError ());
+ }
+
+ template<class DH>
- void get_mg_dof_indices (const dealii::DoFAccessor<3, DH>& accessor, const int level, std::vector<types::global_dof_index>& dof_indices, const unsigned int fe_index) {
- const FiniteElement<DH::dimension, DH::space_dimension>& fe = accessor.get_dof_handler ().get_fe ()[fe_index];
++ void get_mg_dof_indices (const dealii::DoFAccessor<2, DH> &accessor, const int level, std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index)
++ {
++ const FiniteElement<DH::dimension, DH::space_dimension> &fe = accessor.get_dof_handler ().get_fe ()[fe_index];
+ std::vector<types::global_dof_index>::iterator next = dof_indices.begin ();
+
+ for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex)
+ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
+ *next++ = accessor.mg_vertex_dof_index (level, vertex, dof);
+
+ for (unsigned int line = 0; line < GeometryInfo<2>::lines_per_cell; ++line)
+ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
+ *next++ = accessor.line (line)->mg_dof_index (level, dof);
+
+ for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof)
+ *next++ = accessor.mg_dof_index (level, dof);
+
+ Assert (next == dof_indices.end (), ExcInternalError ());
+ }
+
+ template<class DH>
++ void get_mg_dof_indices (const dealii::DoFAccessor<3, DH> &accessor, const int level, std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index)
++ {
++ const FiniteElement<DH::dimension, DH::space_dimension> &fe = accessor.get_dof_handler ().get_fe ()[fe_index];
+ std::vector<types::global_dof_index>::iterator next = dof_indices.begin ();
+
+ for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex)
+ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
+ *next++ = accessor.mg_vertex_dof_index (level, vertex, dof);
+
+ for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell; ++line)
+ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
+ *next++ = accessor.line (line)->mg_dof_index (level, dof);
+
+ for (unsigned int quad = 0; quad < GeometryInfo<3>::quads_per_cell; ++quad)
+ for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof)
+ *next++ = accessor.quad (quad)->mg_dof_index (level, dof);
+
+ for (unsigned int dof = 0; dof < fe.dofs_per_hex; ++dof)
+ *next++ = accessor.mg_dof_index (level, dof);
+
+ Assert (next == dof_indices.end (), ExcInternalError ());
+ }
}
}
template <int structdim, class DH>
inline
void
-DoFAccessor<structdim,DH>::get_dof_indices (std::vector<unsigned int> &dof_indices,
+DoFAccessor<structdim,DH>::get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index) const
+ const unsigned int fe_index) const
{
Assert (static_cast<unsigned int>(this->level()) < this->dof_handler->levels.size(),
ExcMessage ("DoFHandler not initialized"));
dealii::internal::DoFAccessor::get_dof_indices (*this, dof_indices, fe_index);
}
- void DoFAccessor<structdim, DH>::get_mg_dof_indices (const int level, std::vector<types::global_dof_index>& dof_indices, const unsigned int fe_index) const {
+template<int structdim, class DH>
+inline
++void DoFAccessor<structdim, DH>::get_mg_dof_indices (const int level, std::vector<types::global_dof_index> &dof_indices, const unsigned int fe_index) const
++{
+ Assert (this->dof_handler != 0, ExcInvalidObject ());
+ Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ());
- switch (structdim) {
- case 1: {
- Assert (dof_indices.size () == 2 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line, ExcVectorDoesNotMatch ());
- break;
++ switch (structdim)
++ {
++ case 1:
++ {
++ Assert (dof_indices.size () == 2 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line, ExcVectorDoesNotMatch ());
++ break;
+ }
- case 2: {
- Assert (dof_indices.size () == 4 * (this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line) + this->dof_handler->get_fe ()[fe_index].dofs_per_quad, ExcVectorDoesNotMatch ());
- break;
++ case 2:
++ {
++ Assert (dof_indices.size () == 4 * (this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line) + this->dof_handler->get_fe ()[fe_index].dofs_per_quad, ExcVectorDoesNotMatch ());
++ break;
+ }
+
- case 3: {
- Assert (dof_indices.size () == 8 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + 12 * this->dof_handler->get_fe ()[fe_index].dofs_per_line + 6 * this->dof_handler->get_fe ()[fe_index].dofs_per_quad + this->dof_handler->get_fe ()[fe_index].dofs_per_hex, ExcVectorDoesNotMatch ());
- break;
++ case 3:
++ {
++ Assert (dof_indices.size () == 8 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + 12 * this->dof_handler->get_fe ()[fe_index].dofs_per_line + 6 * this->dof_handler->get_fe ()[fe_index].dofs_per_quad + this->dof_handler->get_fe ()[fe_index].dofs_per_hex, ExcVectorDoesNotMatch ());
++ break;
+ }
- Assert (false, ExcNotImplemented ());
- }
+ default:
++ Assert (false, ExcNotImplemented ());
++ }
+
+ internal::DoFAccessor::get_mg_dof_indices (*this, level, dof_indices, fe_index);
+}
template <template <int, int> class DH, int spacedim>
inline
void
DoFAccessor<0,DH<1,spacedim> >::
-get_dof_indices (std::vector<unsigned int> &dof_indices,
+get_dof_indices (std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index) const
+ const unsigned int fe_index) const
{
for (unsigned int i=0; i<dof_indices.size(); ++i)
dof_indices[i]
template <template <int, int> class DH, int spacedim>
inline
-unsigned int
+types::global_dof_index
DoFAccessor<0,DH<1,spacedim> >::
vertex_dof_index (const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index) const
+ const unsigned int i,
+ const unsigned int fe_index) const
{
Assert (vertex == 0, ExcIndexRange (vertex, 0, 1));
return dealii::internal::DoFAccessor::Implementation::
using dealii::DoFCellAccessor;
using dealii::DoFHandler;
- /**
- * A class with the same purpose as the similarly named class of the
- * Triangulation class. See there for more information.
- */
+ /**
+ * A class with the same purpose as the similarly named class of the
+ * Triangulation class. See there for more information.
+ */
struct Implementation
{
- /**
- * Implement the updating of the
- * cache. Currently not
- * implemented for hp::DoFHandler
- * objects.
- */
- template <int spacedim>
- static
- void
- update_cell_dof_indices_cache (const DoFCellAccessor<DoFHandler<1,spacedim> > &accessor)
- {
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices. otherwise
- // simply don't update the cache at
- // all. the get_dof_indices
- // function will then make sure we
- // don't access the invalid data
- if (accessor.has_children()
- &&
- (accessor.get_fe().dofs_per_cell !=
- accessor.get_fe().dofs_per_vertex * GeometryInfo<1>::vertices_per_cell))
- return;
-
- const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
- dofs_per_line = accessor.get_fe().dofs_per_line,
- dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- // make sure the cache is at least
- // as big as we need it when
- // writing to the last element of
- // this cell
- Assert (accessor.present_index * dofs_per_cell + dofs_per_cell
- <=
- accessor.dof_handler->levels[accessor.present_level]
- ->cell_dof_indices_cache.size(),
- ExcInternalError());
-
- std::vector<types::global_dof_index>::iterator next
- = (accessor.dof_handler->levels[accessor.present_level]
- ->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
-
- for (unsigned int vertex=0; vertex<2; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d)
- *next++ = accessor.vertex_dof_index(vertex,d);
- for (unsigned int d=0; d<dofs_per_line; ++d)
- *next++ = accessor.dof_index(d);
- }
-
-
-
- template <int spacedim>
- static
- void
- update_cell_dof_indices_cache (const DoFCellAccessor<DoFHandler<2,spacedim> > &accessor)
- {
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices. otherwise
- // simply don't update the cache at
- // all. the get_dof_indices
- // function will then make sure we
- // don't access the invalid data
- if (accessor.has_children()
- &&
- (accessor.get_fe().dofs_per_cell !=
- accessor.get_fe().dofs_per_vertex * GeometryInfo<2>::vertices_per_cell))
- return;
-
- const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
- dofs_per_line = accessor.get_fe().dofs_per_line,
- dofs_per_quad = accessor.get_fe().dofs_per_quad,
- dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- // make sure the cache is at least
- // as big as we need it when
- // writing to the last element of
- // this cell
- Assert (accessor.present_index * dofs_per_cell + dofs_per_cell
- <=
- accessor.dof_handler->levels[accessor.present_level]
- ->cell_dof_indices_cache.size(),
- ExcInternalError());
-
- std::vector<types::global_dof_index>::iterator next
- = (accessor.dof_handler->levels[accessor.present_level]
- ->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
-
- for (unsigned int vertex=0; vertex<4; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d)
- *next++ = accessor.vertex_dof_index(vertex,d);
- for (unsigned int line=0; line<4; ++line)
- for (unsigned int d=0; d<dofs_per_line; ++d)
- *next++ = accessor.line(line)->dof_index(d);
- for (unsigned int d=0; d<dofs_per_quad; ++d)
- *next++ = accessor.dof_index(d);
- }
-
-
-
- template <int spacedim>
- static
- void
- update_cell_dof_indices_cache (const DoFCellAccessor<DoFHandler<3,spacedim> > &accessor)
- {
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices. otherwise
- // simply don't update the cache at
- // all. the get_dof_indices
- // function will then make sure we
- // don't access the invalid data
- if (accessor.has_children()
- &&
- (accessor.get_fe().dofs_per_cell !=
- accessor.get_fe().dofs_per_vertex * GeometryInfo<3>::vertices_per_cell))
- return;
-
- const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
- dofs_per_line = accessor.get_fe().dofs_per_line,
- dofs_per_quad = accessor.get_fe().dofs_per_quad,
- dofs_per_hex = accessor.get_fe().dofs_per_hex,
- dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- // make sure the cache is at least
- // as big as we need it when
- // writing to the last element of
- // this cell
- Assert (accessor.present_index * dofs_per_cell + dofs_per_cell
- <=
- accessor.dof_handler->levels[accessor.present_level]
- ->cell_dof_indices_cache.size(),
- ExcInternalError());
-
- std::vector<types::global_dof_index>::iterator next
- = (accessor.dof_handler->levels[accessor.present_level]
- ->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
-
- for (unsigned int vertex=0; vertex<8; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d)
- *next++ = accessor.vertex_dof_index(vertex,d);
- // now copy dof numbers from the line. for
- // lines with the wrong orientation, we have
- // already made sure that we're ok by picking
- // the correct vertices (this happens
- // automatically in the vertex()
- // function). however, if the line is in
- // wrong orientation, we look at it in
- // flipped orientation and we will have to
- // adjust the shape function indices that we
- // see to correspond to the correct
- // (cell-local) ordering.
- for (unsigned int line=0; line<12; ++line)
- for (unsigned int d=0; d<dofs_per_line; ++d)
- *next++ = accessor.line(line)->dof_index(accessor.dof_handler->get_fe().
- adjust_line_dof_index_for_line_orientation(d,
- accessor.line_orientation(line)));
- // now copy dof numbers from the face. for
- // faces with the wrong orientation, we
- // have already made sure that we're ok by
- // picking the correct lines and vertices
- // (this happens automatically in the
- // line() and vertex() functions). however,
- // if the face is in wrong orientation, we
- // look at it in flipped orientation and we
- // will have to adjust the shape function
- // indices that we see to correspond to the
- // correct (cell-local) ordering. The same
- // applies, if the face_rotation or
- // face_orientation is non-standard
- for (unsigned int quad=0; quad<6; ++quad)
- for (unsigned int d=0; d<dofs_per_quad; ++d)
- *next++ = accessor.quad(quad)->dof_index(accessor.dof_handler->get_fe().
- adjust_quad_dof_index_for_face_orientation(d,
- accessor.face_orientation(quad),
- accessor.face_flip(quad),
- accessor.face_rotation(quad)));
- for (unsigned int d=0; d<dofs_per_hex; ++d)
- *next++ = accessor.dof_index(d);
- }
-
-
- // implementation for the case of
- // hp::DoFHandler objects. it's
- // not implemented there, for no
- // space dimension
- template <int dim, int spacedim>
- static
- void
- update_cell_dof_indices_cache (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &)
- {
+ /**
+ * Implement the updating of the
+ * cache. Currently not
+ * implemented for hp::DoFHandler
+ * objects.
+ */
+ template <int spacedim>
+ static
+ void
+ update_cell_dof_indices_cache (const DoFCellAccessor<DoFHandler<1,spacedim> > &accessor)
+ {
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices. otherwise
+ // simply don't update the cache at
+ // all. the get_dof_indices
+ // function will then make sure we
+ // don't access the invalid data
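++ // (the reason is that on a cell
++ // with children only the vertex
++ // dofs coincide with dofs of
++ // active cells and therefore
++ // carry meaningful indices)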
+ if (accessor.has_children()
+ &&
+ (accessor.get_fe().dofs_per_cell !=
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<1>::vertices_per_cell))
+ return;
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ // make sure the cache is at least
+ // as big as we need it when
+ // writing to the last element of
+ // this cell
+ Assert (accessor.present_index * dofs_per_cell + dofs_per_cell
+ <=
+ accessor.dof_handler->levels[accessor.present_level]
+ ->cell_dof_indices_cache.size(),
+ ExcInternalError());
+
- std::vector<unsigned int>::iterator next
++ std::vector<types::global_dof_index>::iterator next
+ = (accessor.dof_handler->levels[accessor.present_level]
+ ->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
+
+ for (unsigned int vertex=0; vertex<2; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d)
+ *next++ = accessor.vertex_dof_index(vertex,d);
+ for (unsigned int d=0; d<dofs_per_line; ++d)
+ *next++ = accessor.dof_index(d);
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ update_cell_dof_indices_cache (const DoFCellAccessor<DoFHandler<2,spacedim> > &accessor)
+ {
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices. otherwise
+ // simply don't update the cache at
+ // all. the get_dof_indices
+ // function will then make sure we
+ // don't access the invalid data
+ if (accessor.has_children()
+ &&
+ (accessor.get_fe().dofs_per_cell !=
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<2>::vertices_per_cell))
+ return;
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_quad = accessor.get_fe().dofs_per_quad,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ // make sure the cache is at least
+ // as big as we need it when
+ // writing to the last element of
+ // this cell
+ Assert (accessor.present_index * dofs_per_cell + dofs_per_cell
+ <=
+ accessor.dof_handler->levels[accessor.present_level]
+ ->cell_dof_indices_cache.size(),
+ ExcInternalError());
+
- std::vector<unsigned int>::iterator next
++ std::vector<types::global_dof_index>::iterator next
+ = (accessor.dof_handler->levels[accessor.present_level]
+ ->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
+
+ for (unsigned int vertex=0; vertex<4; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d)
+ *next++ = accessor.vertex_dof_index(vertex,d);
+ for (unsigned int line=0; line<4; ++line)
+ for (unsigned int d=0; d<dofs_per_line; ++d)
+ *next++ = accessor.line(line)->dof_index(d);
+ for (unsigned int d=0; d<dofs_per_quad; ++d)
+ *next++ = accessor.dof_index(d);
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ update_cell_dof_indices_cache (const DoFCellAccessor<DoFHandler<3,spacedim> > &accessor)
+ {
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices. otherwise
+ // simply don't update the cache at
+ // all. the get_dof_indices
+ // function will then make sure we
+ // don't access the invalid data
+ if (accessor.has_children()
+ &&
+ (accessor.get_fe().dofs_per_cell !=
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<3>::vertices_per_cell))
+ return;
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_quad = accessor.get_fe().dofs_per_quad,
+ dofs_per_hex = accessor.get_fe().dofs_per_hex,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ // make sure the cache is at least
+ // as big as we need it when
+ // writing to the last element of
+ // this cell
+ Assert (accessor.present_index * dofs_per_cell + dofs_per_cell
+ <=
+ accessor.dof_handler->levels[accessor.present_level]
+ ->cell_dof_indices_cache.size(),
+ ExcInternalError());
+
- std::vector<unsigned int>::iterator next
++ std::vector<types::global_dof_index>::iterator next
+ = (accessor.dof_handler->levels[accessor.present_level]
+ ->cell_dof_indices_cache.begin() + accessor.present_index * dofs_per_cell);
+
+ for (unsigned int vertex=0; vertex<8; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d)
+ *next++ = accessor.vertex_dof_index(vertex,d);
+ // now copy dof numbers from the line. for
+ // lines with the wrong orientation, we have
+ // already made sure that we're ok by picking
+ // the correct vertices (this happens
+ // automatically in the vertex()
+ // function). however, if the line is in
+ // wrong orientation, we look at it in
+ // flipped orientation and we will have to
+ // adjust the shape function indices that we
+ // see to correspond to the correct
+ // (cell-local) ordering.
+ for (unsigned int line=0; line<12; ++line)
+ for (unsigned int d=0; d<dofs_per_line; ++d)
+ *next++ = accessor.line(line)->dof_index(accessor.dof_handler->get_fe().
+ adjust_line_dof_index_for_line_orientation(d,
+ accessor.line_orientation(line)));
+ // now copy dof numbers from the face. for
+ // faces with the wrong orientation, we
+ // have already made sure that we're ok by
+ // picking the correct lines and vertices
+ // (this happens automatically in the
+ // line() and vertex() functions). however,
+ // if the face is in wrong orientation, we
+ // look at it in flipped orientation and we
+ // will have to adjust the shape function
+ // indices that we see to correspond to the
+ // correct (cell-local) ordering. The same
+ // applies, if the face_rotation or
+ // face_orientation is non-standard
+ for (unsigned int quad=0; quad<6; ++quad)
+ for (unsigned int d=0; d<dofs_per_quad; ++d)
+ *next++ = accessor.quad(quad)->dof_index(accessor.dof_handler->get_fe().
+ adjust_quad_dof_index_for_face_orientation(d,
+ accessor.face_orientation(quad),
+ accessor.face_flip(quad),
+ accessor.face_rotation(quad)));
+ for (unsigned int d=0; d<dofs_per_hex; ++d)
+ *next++ = accessor.dof_index(d);
+ }
+
+
+ // implementation for the case of
+ // hp::DoFHandler objects. it's
+ // not implemented there for any
+ // space dimension
+ template <int dim, int spacedim>
+ static
+ void
+ update_cell_dof_indices_cache (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &)
+ {
//TODO[WB]: should implement a dof indices cache for hp as well
- // not implemented, but should also
- // not be called
- Assert (false, ExcNotImplemented());
- }
-
- /**
- * Implement setting dof
- * indices on a
- * cell. Currently not
- * implemented for
- * hp::DoFHandler objects.
- */
- template <int spacedim>
- static
- void
- set_dof_indices (DoFCellAccessor<DoFHandler<1,spacedim> > &accessor,
- const std::vector<types::global_dof_index> &dof_indices)
- {
- Assert (accessor.has_children() == false,
- ExcInternalError());
-
- const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
- dofs_per_line = accessor.get_fe().dofs_per_line,
- dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- Assert (dof_indices.size() == dofs_per_cell,
- ExcInternalError());
-
- unsigned int index = 0;
-
- for (unsigned int vertex=0; vertex<2; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
- accessor.set_vertex_dof_index(vertex,d,
- dof_indices[index]);
- for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
- accessor.dof_index(d, dof_indices[index]);
-
- Assert (index == dofs_per_cell,
- ExcInternalError());
- }
-
-
-
- template <int spacedim>
- static
- void
- set_dof_indices (DoFCellAccessor<DoFHandler<2,spacedim> > &accessor,
- const std::vector<types::global_dof_index> &local_dof_indices)
- {
- Assert (accessor.has_children() == false,
- ExcInternalError());
-
- const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
- dofs_per_line = accessor.get_fe().dofs_per_line,
- dofs_per_quad = accessor.get_fe().dofs_per_quad,
- dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- Assert (local_dof_indices.size() == dofs_per_cell,
- ExcInternalError());
-
- unsigned int index = 0;
-
- for (unsigned int vertex=0; vertex<4; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
- accessor.set_vertex_dof_index(vertex,d,
- local_dof_indices[index]);
- for (unsigned int line=0; line<4; ++line)
- for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
- accessor.line(line)->set_dof_index(d, local_dof_indices[index]);
-
- for (unsigned int d=0; d<dofs_per_quad; ++d, ++index)
- accessor.set_dof_index(d, local_dof_indices[index]);
-
- Assert (index == dofs_per_cell,
- ExcInternalError());
- }
-
-
-
- template <int spacedim>
- static
- void
- set_dof_indices (DoFCellAccessor<DoFHandler<3,spacedim> > &accessor,
- const std::vector<types::global_dof_index> &dof_indices)
- {
- Assert (accessor.has_children() == false,
- ExcInternalError());
-
- const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
- dofs_per_line = accessor.get_fe().dofs_per_line,
- dofs_per_quad = accessor.get_fe().dofs_per_quad,
- dofs_per_hex = accessor.get_fe().dofs_per_hex,
- dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- Assert (dof_indices.size() == dofs_per_cell,
- ExcInternalError());
-
- unsigned int index = 0;
-
- for (unsigned int vertex=0; vertex<8; ++vertex)
- for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
- accessor.set_vertex_dof_index(vertex,d,
- dof_indices[index]);
- // now copy dof numbers into the line. for
- // lines with the wrong orientation, we have
- // already made sure that we're ok by picking
- // the correct vertices (this happens
- // automatically in the vertex()
- // function). however, if the line is in
- // wrong orientation, we look at it in
- // flipped orientation and we will have to
- // adjust the shape function indices that we
- // see to correspond to the correct
- // (cell-local) ordering.
- for (unsigned int line=0; line<12; ++line)
- for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
- accessor.line(line)->set_dof_index(accessor.dof_handler->get_fe().
- adjust_line_dof_index_for_line_orientation(d,
- accessor.line_orientation(line)),
- dof_indices[index]);
- // now copy dof numbers into the face. for
- // faces with the wrong orientation, we
- // have already made sure that we're ok by
- // picking the correct lines and vertices
- // (this happens automatically in the
- // line() and vertex() functions). however,
- // if the face is in wrong orientation, we
- // look at it in flipped orientation and we
- // will have to adjust the shape function
- // indices that we see to correspond to the
- // correct (cell-local) ordering. The same
- // applies, if the face_rotation or
- // face_orientation is non-standard
- for (unsigned int quad=0; quad<6; ++quad)
- for (unsigned int d=0; d<dofs_per_quad; ++d, ++index)
- accessor.quad(quad)->set_dof_index(accessor.dof_handler->get_fe().
- adjust_quad_dof_index_for_face_orientation(d,
- accessor.face_orientation(quad),
- accessor.face_flip(quad),
- accessor.face_rotation(quad)),
- dof_indices[index]);
- for (unsigned int d=0; d<dofs_per_hex; ++d, ++index)
- accessor.set_dof_index(d, dof_indices[index]);
-
- Assert (index == dofs_per_cell,
- ExcInternalError());
- }
-
-
- // implementation for the case of
- // hp::DoFHandler objects. it's
- // not implemented there, for no
- // space dimension
- template <int dim, int spacedim>
- static
- void
- set_dof_indices (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &,
- const std::vector<types::global_dof_index> &)
- {
- Assert (false, ExcNotImplemented());
- }
-
-
- /**
- * A function that collects the
- * global indices of degrees of
- * freedom. This function works
- * for ::DoFHandler and all
- * template arguments and copies
- * the data out of the cache that
- * we hold for each cell.
- */
- template <int dim, int spacedim>
- static
- void
- get_dof_indices (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
- std::vector<types::global_dof_index> &dof_indices)
- {
- typedef
- dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> >
- BaseClass;
- Assert (dof_indices.size() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
- for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
- dof_indices[i] = *cache;
- }
-
- /**
- * Same function as above except
- * that it works for
- * hp::DoFHandler objects that do
- * not have a cache for the local
- * DoF indices.
- */
- template <int dim, int spacedim>
- static
- void
- get_dof_indices (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- std::vector<types::global_dof_index> &dof_indices)
- {
- // no caching for
- // hp::DoFHandler
- // implemented
- typedef
- dealii::DoFAccessor<dim,dealii::hp::DoFHandler<dim,spacedim> >
- DoFAccessor;
- accessor.DoFAccessor::get_dof_indices (dof_indices,
- accessor.active_fe_index());
- }
-
-
- /**
- * A function that collects the
- * values of degrees of freedom. This
- * function works for ::DoFHandler
- * and all template arguments and
- * uses the data from the cache of
- * indices that we hold for each
- * cell.
- */
- template <int dim, int spacedim, class InputVector, typename ForwardIterator>
- static
- void
- get_dof_values (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
- const InputVector &values,
- ForwardIterator local_values_begin,
- ForwardIterator local_values_end)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
- == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (values.size() == accessor.get_dof_handler().n_dofs(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index *
- accessor.get_fe().dofs_per_cell];
- for ( ; local_values_begin != local_values_end; ++local_values_begin, ++cache)
- *local_values_begin = values(*cache);
- }
-
- /**
- * Same function as above except
- * that it works for
- * hp::DoFHandler objects that do
- * not have a cache for the local
- * DoF indices.
- */
- template <int dim, int spacedim, class InputVector, typename ForwardIterator>
- static
- void
- get_dof_values (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const InputVector &values,
- ForwardIterator local_values_begin,
- ForwardIterator local_values_end)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
-
- // no caching for hp::DoFHandler
- // implemented
- Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
- == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
- get_dof_indices (accessor, local_dof_indices);
-
- for (unsigned int i=0; i<dofs_per_cell; ++i, ++local_values_begin)
- *local_values_begin = values(local_dof_indices[i]);
- }
-
-
- /**
- * A function that collects the
- * values of degrees of freedom. This
- * function works for ::DoFHandler
- * and all template arguments and
- * uses the data from the cache of
- * indices that we hold for each
- * cell.
- */
- template <int dim, int spacedim, class InputVector, typename ForwardIterator>
- static
- void
- get_dof_values (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
- const ConstraintMatrix &constraints,
- const InputVector &values,
- ForwardIterator local_values_begin,
- ForwardIterator local_values_end)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
- == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (values.size() == accessor.get_dof_handler().n_dofs(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index *
- accessor.get_fe().dofs_per_cell];
- constraints.get_dof_values(values, *cache, local_values_begin,
- local_values_end);
- }
-
- /**
- * Same function as above except
- * that it works for
- * hp::DoFHandler objects that do
- * not have a cache for the local
- * DoF indices.
- */
- template <int dim, int spacedim, class InputVector, typename ForwardIterator>
- static
- void
- get_dof_values (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const ConstraintMatrix &constraints,
- const InputVector &values,
- ForwardIterator local_values_begin,
- ForwardIterator local_values_end)
- {
- // no caching for hp::DoFHandler
- // implemented
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
- == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
- get_dof_indices (accessor, local_dof_indices);
-
- constraints.get_dof_values (values, local_dof_indices.begin(),
- local_values_begin, local_values_end);
- }
-
-
- /**
- * Same set of functions as above
- * except that it sets rather than
- * gets values
- */
- template <int dim, int spacedim, class OutputVector, typename number>
- static
- void
- set_dof_values (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
- const dealii::Vector<number> &local_values,
- OutputVector &values)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (local_values.size() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (values.size() == accessor.get_dof_handler().n_dofs(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
- for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
- values(*cache) = local_values(i);
- }
-
-
-
- template <int dim, int spacedim, class OutputVector, typename number>
- static
- void
- set_dof_values (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const dealii::Vector<number> &local_values,
- OutputVector &values)
- {
- // no caching for hp::DoFHandler
- // implemented
- const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
-
- std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
- get_dof_indices (accessor, local_dof_indices);
-
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- values(local_dof_indices[i]) = local_values(i);
- }
-
-
- /**
- * Do what the active_fe_index
- * function in the parent class
- * is supposed to do.
- */
- template <int dim, int spacedim>
- static
- unsigned int
- active_fe_index (const DoFCellAccessor<DoFHandler<dim,spacedim> > &)
- {
- // ::DoFHandler only supports a
- // single active fe with index
- // zero
- return 0;
- }
-
-
-
- template <int dim, int spacedim>
- static
- unsigned int
- active_fe_index (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor)
- {
- Assert (static_cast<unsigned int>(accessor.level()) < accessor.dof_handler->levels.size(),
- ExcMessage ("DoFHandler not initialized"));
- Assert (static_cast<std::vector<unsigned int>::size_type>(accessor.present_index) <
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size (),
- ExcIndexRange (accessor.present_index, 0,
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size ()));
- return accessor.dof_handler->levels[accessor.level()]
- ->active_fe_indices[accessor.present_index];
- }
-
-
-
- /**
- * Do what the
- * set_active_fe_index function
- * in the parent class is
- * supposed to do.
- */
- template <int dim, int spacedim>
- static
- void
- set_active_fe_index (const DoFCellAccessor<DoFHandler<dim,spacedim> > &,
- const unsigned int i)
- {
- // ::DoFHandler only supports a
- // single active fe with index
- // zero
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (i == 0, typename BaseClass::ExcInvalidObject());
- }
-
-
-
- template <int dim, int spacedim>
- static
- void
- set_active_fe_index (DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const unsigned int i)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (static_cast<unsigned int>(accessor.level()) <
- accessor.dof_handler->levels.size(),
- ExcMessage ("DoFHandler not initialized"));
- Assert (static_cast<std::vector<unsigned int>::size_type>(accessor.present_index) <
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size (),
- ExcIndexRange (accessor.present_index, 0,
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size ()));
- accessor.dof_handler->levels[accessor.level()]
- ->active_fe_indices[accessor.present_index] = i;
- }
-
-
-
- template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
- ForwardIterator local_source_begin,
- ForwardIterator local_source_end,
- OutputVector &global_destination)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (static_cast<unsigned int>(local_source_end-local_source_begin)
- ==
- accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- const unsigned int n_dofs = local_source_end - local_source_begin;
-
- types::global_dof_index * dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
-
- // distribute cell vector
- global_destination.add(n_dofs, dofs, local_source_begin);
- }
-
-
-
- template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- ForwardIterator local_source_begin,
- ForwardIterator local_source_end,
- OutputVector &global_destination)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_source_end-local_source_begin == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- const unsigned int n_dofs = local_source_end - local_source_begin;
+ // not implemented, but should also
+ // not be called
+ Assert (false, ExcNotImplemented());
+ }
+
+ /**
+ * Implement setting dof
+ * indices on a
+ * cell. Currently not
+ * implemented for
+ * hp::DoFHandler objects.
+ */
+ template <int spacedim>
+ static
+ void
+ set_dof_indices (DoFCellAccessor<DoFHandler<1,spacedim> > &accessor,
- const std::vector<unsigned int> &local_dof_indices)
++ const std::vector<types::global_dof_index> &dof_indices)
+ {
+ Assert (accessor.has_children() == false,
+ ExcInternalError());
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
- Assert (local_dof_indices.size() == dofs_per_cell,
++ Assert (dof_indices.size() == dofs_per_cell,
+ ExcInternalError());
+
+ unsigned int index = 0;
+
+ for (unsigned int vertex=0; vertex<2; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
+ accessor.set_vertex_dof_index(vertex,d,
- local_dof_indices[index]);
++ dof_indices[index]);
+ for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
- accessor.set_dof_index(d, local_dof_indices[index]);
++ accessor.set_dof_index(d, dof_indices[index]);
+
+ Assert (index == dofs_per_cell,
+ ExcInternalError());
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_indices (DoFCellAccessor<DoFHandler<2,spacedim> > &accessor,
- const std::vector<unsigned int> &local_dof_indices)
++ const std::vector<types::global_dof_index> &local_dof_indices)
+ {
+ Assert (accessor.has_children() == false,
+ ExcInternalError());
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_quad = accessor.get_fe().dofs_per_quad,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ Assert (local_dof_indices.size() == dofs_per_cell,
+ ExcInternalError());
+
+ unsigned int index = 0;
+
+ for (unsigned int vertex=0; vertex<4; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
+ accessor.set_vertex_dof_index(vertex,d,
+ local_dof_indices[index]);
+ for (unsigned int line=0; line<4; ++line)
+ for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
+ accessor.line(line)->set_dof_index(d, local_dof_indices[index]);
+
+ for (unsigned int d=0; d<dofs_per_quad; ++d, ++index)
+ accessor.set_dof_index(d, local_dof_indices[index]);
+
+ Assert (index == dofs_per_cell,
+ ExcInternalError());
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_indices (DoFCellAccessor<DoFHandler<3,spacedim> > &accessor,
- const std::vector<unsigned int> &local_dof_indices)
++ const std::vector<types::global_dof_index> &dof_indices)
+ {
+ Assert (accessor.has_children() == false,
+ ExcInternalError());
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_quad = accessor.get_fe().dofs_per_quad,
+ dofs_per_hex = accessor.get_fe().dofs_per_hex,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
- Assert (local_dof_indices.size() == dofs_per_cell,
++ Assert (dof_indices.size() == dofs_per_cell,
+ ExcInternalError());
+
+ unsigned int index = 0;
+
+ for (unsigned int vertex=0; vertex<8; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
+ accessor.set_vertex_dof_index(vertex,d,
- local_dof_indices[index]);
++ dof_indices[index]);
+ // now copy dof numbers into the line. for
+ // lines with the wrong orientation, we have
+ // already made sure that we're ok by picking
+ // the correct vertices (this happens
+ // automatically in the vertex()
+ // function). however, if the line is in the
+ // wrong orientation, we look at it in
+ // flipped orientation and we will have to
+ // adjust the shape function indices that we
+ // see to correspond to the correct
+ // (cell-local) ordering.
+ for (unsigned int line=0; line<12; ++line)
+ for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
+ accessor.line(line)->set_dof_index(accessor.dof_handler->get_fe().
+ adjust_line_dof_index_for_line_orientation(d,
+ accessor.line_orientation(line)),
- local_dof_indices[index]);
++ dof_indices[index]);
+ // now copy dof numbers into the face. for
+ // faces with the wrong orientation, we
+ // have already made sure that we're ok by
+ // picking the correct lines and vertices
+ // (this happens automatically in the
+ // line() and vertex() functions). however,
+ // if the face is in the wrong orientation, we
+ // look at it in flipped orientation and we
+ // will have to adjust the shape function
+ // indices that we see to correspond to the
+ // correct (cell-local) ordering. The same
+ // applies, if the face_rotation or
+ // face_orientation is non-standard
+ for (unsigned int quad=0; quad<6; ++quad)
+ for (unsigned int d=0; d<dofs_per_quad; ++d, ++index)
+ accessor.quad(quad)->set_dof_index(accessor.dof_handler->get_fe().
+ adjust_quad_dof_index_for_face_orientation(d,
+ accessor.face_orientation(quad),
+ accessor.face_flip(quad),
+ accessor.face_rotation(quad)),
- local_dof_indices[index]);
++ dof_indices[index]);
+ for (unsigned int d=0; d<dofs_per_hex; ++d, ++index)
- accessor.set_dof_index(d, local_dof_indices[index]);
++ accessor.set_dof_index(d, dof_indices[index]);
+
+ Assert (index == dofs_per_cell,
+ ExcInternalError());
+ }
+
+
+ // implementation for the case of
+ // hp::DoFHandler objects. this is
+ // currently not implemented for
+ // any space dimension
+ template <int dim, int spacedim>
+ static
+ void
+ set_dof_indices (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &,
- const std::vector<unsigned int> &)
++ const std::vector<types::global_dof_index> &)
+ {
+ Assert (false, ExcNotImplemented());
+ }
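// A small illustration only (it assumes the usual deal.II headers for
// FiniteElement and GeometryInfo plus <iostream>, and the function name is
// purely illustrative): the three set_dof_indices() variants above always
// walk the cell-local indices in the order vertices, then lines, then
// quads, then the cell interior. This helper prints where each block starts.
template <int dim>
void print_local_dof_layout (const FiniteElement<dim> &fe)
{
  const unsigned int n_vertex_dofs
    = GeometryInfo<dim>::vertices_per_cell * fe.dofs_per_vertex;
  const unsigned int n_line_dofs
    = GeometryInfo<dim>::lines_per_cell * fe.dofs_per_line;
  const unsigned int n_quad_dofs
    = GeometryInfo<dim>::quads_per_cell * fe.dofs_per_quad;

  std::cout << "vertex dofs start at   0" << std::endl
            << "line dofs start at     " << n_vertex_dofs << std::endl
            << "quad dofs start at     " << n_vertex_dofs + n_line_dofs << std::endl
            << "interior dofs start at "
            << n_vertex_dofs + n_line_dofs + n_quad_dofs << std::endl
            << "dofs_per_cell is       " << fe.dofs_per_cell << std::endl;
}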
+
+
+ /**
+ * A function that collects the
+ * global indices of degrees of
+ * freedom. This function works
+ * for ::DoFHandler and all
+ * template arguments and copies
+ * the data out of the cache that
+ * we hold for each cell.
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ get_dof_indices (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
- std::vector<unsigned int> &dof_indices)
++ std::vector<types::global_dof_index> &dof_indices)
+ {
+ typedef
+ dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> >
+ BaseClass;
+ Assert (dof_indices.size() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
- unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
++ types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
+ for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
+ dof_indices[i] = *cache;
+ }
+
+ /**
+ * Same function as above except
+ * that it works for
+ * hp::DoFHandler objects that do
+ * not have a cache for the local
+ * DoF indices.
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ get_dof_indices (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- std::vector<unsigned int> &dof_indices)
++ std::vector<types::global_dof_index> &dof_indices)
+ {
+ // no caching for
+ // hp::DoFHandler
+ // implemented
+ typedef
+ dealii::DoFAccessor<dim,dealii::hp::DoFHandler<dim,spacedim> >
+ DoFAccessor;
+ accessor.DoFAccessor::get_dof_indices (dof_indices,
+ accessor.active_fe_index());
+ }
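// A short usage sketch for a plain ::DoFHandler (function and variable
// names are illustrative; the usual DoFHandler and DoFAccessor headers are
// assumed): user code reaches the two helpers above through the public
// DoFCellAccessor::get_dof_indices() interface.
template <int dim>
void collect_cell_indices (const DoFHandler<dim> &dof_handler)
{
  std::vector<types::global_dof_index>
  local_dof_indices (dof_handler.get_fe().dofs_per_cell);

  typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    // copy the cached global indices of this cell's DoFs
    cell->get_dof_indices (local_dof_indices);
}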
+
+
+ /**
+ * A function that collects the
+ * values of degrees of freedom. This
+ * function works for ::DoFHandler
+ * and all template arguments and
+ * uses the data from the cache of
+ * indices that we hold for each
+ * cell.
+ */
+ template <int dim, int spacedim, class InputVector, typename ForwardIterator>
+ static
+ void
+ get_dof_values (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
+ const InputVector &values,
+ ForwardIterator local_values_begin,
+ ForwardIterator local_values_end)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
+ == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (values.size() == accessor.get_dof_handler().n_dofs(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
- unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index *
- accessor.get_fe().dofs_per_cell];
++ types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index *
++ accessor.get_fe().dofs_per_cell];
+ for ( ; local_values_begin != local_values_end; ++local_values_begin, ++cache)
+ *local_values_begin = values(*cache);
+ }
+
+ /**
+ * Same function as above except
+ * that it works for
+ * hp::DoFHandler objects that do
+ * not have a cache for the local
+ * DoF indices.
+ */
+ template <int dim, int spacedim, class InputVector, typename ForwardIterator>
+ static
+ void
+ get_dof_values (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const InputVector &values,
+ ForwardIterator local_values_begin,
+ ForwardIterator local_values_end)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+
+ // no caching for hp::DoFHandler
+ // implemented
+ Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
+ == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
++ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+ get_dof_indices (accessor, local_dof_indices);
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i, ++local_values_begin)
+ *local_values_begin = values(local_dof_indices[i]);
+ }
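// Usage sketch (names are illustrative): the public entry point
// DoFCellAccessor::get_dof_values() ends up in one of the two helpers
// above and extracts the cell's share of a global solution vector.
template <int dim>
void extract_local_values (const DoFHandler<dim> &dof_handler,
                           const Vector<double>  &solution)
{
  Vector<double> local_values (dof_handler.get_fe().dofs_per_cell);

  typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    // local_values(i) == solution(global index of local dof i)
    cell->get_dof_values (solution, local_values);
}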
+
+
+ /**
+ * A function that collects the
+ * values of degrees of freedom. This
+ * function works for ::DoFHandler
+ * and all template arguments and
+ * uses the data from the cache of
+ * indices that we hold for each
+ * cell.
+ */
+ template <int dim, int spacedim, class InputVector, typename ForwardIterator>
+ static
+ void
+ get_dof_values (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
+ const ConstraintMatrix &constraints,
+ const InputVector &values,
+ ForwardIterator local_values_begin,
+ ForwardIterator local_values_end)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
+ == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (values.size() == accessor.get_dof_handler().n_dofs(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
- unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index *
- accessor.get_fe().dofs_per_cell];
++ types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index *
++ accessor.get_fe().dofs_per_cell];
+ constraints.get_dof_values(values, *cache, local_values_begin,
+ local_values_end);
+ }
+
+ /**
+ * Same function as above except
+ * that it works for
+ * hp::DoFHandler objects that do
+ * not have a cache for the local
+ * DoF indices.
+ */
+ template <int dim, int spacedim, class InputVector, typename ForwardIterator>
+ static
+ void
+ get_dof_values (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const ConstraintMatrix &constraints,
+ const InputVector &values,
+ ForwardIterator local_values_begin,
+ ForwardIterator local_values_end)
+ {
+ // no caching for hp::DoFHandler
+ // implemented
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (static_cast<unsigned int>(local_values_end-local_values_begin)
+ == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
++ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+ get_dof_indices (accessor, local_dof_indices);
+
+ constraints.get_dof_values (values, local_dof_indices.begin(),
+ local_values_begin, local_values_end);
+ }
+
+
+ /**
+ * Same set of functions as above
+ * except that it sets rather than
+ * gets values
+ */
+ template <int dim, int spacedim, class OutputVector, typename number>
+ static
+ void
+ set_dof_values (const DoFCellAccessor<DoFHandler<dim,spacedim> > &accessor,
+ const dealii::Vector<number> &local_values,
+ OutputVector &values)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (local_values.size() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (values.size() == accessor.get_dof_handler().n_dofs(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
- unsigned int *cache = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
++ types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index * accessor.get_fe().dofs_per_cell];
+ for (unsigned int i=0; i<accessor.get_fe().dofs_per_cell; ++i, ++cache)
+ values(*cache) = local_values(i);
+ }
+
+
+
+ template <int dim, int spacedim, class OutputVector, typename number>
+ static
+ void
+ set_dof_values (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const dealii::Vector<number> &local_values,
+ OutputVector &values)
+ {
+ // no caching for hp::DoFHandler
+ // implemented
+ const unsigned int dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
++ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+ get_dof_indices (accessor, local_dof_indices);
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ values(local_dof_indices[i]) = local_values(i);
+ }
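// The inverse direction as a sketch (names are illustrative): write a
// cell-local vector back into a global one through the public
// DoFCellAccessor::set_dof_values(), which forwards to the helpers above.
template <int dim>
void scatter_local_values (const DoFHandler<dim> &dof_handler,
                           const Vector<double>  &local_values,
                           Vector<double>        &global_values)
{
  typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    // global_values(global index of local dof i) = local_values(i)
    cell->set_dof_values (local_values, global_values);
}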
+
+
+ /**
+ * Do what the active_fe_index
+ * function in the parent class
+ * is supposed to do.
+ */
+ template <int dim, int spacedim>
+ static
+ unsigned int
+ active_fe_index (const DoFCellAccessor<DoFHandler<dim,spacedim> > &)
+ {
+ // ::DoFHandler only supports a
+ // single active fe with index
+ // zero
+ return 0;
+ }
+
+
+
+ template <int dim, int spacedim>
+ static
+ unsigned int
+ active_fe_index (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor)
+ {
+ Assert (static_cast<unsigned int>(accessor.level()) < accessor.dof_handler->levels.size(),
+ ExcMessage ("DoFHandler not initialized"));
+ Assert (static_cast<std::vector<unsigned int>::size_type>(accessor.present_index) <
+ accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size (),
+ ExcIndexRange (accessor.present_index, 0,
+ accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size ()));
+ return accessor.dof_handler->levels[accessor.level()]
+ ->active_fe_indices[accessor.present_index];
+ }
+
+
+
+ /**
+ * Do what the
+ * set_active_fe_index function
+ * in the parent class is
+ * supposed to do.
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ set_active_fe_index (const DoFCellAccessor<DoFHandler<dim,spacedim> > &,
+ const unsigned int i)
+ {
+ // ::DoFHandler only supports a
+ // single active fe with index
+ // zero
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (i == 0, typename BaseClass::ExcInvalidObject());
+ }
+
+
+
+ template <int dim, int spacedim>
+ static
+ void
+ set_active_fe_index (DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const unsigned int i)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (static_cast<unsigned int>(accessor.level()) <
+ accessor.dof_handler->levels.size(),
+ ExcMessage ("DoFHandler not initialized"));
+ Assert (static_cast<std::vector<unsigned int>::size_type>(accessor.present_index) <
+ accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size (),
+ ExcIndexRange (accessor.present_index, 0,
+ accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size ()));
+ accessor.dof_handler->levels[accessor.level()]
+ ->active_fe_indices[accessor.present_index] = i;
+ }
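// A minimal hp sketch (assuming an already filled hp::FECollection and the
// hp::DoFHandler headers; the function name is illustrative): the active FE
// index that the helpers above store and look up is chosen per cell before
// the DoFs are distributed.
template <int dim>
void use_first_element_everywhere (hp::DoFHandler<dim>         &dof_handler,
                                   const hp::FECollection<dim> &fe_collection)
{
  typename hp::DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    cell->set_active_fe_index (0);

  dof_handler.distribute_dofs (fe_collection);
}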
+
+
+
+ template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
+ ForwardIterator local_source_begin,
+ ForwardIterator local_source_end,
+ OutputVector &global_destination)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (static_cast<unsigned int>(local_source_end-local_source_begin)
+ ==
+ accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
+ const unsigned int n_dofs = local_source_end - local_source_begin;
+
- unsigned int *dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
++ types::global_dof_index *dofs = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index * n_dofs];
+
+ // distribute cell vector
+ global_destination.add(n_dofs, dofs, local_source_begin);
+ }
+
+
+
+ template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ ForwardIterator local_source_begin,
+ ForwardIterator local_source_end,
+ OutputVector &global_destination)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_source_end-local_source_begin == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ const unsigned int n_dofs = local_source_end - local_source_begin;
//TODO[WB/MK]: This function could be made more efficient because it allocates memory, which could be avoided by passing in another argument as a scratch array. This should be fixed eventually
- // get indices of dofs
- std::vector<types::global_dof_index> dofs (n_dofs);
- accessor.get_dof_indices (dofs);
-
- // distribute cell vector
- global_destination.add (n_dofs, dofs.begin(), local_source_begin);
- }
-
-
-
- template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
- const ConstraintMatrix &constraints,
- ForwardIterator local_source_begin,
- ForwardIterator local_source_end,
- OutputVector &global_destination)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_source_end-local_source_begin == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- const unsigned int n_dofs = local_source_end - local_source_begin;
-
- types::global_dof_index * dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
-
- // distribute cell vector
- constraints.distribute_local_to_global (local_source_begin, local_source_end,
- dofs, global_destination);
- }
-
-
-
- template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const ConstraintMatrix &constraints,
- ForwardIterator local_source_begin,
- ForwardIterator local_source_end,
- OutputVector &global_destination)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_source_end-local_source_begin == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- const unsigned int n_dofs = local_source_end - local_source_begin;
+ // get indices of dofs
- std::vector<unsigned int> dofs (n_dofs);
++ std::vector<types::global_dof_index> dofs (n_dofs);
+ accessor.get_dof_indices (dofs);
+
+ // distribute cell vector
+ global_destination.add (n_dofs, dofs.begin(), local_source_begin);
+ }
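// Assembly sketch (names are illustrative): once a local contribution has
// been computed for a cell, the public
// DoFCellAccessor::distribute_local_to_global() adds it into the global
// vector, using the index cache for ::DoFHandler and freshly collected
// indices for hp::DoFHandler, as implemented above.
template <int dim>
void add_cell_loads (const DoFHandler<dim> &dof_handler,
                     Vector<double>        &global_rhs)
{
  Vector<double> cell_rhs (dof_handler.get_fe().dofs_per_cell);
  for (unsigned int i=0; i<cell_rhs.size(); ++i)
    cell_rhs(i) = 1.;          // stands in for a real local right hand side

  typename DoFHandler<dim>::active_cell_iterator
  cell = dof_handler.begin_active(),
  endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    cell->distribute_local_to_global (cell_rhs, global_rhs);
}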
+
+
+
+ template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
+ const ConstraintMatrix &constraints,
+ ForwardIterator local_source_begin,
+ ForwardIterator local_source_end,
+ OutputVector &global_destination)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_source_end-local_source_begin == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
+ const unsigned int n_dofs = local_source_end - local_source_begin;
+
- unsigned int *dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
++ types::global_dof_index *dofs = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index * n_dofs];
+
+ // distribute cell vector
+ constraints.distribute_local_to_global (local_source_begin, local_source_end,
+ dofs, global_destination);
+ }
+
+
+
+ template <int dim, int spacedim, typename ForwardIterator, class OutputVector>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const ConstraintMatrix &constraints,
+ ForwardIterator local_source_begin,
+ ForwardIterator local_source_end,
+ OutputVector &global_destination)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_source_end-local_source_begin == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.size(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ const unsigned int n_dofs = local_source_end - local_source_begin;
//TODO[WB/MK]: This function could be made more efficient because it allocates memory, which could be avoided by passing in another argument as a scratch array. This should be fixed eventually
- // get indices of dofs
- std::vector<types::global_dof_index> dofs (n_dofs);
- accessor.get_dof_indices (dofs);
-
- // distribute cell vector
- constraints.distribute_local_to_global (local_source_begin, local_source_end,
- dofs.begin(), global_destination);
- }
-
-
-
- template <int dim, int spacedim, typename number, class OutputMatrix>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
- const dealii::FullMatrix<number> &local_source,
- OutputMatrix &global_destination)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_source.m() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (local_source.n() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.m(),
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.n(),
- typename BaseClass::ExcMatrixDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- const unsigned int n_dofs = local_source.m();
-
- types::global_dof_index * dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
-
- // distribute cell matrix
- for (unsigned int i=0; i<n_dofs; ++i)
- global_destination.add(dofs[i], n_dofs, dofs,
- &local_source(i,0));
- }
-
-
-
- template <int dim, int spacedim, typename number, class OutputMatrix>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const dealii::FullMatrix<number> &local_source,
- OutputMatrix &global_destination)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_source.m() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (local_source.n() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.m(),
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_destination.n(),
- typename BaseClass::ExcMatrixDoesNotMatch());
-
- const unsigned int n_dofs = local_source.m();
+ // get indices of dofs
- std::vector<unsigned int> dofs (n_dofs);
++ std::vector<types::global_dof_index> dofs (n_dofs);
+ accessor.get_dof_indices (dofs);
+
+ // distribute cell vector
+ constraints.distribute_local_to_global (local_source_begin, local_source_end,
+ dofs.begin(), global_destination);
+ }
+
+
+
+ template <int dim, int spacedim, typename number, class OutputMatrix>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
+ const dealii::FullMatrix<number> &local_source,
+ OutputMatrix &global_destination)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_source.m() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (local_source.n() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.m(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.n(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
+ const unsigned int n_dofs = local_source.m();
+
- unsigned int *dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
++ types::global_dof_index *dofs = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index * n_dofs];
+
+ // distribute cell matrix
+ for (unsigned int i=0; i<n_dofs; ++i)
+ global_destination.add(dofs[i], n_dofs, dofs,
+ &local_source(i,0));
+ }
+
+
+
+ template <int dim, int spacedim, typename number, class OutputMatrix>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const dealii::FullMatrix<number> &local_source,
+ OutputMatrix &global_destination)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_source.m() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (local_source.n() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.m(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_destination.n(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+
+ const unsigned int n_dofs = local_source.m();
//TODO[WB/MK]: This function could be made more efficient because it allocates memory, which could be avoided by passing in another argument as a scratch array.
- // get indices of dofs
- std::vector<types::global_dof_index> dofs (n_dofs);
- accessor.get_dof_indices (dofs);
-
- // distribute cell matrix
- global_destination.add(dofs,local_source);
- }
-
-
-
- template <int dim, int spacedim, typename number,
- class OutputMatrix, typename OutputVector>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
- const dealii::FullMatrix<number> &local_matrix,
- const dealii::Vector<number> &local_vector,
- OutputMatrix &global_matrix,
- OutputVector &global_vector)
+ // get indices of dofs
- std::vector<unsigned int> dofs (n_dofs);
++ std::vector<types::global_dof_index> dofs (n_dofs);
+ accessor.get_dof_indices (dofs);
+
+ // distribute cell matrix
+ global_destination.add(dofs,local_source);
+ }
+
+
+
+ template <int dim, int spacedim, typename number,
+ class OutputMatrix, typename OutputVector>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::DoFHandler<dim,spacedim> > &accessor,
+ const dealii::FullMatrix<number> &local_matrix,
+ const dealii::Vector<number> &local_vector,
+ OutputMatrix &global_matrix,
+ OutputVector &global_vector)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_matrix.m() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (local_matrix.n() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_matrix.m(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_matrix.n(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (local_vector.size() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_vector.size(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ // check as in documentation that
+ // cell is either active, or dofs
+ // are only in vertices
+ Assert (!accessor.has_children()
+ ||
+ (accessor.get_fe().dofs_per_cell ==
+ accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
+ ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
+
+ const unsigned int n_dofs = accessor.get_fe().dofs_per_cell;
- unsigned int *dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index *n_dofs];
++ types::global_dof_index *dofs = &accessor.dof_handler->levels[accessor.level()]
++ ->cell_dof_indices_cache[accessor.present_index *n_dofs];
+
+ // distribute cell matrices
+ for (unsigned int i=0; i<n_dofs; ++i)
{
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_matrix.m() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (local_matrix.n() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_matrix.m(),
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_matrix.n(),
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (local_vector.size() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_vector.size(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- // check as in documentation that
- // cell is either active, or dofs
- // are only in vertices
- Assert (!accessor.has_children()
- ||
- (accessor.get_fe().dofs_per_cell ==
- accessor.get_fe().dofs_per_vertex * GeometryInfo<dim>::vertices_per_cell),
- ExcMessage ("Cell must either be active, or all DoFs must be in vertices"));
-
- const unsigned int n_dofs = accessor.get_fe().dofs_per_cell;
- types::global_dof_index * dofs = &accessor.dof_handler->levels[accessor.level()]
- ->cell_dof_indices_cache[accessor.present_index * n_dofs];
-
- // distribute cell matrices
- for (unsigned int i=0; i<n_dofs; ++i)
- {
- global_matrix.add(dofs[i], n_dofs, dofs, &local_matrix(i,0));
- global_vector(dofs[i]) += local_vector(i);
- }
- }
-
-
-
- template <int dim, int spacedim, typename number,
- class OutputMatrix, typename OutputVector>
- static
- void
- distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
- const dealii::FullMatrix<number> &local_matrix,
- const dealii::Vector<number> &local_vector,
- OutputMatrix &global_matrix,
- OutputVector &global_vector)
- {
- typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
- Assert (accessor.dof_handler != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (&accessor.get_fe() != 0,
- typename BaseClass::ExcInvalidObject());
- Assert (local_matrix.m() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (local_matrix.n() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_matrix.m(),
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_matrix.n(),
- typename BaseClass::ExcMatrixDoesNotMatch());
- Assert (local_vector.size() == accessor.get_fe().dofs_per_cell,
- typename BaseClass::ExcVectorDoesNotMatch());
- Assert (accessor.dof_handler->n_dofs() == global_vector.size(),
- typename BaseClass::ExcVectorDoesNotMatch());
-
- const unsigned int n_dofs = local_matrix.m();
+ global_matrix.add(dofs[i], n_dofs, dofs, &local_matrix(i,0));
+ global_vector(dofs[i]) += local_vector(i);
+ }
+ }
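// Sketch of the combined matrix/vector variant (the global objects and the
// SparseMatrix type are placeholders for whatever the application uses):
// both local contributions of a cell are added in a single call, which is
// routed to the function above or to its hp counterpart below.
template <int dim>
void add_cell_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
                      const FullMatrix<double> &cell_matrix,
                      const Vector<double>     &cell_rhs,
                      SparseMatrix<double>     &system_matrix,
                      Vector<double>           &system_rhs)
{
  cell->distribute_local_to_global (cell_matrix, cell_rhs,
                                    system_matrix, system_rhs);
}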
+
+
+
+ template <int dim, int spacedim, typename number,
+ class OutputMatrix, typename OutputVector>
+ static
+ void
+ distribute_local_to_global (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &accessor,
+ const dealii::FullMatrix<number> &local_matrix,
+ const dealii::Vector<number> &local_vector,
+ OutputMatrix &global_matrix,
+ OutputVector &global_vector)
+ {
+ typedef dealii::DoFAccessor<dim,DoFHandler<dim,spacedim> > BaseClass;
+ Assert (accessor.dof_handler != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (&accessor.get_fe() != 0,
+ typename BaseClass::ExcInvalidObject());
+ Assert (local_matrix.m() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (local_matrix.n() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_matrix.m(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_matrix.n(),
+ typename BaseClass::ExcMatrixDoesNotMatch());
+ Assert (local_vector.size() == accessor.get_fe().dofs_per_cell,
+ typename BaseClass::ExcVectorDoesNotMatch());
+ Assert (accessor.dof_handler->n_dofs() == global_vector.size(),
+ typename BaseClass::ExcVectorDoesNotMatch());
+
+ const unsigned int n_dofs = local_matrix.m();
//TODO[WB/MK]: This function could be made more efficient because it allocates memory, which could be avoided by passing in another argument as a scratch array.
- // get indices of dofs
- std::vector<types::global_dof_index> dofs (n_dofs);
- accessor.get_dof_indices (dofs);
+ // get indices of dofs
- std::vector<unsigned int> dofs (n_dofs);
++ std::vector<types::global_dof_index> dofs (n_dofs);
+ accessor.get_dof_indices (dofs);
- // distribute cell matrix and vector
- global_matrix.add(dofs,local_matrix);
- global_vector.add(dofs,local_vector);
- }
+ // distribute cell matrix and vector
+ global_matrix.add(dofs,local_matrix);
+ global_vector.add(dofs,local_vector);
+ }
};
}
}
inline
void
DoFCellAccessor<DH>::
-get_dof_indices (std::vector<unsigned int> &dof_indices) const
+get_dof_indices (std::vector<types::global_dof_index> &dof_indices) const
{
Assert (this->is_artificial() == false,
- ExcMessage ("Can't ask for DoF indices on artificial cells."));
+ ExcMessage ("Can't ask for DoF indices on artificial cells."));
AssertDimension (dof_indices.size(), this->get_fe().dofs_per_cell);
dealii::internal::DoFCellAccessor::Implementation::get_dof_indices (*this, dof_indices);
}
- void DoFCellAccessor<DH>::get_mg_dof_indices (std::vector<types::global_dof_index>& dof_indices) const {
+template<class DH>
+inline
++void DoFCellAccessor<DH>::get_mg_dof_indices (std::vector<types::global_dof_index> &dof_indices) const
++{
+ DoFAccessor<dim, DH>::get_mg_dof_indices (this->level (), dof_indices);
+}
+
template <class DH>
template <class InputVector, typename number>
template <int dim, int spacedim=dim>
class DoFHandler : public Subscriptor
{
- typedef dealii::internal::DoFHandler::Iterators<DoFHandler<dim,spacedim> > IteratorSelector;
- public:
- typedef typename IteratorSelector::CellAccessor cell_accessor;
- typedef typename IteratorSelector::FaceAccessor face_accessor;
-
- typedef typename IteratorSelector::line_iterator line_iterator;
- typedef typename IteratorSelector::active_line_iterator active_line_iterator;
-
- typedef typename IteratorSelector::quad_iterator quad_iterator;
- typedef typename IteratorSelector::active_quad_iterator active_quad_iterator;
-
- typedef typename IteratorSelector::hex_iterator hex_iterator;
- typedef typename IteratorSelector::active_hex_iterator active_hex_iterator;
-
- typedef typename IteratorSelector::cell_iterator cell_iterator;
- typedef typename IteratorSelector::active_cell_iterator active_cell_iterator;
-
- typedef typename IteratorSelector::face_iterator face_iterator;
- typedef typename IteratorSelector::active_face_iterator active_face_iterator;
-
- /**
- * Alias the @p FunctionMap type
- * declared elsewhere.
- */
- typedef typename dealii::FunctionMap<spacedim>::type FunctionMap;
-
- /**
- * Make the dimension available
- * in function templates.
- */
- static const unsigned int dimension = dim;
-
- /**
- * Make the space dimension available
- * in function templates.
- */
- static const unsigned int space_dimension = spacedim;
-
- /**
- * When the arrays holding the
- * DoF indices are set up, but
- * before they are filled with
- * actual values, they are set to
- * an invalid value, in order to
- * monitor possible
- * problems. This invalid value
- * is the constant defined here.
- *
- * Please note that you should
- * not rely on it having a
- * certain value, but rather take
- * its symbolic name.
- */
- static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int;
-
- /**
- * The default index of the
- * finite element to be used on a
- * given cell. Since the present
- * class only supports the same
- * finite element to be used on
- * all cells, the index of the
- * finite element needs to be the
- * same on all cells anyway, and
- * by convention we pick zero for
- * this value. The situation is
- * different for hp objects
- * (i.e. the hp::DoFHandler
- * class) where different finite
- * element indices may be used on
- * different cells, and the
- * default index there
- * corresponds to an invalid
- * value.
- */
- static const unsigned int default_fe_index = 0;
-
- /**
- * Standard constructor, not
- * initializing any data. After
- * constructing an object with
- * this constructor, use
- * initialize() to make a valid
- * DoFHandler.
- */
- DoFHandler ();
-
- /**
- * Constructor. Take @p tria as the
- * triangulation to work on.
- */
- DoFHandler (const Triangulation<dim,spacedim> &tria);
-
- /**
- * Destructor.
- */
- virtual ~DoFHandler ();
-
- /**
- * Assign a Triangulation and a
- * FiniteElement to the
- * DoFHandler and compute the
- * distribution of degrees of
- * freedom over the mesh.
- */
- void initialize(const Triangulation<dim,spacedim>& tria,
- const FiniteElement<dim,spacedim>& fe);
-
- /**
- * Go through the triangulation and
- * distribute the degrees of freedom
- * needed for the given finite element
- * according to the given distribution
- * method. The purpose of this function
- * is first discussed in the introduction
- * to the step-2 tutorial program.
- *
- * A pointer to the given
- * finite element is
- * stored. Therefore, the
- * lifetime of the finite element
- * object must be longer than
- * that of this object. If you
- * don't want this behaviour, you
- * may want to call the @p clear
- * member function which also
- * releases the lock of this
- * object to the finite element.
- */
- virtual void distribute_dofs (const FiniteElement<dim,spacedim> &fe);
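// A short usage sketch (names are illustrative; <iostream> is assumed): the
// finite element handed to distribute_dofs() must outlive the DoFHandler
// unless clear() is called, so it is kept outside this helper.
template <int dim>
void setup_dofs (const FiniteElement<dim> &fe,
                 DoFHandler<dim>          &dof_handler)
{
  dof_handler.distribute_dofs (fe);
  std::cout << "Number of degrees of freedom: "
            << dof_handler.n_dofs()
            << std::endl;
}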
-
- virtual void distribute_mg_dofs (const FiniteElement<dim, spacedim>& fe);
-
- virtual void distribute_mg_dofs (const FiniteElement<dim, spacedim>& fe, const types::global_dof_index offset = 0);
-
- /**
- * After distribute_dofs() with
- * an FESystem element, the block
- * structure of global and level
- * vectors is stored in a
- * BlockInfo object accessible
- * with block_info(). This
- * function initializes the local
- * block structure on each cell
- * in the same object.
- */
- void initialize_local_block_info();
-
- /**
- * Clear all data of this object and
- * especially delete the lock this object
- * has to the finite element used the last
- * time when @p distribute_dofs was called.
- */
- virtual void clear ();
-
- /**
- * Renumber degrees of freedom based on
- * a list of new dof numbers for all the
- * dofs.
- *
- * This function is called,
- * for example, by the
- * functions in the
- * DoFRenumbering namespace
- * after they have computed a
- * new ordering of the degrees
- * of freedom, but it can of
- * course also be called from
- * user code.
- *
- * @arg new_number This array
- * must have a size equal to
- * the number of degrees of
- * freedom owned by the current
- * processor, i.e. the size
- * must be equal to what
- * n_locally_owned_dofs()
- * returns. If only one
- * processor participates in
- * storing the current mesh,
- * then this equals the total
- * number of degrees of
- * freedom, i.e. the result of
- * n_dofs(). The contents of
- * this array are the new
- * global indices for each
- * freedom listed in the
- * IndexSet returned by
- * locally_owned_dofs(). In the
- * case of a sequential mesh
- * this means that the array is
- * a list of new indices for
- * each of the degrees of
- * freedom on the current
- * mesh. In the case that we
- * have a
- * parallel::distributed::Triangulation
- * underlying this DoFHandler
- * object, the array is a list
- * of new indices for all the
- * locally owned degrees of
- * freedom, enumerated in the
- * same order as the currently
- * locally owned DoFs. In other
- * words, assume that degree of
- * freedom <code>i</code> is
- * currently locally owned,
- * then
- * <code>new_numbers[locally_owned_dofs().index_within_set(i)]</code>
- * returns the new global DoF
- * index of
- * <code>i</code>. Since the
- * IndexSet of
- * locally_owned_dofs() is
- * complete in the sequential
- * case, the latter convention
- * for the content of the array
- * reduces to the former in the
- * case that only one processor
- * participates in the mesh.
- */
- void renumber_dofs (const std::vector<types::global_dof_index> &new_numbers);
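// Sketch of the most common way renumber_dofs() gets called (assuming
// <deal.II/dofs/dof_renumbering.h> is included; the wrapper name is
// illustrative): the functions in the DoFRenumbering namespace compute a
// new ordering and then hand it to renumber_dofs() internally.
template <int dim>
void renumber_cuthill_mckee (DoFHandler<dim> &dof_handler)
{
  // reduces the bandwidth of the sparsity pattern built afterwards
  DoFRenumbering::Cuthill_McKee (dof_handler);
}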
-
- /**
- * @deprecated Use
- * CompressedSparsityPattern instead of
- * initializing SparsityPattern with this
- * value, see the discussion in step-2
- * and the @ref Sparsity module.
- *
- * Return the maximum number of
- * degrees of freedom a degree of freedom
- * in the given triangulation with the
- * given finite element may couple with.
- * This is the maximum number of entries
- * per line in the system matrix; this
- * information can therefore be used upon
- * construction of the SparsityPattern
- * object.
- *
- * The returned number is not really the
- * maximum number but an estimate based
- * on the finite element and the maximum
- * number of cells meeting at a vertex.
- * The number holds for the constrained
- * matrix as well.
- *
- * The determination of the number of
- * couplings can be done by simple
- * picture drawing. An example can be
- * found in the implementation of this
- * function.
- *
- * Note that this function is most often
- * used to determine the maximal row
- * length for sparsity
- * patterns. Unfortunately, while the
- * estimates returned by this function
- * are rather accurate in 1d and 2d, they
- * are often significantly too high in
- * 3d, leading the SparsityPattern class
- * to allocate much too much memory in
- * some cases. Unless someone comes
- * around to improving the present
- * function for 3d, there is not very
- * much one can do about these cases. The
- * typical way to work around this
- * problem is to use an intermediate
- * compressed sparsity pattern that only
- * allocates memory on demand. Refer to
- * the step-2 and step-11 example
- * programs on how to do this. The problem
- * is also discussed in the documentation
- * of the module on @ref Sparsity.
- */
- unsigned int max_couplings_between_dofs () const;
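// Sketch of the alternative recommended in the note above (assuming
// <deal.II/dofs/dof_tools.h>, <deal.II/lac/sparsity_pattern.h> and
// <deal.II/lac/compressed_sparsity_pattern.h> are included): build the
// sparsity pattern through an intermediate compressed pattern instead of
// sizing it with max_couplings_between_dofs().
template <int dim>
void build_sparsity (const DoFHandler<dim> &dof_handler,
                     SparsityPattern       &sparsity_pattern)
{
  CompressedSparsityPattern csp (dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern (dof_handler, csp);
  sparsity_pattern.copy_from (csp);
}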
-
- /**
- * @deprecated Use
- * CompressedSparsityPattern
- * instead of initializing
- * SparsityPattern with this
- * value.
- *
- * Return the number of degrees of freedom
- * located on the boundary another dof on
- * the boundary can couple with.
- *
- * The number is the same as for
- * max_couplings_between_dofs() in one
- * dimension less.
- */
- unsigned int max_couplings_between_boundary_dofs () const;
-
- /*--------------------------------------*/
-
- /**
- * @name Cell iterator functions
- */
- /*@{*/
- /**
- * Iterator to the first used
- * cell on level @p level.
- */
- cell_iterator begin (const unsigned int level = 0) const;
-
- /**
- * Iterator to the first active
- * cell on level @p level.
- */
- active_cell_iterator begin_active(const unsigned int level = 0) const;
-
- /**
- * Iterator past the end; this
- * iterator serves for
- * comparisons of iterators with
- * past-the-end or
- * before-the-beginning states.
- */
- cell_iterator end () const;
-
- /**
- * Return an iterator which is
- * the first iterator not on the
- * given level. If @p level is the
- * last level, then this returns
- * <tt>end()</tt>.
- */
- cell_iterator end (const unsigned int level) const;
-
- /**
- * Return an active iterator
- * which is the first iterator
- * not on the given level. If @p level is
- * the last level, then this
- * returns <tt>end()</tt>.
- */
- active_cell_iterator end_active (const unsigned int level) const;
-
- //@}
-
- /*---------------------------------------*/
-
-
- /**
- * Return the global number of
- * degrees of freedom. If the
- * current object handles all
- * degrees of freedom itself
- * (even if you may intend to
- * solve your linear system in
- * parallel, such as in step-17
- * or step-18), then this number
- * equals the number of locally
- * owned degrees of freedom since
- * this object doesn't know
- * anything about what you want
- * to do with it and believes
- * that it owns every degree of
- * freedom it knows about.
- *
- * On the other hand, if this
- * object operates on a
- * parallel::distributed::Triangulation
- * object, then this function
- * returns the global number of
- * degrees of freedom,
- * accumulated over all
- * processors.
- *
- * In either case, included in
- * the returned number are those
- * DoFs which are constrained by
- * hanging nodes, see @ref constraints.
- */
- types::global_dof_index n_dofs () const;
-
- types::global_dof_index n_dofs (const unsigned int level) const;
-
- /**
- * Return the number of degrees of freedom
- * located on the boundary.
- */
- types::global_dof_index n_boundary_dofs () const;
-
- /**
- * Return the number of degrees
- * of freedom located on those
- * parts of the boundary which
- * have a boundary indicator
- * listed in the given set. The
- * reason that a @p map rather
- * than a @p set is used is the
- * same as described in the
- * section on the
- * @p make_boundary_sparsity_pattern
- * function.
- */
- types::global_dof_index
- n_boundary_dofs (const FunctionMap &boundary_indicators) const;
-
- /**
- * Same function, but with
- * different data type of the
- * argument, which is here simply
- * a list of the boundary
- * indicators under
- * consideration.
- */
- types::global_dof_index
- n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const;
-
- /**
- * Access to an object informing
- * of the block structure of the
- * dof handler.
- *
- * If an FESystem is used in
- * distribute_dofs(), degrees of
- * freedom naturally split into
- * several @ref GlossBlock
- * "blocks". For each base element
- * as many blocks appear as its
- * multiplicity.
- *
- * At the end of
- * distribute_dofs(), the number
- * of degrees of freedom in each
- * block is counted, and stored
- * in a BlockInfo object, which
- * can be accessed here. In an
- * MGDoFHandler, the same is done
- * on each level. Additionally,
- * the block structure on each
- * cell can be generated in this
- * object by calling
- * initialize_local_block_info().
- */
- const BlockInfo& block_info() const;
-
-
- /**
- * Return the number of
- * degrees of freedom that
- * belong to this
- * process.
- *
- * If this is a sequential job,
- * then the result equals that
- * produced by n_dofs(). On the
- * other hand, if we are
- * operating on a
- * parallel::distributed::Triangulation,
- * then it includes only the
- * degrees of freedom that the
- * current processor owns. Note
- * that in this case this does
- * not include all degrees of
- * freedom that have been
- * distributed on the current
- * processor's image of the mesh:
- * in particular, some of the
- * degrees of freedom on the
- * interface between the cells
- * owned by this processor and
- * cells owned by other
- * processors may be theirs, and
- * degrees of freedom on ghost
- * cells are also not necessarily
- * included.
- */
- unsigned int n_locally_owned_dofs() const;
-
- /**
- * Return an IndexSet describing
- * the set of locally owned DoFs
- * as a subset of
- * 0..n_dofs(). The number of
- * elements of this set equals
- * n_locally_owned_dofs().
- */
- const IndexSet & locally_owned_dofs() const;
-
-
- /**
- * Returns a vector that
- * stores the locally owned
- * DoFs of each processor. If
- * you are only interested in
- * the number of elements
- * each processor owns then
- * n_locally_owned_dofs_per_processor() is
- * a better choice.
- *
- * If this is a sequential job,
- * then the vector has a single
- * element that equals the
- * IndexSet representing the
- * entire range [0,n_dofs()].
- */
- const std::vector<IndexSet> &
- locally_owned_dofs_per_processor () const;
-
- /**
- * Return a vector that
- * stores the number of
- * degrees of freedom each
- * processor that
- * participates in this
- * triangulation owns
- * locally. The sum of all
- * these numbers equals the
- * number of degrees of
- * freedom that exist
- * globally, i.e. what
- * n_dofs() returns.
- *
- * Each element of the vector
- * returned by this function
- * equals the number of
- * elements of the
- * corresponding sets
- * returned by
- * global_dof_indices().
- *
- * If this is a sequential job,
- * then the vector has a single
- * element equal to n_dofs().
- */
- const std::vector<unsigned int> &
- n_locally_owned_dofs_per_processor () const;
-
- /**
- * Return a constant reference to
- * the selected finite element
- * object.
- */
- const FiniteElement<dim,spacedim> & get_fe () const;
-
- /**
- * Return a constant reference to
- * the triangulation underlying
- * this object.
- */
- const Triangulation<dim,spacedim> & get_tria () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- *
- * This function is made virtual,
- * since a dof handler object
- * might be accessed through a
- * pointer to this base class,
- * although the actual object
- * might be a derived class.
- */
- virtual std::size_t memory_consumption () const;
-
- /**
- * Write the data of this object to a
- * stream for the purpose of
- * serialization.
- */
- template <class Archive>
- void save (Archive & ar, const unsigned int version) const;
-
- /**
- * Read the data of this object from a
- * stream for the purpose of
- * serialization.
- */
- template <class Archive>
- void load (Archive & ar, const unsigned int version);
-
- BOOST_SERIALIZATION_SPLIT_MEMBER()
-
- /**
- * We are trying to renumber the
- * degrees of freedom, but
- * somehow did not count
- * correctly.
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcRenumberingIncomplete);
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException0 (ExcGridsDoNotMatch);
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException0 (ExcInvalidBoundaryIndicator);
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException1 (ExcNewNumbersNotConsecutive,
- int,
- << "The given list of new dof indices is not consecutive: "
- << "the index " << arg1 << " does not exist.");
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException1 (ExcInvalidLevel,
- int,
- << "The given level " << arg1
- << " is not in the valid range!");
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException0 (ExcFacesHaveNoLevel);
- /**
- * The triangulation level you
- * accessed is empty.
- * @ingroup Exceptions
- */
- DeclException1 (ExcEmptyLevel,
- int,
- << "You tried to do something on level " << arg1
- << ", but this level is empty.");
-
-
- protected:
- /**
- * The object containing
- * information on the block structure.
- */
- BlockInfo block_info_object;
-
- /**
- * Array to store the indices for
- * degrees of freedom located at
- * vertices.
- */
- std::vector<types::global_dof_index> vertex_dofs;
-
-
-
- /**
- * Address of the triangulation to
- * work on.
- */
- SmartPointer<const Triangulation<dim,spacedim>,DoFHandler<dim,spacedim> >
- tria;
-
- /**
- * Store a pointer to the finite element
- * given most recently for the distribution
- * of dofs. In order to avoid destruction of
- * the object before the end of the lifetime
- * of the DoF handler, we subscribe to
- * the finite element object. To unlock
- * the FE before the end of the lifetime
- * of this DoF handler, use the <tt>clear()</tt>
- * function (this clears all data of
- * this object as well, though).
- */
- SmartPointer<const FiniteElement<dim,spacedim>,DoFHandler<dim,spacedim> >
- selected_fe;
-
- /**
- * An object that describes how degrees
- * of freedom should be distributed and
- * renumbered.
- */
- std_cxx1x::shared_ptr<dealii::internal::DoFHandler::Policy::PolicyBase<dim,spacedim> > policy;
-
- /**
- * A structure that contains all
- * sorts of numbers that
- * characterize the degrees of
- * freedom this object works on.
- *
- * For most members of this
- * structure, there is an
- * accessor function in this
- * class that returns its value.
- */
- dealii::internal::DoFHandler::NumberCache number_cache;
-
+ typedef dealii::internal::DoFHandler::Iterators<DoFHandler<dim,spacedim> > IteratorSelector;
+ public:
+ typedef typename IteratorSelector::CellAccessor cell_accessor;
+ typedef typename IteratorSelector::FaceAccessor face_accessor;
+
+ typedef typename IteratorSelector::line_iterator line_iterator;
+ typedef typename IteratorSelector::active_line_iterator active_line_iterator;
+
+ typedef typename IteratorSelector::quad_iterator quad_iterator;
+ typedef typename IteratorSelector::active_quad_iterator active_quad_iterator;
+
+ typedef typename IteratorSelector::hex_iterator hex_iterator;
+ typedef typename IteratorSelector::active_hex_iterator active_hex_iterator;
+
+ typedef typename IteratorSelector::cell_iterator cell_iterator;
+ typedef typename IteratorSelector::active_cell_iterator active_cell_iterator;
+
+ typedef typename IteratorSelector::face_iterator face_iterator;
+ typedef typename IteratorSelector::active_face_iterator active_face_iterator;
+
+ /**
+ * Alias the @p FunctionMap type
+ * declared elsewhere.
+ */
+ typedef typename dealii::FunctionMap<spacedim>::type FunctionMap;
+
+ /**
+ * Make the dimension available
+ * in function templates.
+ */
+ static const unsigned int dimension = dim;
+
+ /**
+ * Make the space dimension available
+ * in function templates.
+ */
+ static const unsigned int space_dimension = spacedim;
+
+ /**
+ * When the arrays holding the
+ * DoF indices are set up, but
+ * before they are filled with
+ * actual values, they are set to
+ * an invalid value, in order to
+ * monitor possible
+ * problems. This invalid value
+ * is the constant defined here.
+ *
+ * Please note that you should
+ * not rely on it having a
+ * certain value, but rather take
+ * its symbolic name.
+ */
+ static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int;
+
+ /**
+ * The default index of the
+ * finite element to be used on a
+ * given cell. Since the present
+ * class only supports the same
+ * finite element to be used on
+ * all cells, the index of the
+ * finite element needs to be the
+ * same on all cells anyway, and
+ * by convention we pick zero for
+ * this value. The situation is
+ * different for hp objects
+ * (i.e. the hp::DoFHandler
+ * class) where different finite
+ * element indices may be used on
+ * different cells, and the
+ * default index there
+ * corresponds to an invalid
+ * value.
+ */
+ static const unsigned int default_fe_index = 0;
+
+ /**
+ * Standard constructor, not
+ * initializing any data. After
+ * constructing an object with
+ * this constructor, use
+ * initialize() to make a valid
+ * DoFHandler.
+ */
+ DoFHandler ();
+
+ /**
+ * Constructor. Take @p tria as the
+ * triangulation to work on.
+ */
+ DoFHandler ( const Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Destructor.
+ */
+ virtual ~DoFHandler ();
+
+ /**
+ * Assign a Triangulation and a
+ * FiniteElement to the
+ * DoFHandler and compute the
+ * distribution of degrees of
+ * freedom over the mesh.
+ */
+ void initialize(const Triangulation<dim,spacedim> &tria,
+ const FiniteElement<dim,spacedim> &fe);
+
+ /**
+ * Go through the triangulation and
+ * distribute the degrees of freedoms
+ * needed for the given finite element
+ * according to the given distribution
+ * method. The purpose of this function
+ * is first discussed in the introduction
+ * to the step-2 tutorial program.
+ *
+ * A pointer of the transferred
+ * finite element is
+ * stored. Therefore, the
+ * lifetime of the finite element
+ * object shall be longer than
+ * that of this object. If you
+ * don't want this behaviour, you
+ * may want to call the @p clear
+ * member function which also
+ * releases the lock of this
+ * object to the finite element.
+ */
+ virtual void distribute_dofs (const FiniteElement<dim,spacedim> &fe);
+
++ virtual void distribute_mg_dofs (const FiniteElement<dim, spacedim> &fe);
++
++ virtual void distribute_mg_dofs (const FiniteElement<dim, spacedim> &fe, const types::global_dof_index offset = 0);
++
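As an illustration of the two set-up paths declared above, here is a minimal sketch (added for clarity, not part of the patch itself), assuming a hyper-cube mesh and a Q1 element; all object names are placeholders:

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/dofs/dof_handler.h>

using namespace dealii;

void setup_sketch ()
{
  Triangulation<2> triangulation;
  GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (3);

  FE_Q<2> fe (1);

  // Path 1: bind the triangulation in the constructor, then
  // distribute the degrees of freedom explicitly.
  DoFHandler<2> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  // Path 2: default-construct and let initialize() bind the
  // triangulation and the element and distribute DoFs in one call.
  DoFHandler<2> other_handler;
  other_handler.initialize (triangulation, fe);
}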
+ /**
+ * After distribute_dofs() with
+ * an FESystem element, the block
+ * structure of global and level
+ * vectors is stored in a
+ * BlockInfo object accessible
+ * with block_info(). This
+ * function initializes the local
+ * block structure on each cell
+ * in the same object.
+ */
+ void initialize_local_block_info();
+
+ /**
+ * Clear all data of this object and
+ * especially delete the lock this object
+ * has to the finite element used the last
+ * time when @p distribute_dofs was called.
+ */
+ virtual void clear ();
+
+ /**
+ * Renumber degrees of freedom based on
+ * a list of new dof numbers for all the
+ * dofs.
+ *
+ * This function is called, for
+ * example, by the functions in
+ * the DoFRenumbering namespace
+ * after computing a new ordering
+ * of the degrees of freedom, but
+ * it can of course also be
+ * called from user code.
+ *
+ * @arg new_number This array
+ * must have a size equal to
+ * the number of degrees of
+ * freedom owned by the current
+ * processor, i.e. the size
+ * must be equal to what
+ * n_locally_owned_dofs()
+ * returns. If only one
+ * processor participates in
+ * storing the current mesh,
+ * then this equals the total
+ * number of degrees of
+ * freedom, i.e. the result of
+ * n_dofs(). The contents of
+ * this array are the new
+ * global indices for each
+ * freedom listed in the
+ * IndexSet returned by
+ * locally_owned_dofs(). In the
+ * case of a sequential mesh
+ * this means that the array is
+ * a list of new indices for
+ * each of the degrees of
+ * freedom on the current
+ * mesh. In the case that we
+ * have a
+ * parallel::distributed::Triangulation
+ * underlying this DoFHandler
+ * object, the array is a list
+ * of new indices for all the
+ * locally owned degrees of
+ * freedom, enumerated in the
+ * same order as the currently
+ * locally owned DoFs. In other
+ * words, assume that degree of
+ * freedom <code>i</code> is
+ * currently locally owned,
+ * then
+ * <code>new_numbers[locally_owned_dofs().index_within_set(i)]</code>
+ * returns the new global DoF
+ * index of
+ * <code>i</code>. Since the
+ * IndexSet of
+ * locally_owned_dofs() is
+ * complete in the sequential
+ * case, the latter convention
+ * for the content of the array
+ * reduces to the former in the
+ * case that only one processor
+ * participates in the mesh.
+ */
- void renumber_dofs (const std::vector<unsigned int> &new_numbers);
++ void renumber_dofs (const std::vector<types::global_dof_index> &new_numbers);
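To make the indexing convention above concrete, here is a hedged sketch for the sequential case: build a permutation that reverses the current numbering and hand it to renumber_dofs(). The function name is a placeholder; the handler is assumed to have had distribute_dofs() called on it.

#include <deal.II/dofs/dof_handler.h>
#include <vector>

template <int dim>
void reverse_global_numbering (dealii::DoFHandler<dim> &dof_handler)
{
  // Sequential case: the locally owned DoFs are all DoFs, so the
  // array is simply indexed by the current global DoF index.
  std::vector<dealii::types::global_dof_index>
    new_numbers (dof_handler.n_locally_owned_dofs ());
  for (dealii::types::global_dof_index i = 0; i < new_numbers.size (); ++i)
    new_numbers[i] = new_numbers.size () - 1 - i;
  dof_handler.renumber_dofs (new_numbers);
}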
+
+ /**
+ * @deprecated Use
+ * CompressedSparsityPattern instead of
+ * initializing SparsityPattern with this
+ * value, see the discussion in step-2
+ * and the @ref Sparsity module.
+ *
+ * Return the maximum number of
+ * degrees of freedom a degree of freedom
+ * in the given triangulation with the
+ * given finite element may couple with.
+ * This is the maximum number of entries
+ * per line in the system matrix; this
+ * information can therefore be used upon
+ * construction of the SparsityPattern
+ * object.
+ *
+ * The returned number is not really the
+ * maximum number but an estimate based
+ * on the finite element and the maximum
+ * number of cells meeting at a vertex.
+ * The number holds for the constrained
+ * matrix as well.
+ *
+ * The determination of the number of
+ * couplings can be done by simple
+ * picture drawing. An example can be
+ * found in the implementation of this
+ * function.
+ *
+ * Note that this function is most often
+ * used to determine the maximal row
+ * length for sparsity
+ * patterns. Unfortunately, while the
+ * estimates returned by this function
+ * are rather accurate in 1d and 2d, they
+ * are often significantly too high in
+ * 3d, leading the SparsityPattern class
+ * to allocate much too much memory in
+ * some cases. Unless someone comes
+ * around to improving the present
+ * function for 3d, there is not very
+ * much one can do about these cases. The
+ * typical way to work around this
+ * problem is to use an intermediate
+ * compressed sparsity pattern that only
+ * allocates memory on demand. Refer to
+ * the step-2 and step-11 example
+ * programs on how to do this. The problem
+ * is also discussed in the documentation
+ * of the module on @ref Sparsity.
+ */
+ unsigned int max_couplings_between_dofs () const;
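The deprecation note above is easiest to see side by side; the following sketch (not part of the patch) contrasts the old reinit-with-max_couplings approach with the recommended compressed pattern, assuming a handler on which distribute_dofs() has already been called:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/compressed_sparsity_pattern.h>

template <int dim>
void build_sparsity (const dealii::DoFHandler<dim> &dof_handler)
{
  using namespace dealii;

  // Old style: pre-allocate each row with the (often pessimistic)
  // estimate returned by max_couplings_between_dofs().
  SparsityPattern fixed_pattern;
  fixed_pattern.reinit (dof_handler.n_dofs (),
                        dof_handler.n_dofs (),
                        dof_handler.max_couplings_between_dofs ());
  DoFTools::make_sparsity_pattern (dof_handler, fixed_pattern);
  fixed_pattern.compress ();

  // Recommended style: let a compressed pattern grow on demand and
  // copy it into the final, static SparsityPattern afterwards.
  CompressedSparsityPattern compressed_pattern (dof_handler.n_dofs ());
  DoFTools::make_sparsity_pattern (dof_handler, compressed_pattern);
  SparsityPattern final_pattern;
  final_pattern.copy_from (compressed_pattern);
}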
+
+ /**
+ * @deprecated Use
+ * CompressedSparsityPattern
+ * instead of initializing
+ * SparsityPattern with this
+ * value.
+ *
+ * Return the number of degrees of freedom
+ * located on the boundary another dof on
+ * the boundary can couple with.
+ *
+ * The number is the same as for
+ * max_couplings_between_dofs() in one
+ * dimension less.
+ */
+ unsigned int max_couplings_between_boundary_dofs () const;
+
+ /*--------------------------------------*/
+
+ /**
+ * @name Cell iterator functions
+ */
+ /*@{*/
+ /**
+ * Iterator to the first used
+ * cell on level @p level.
+ */
+ cell_iterator begin (const unsigned int level = 0) const;
+
+ /**
+ * Iterator to the first active
+ * cell on level @p level.
+ */
+ active_cell_iterator begin_active(const unsigned int level = 0) const;
+
+ /**
+ * Iterator past the end; this
+ * iterator serves for
+ * comparisons of iterators with
+ * past-the-end or
+ * before-the-beginning states.
+ */
+ cell_iterator end () const;
+
+ /**
+ * Return an iterator which is
+ * the first iterator not on
+ * level. If @p level is the
+ * last level, then this returns
+ * <tt>end()</tt>.
+ */
+ cell_iterator end (const unsigned int level) const;
+
+ /**
+ * Return an active iterator
+ * which is the first iterator
+ * not on level. If @p level is
+ * the last level, then this
+ * returns <tt>end()</tt>.
+ */
+ active_cell_iterator end_active (const unsigned int level) const;
+
+ //@}
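A typical use of the iterator functions above is the active-cell loop used during assembly; a sketch (not part of the patch), assuming the cell accessor's get_dof_indices() as it looks after this change:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe.h>
#include <vector>

template <int dim>
void visit_cells (const dealii::DoFHandler<dim> &dof_handler)
{
  std::vector<dealii::types::global_dof_index>
    local_dof_indices (dof_handler.get_fe ().dofs_per_cell);

  typename dealii::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active (),
    endc = dof_handler.end ();
  for (; cell != endc; ++cell)
    {
      cell->get_dof_indices (local_dof_indices);
      // ... use local_dof_indices, e.g. to scatter a local matrix
      //     into the global one
    }
}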
+
+ /*---------------------------------------*/
+
+
+ /**
+ * Return the global number of
+ * degrees of freedom. If the
+ * current object handles all
+ * degrees of freedom itself
+ * (even if you may intend to
+ * solve your linear system in
+ * parallel, such as in step-17
+ * or step-18), then this number
+ * equals the number of locally
+ * owned degrees of freedom since
+ * this object doesn't know
+ * anything about what you want
+ * to do with it and believes
+ * that it owns every degree of
+ * freedom it knows about.
+ *
+ * On the other hand, if this
+ * object operates on a
+ * parallel::distributed::Triangulation
+ * object, then this function
+ * returns the global number of
+ * degrees of freedom,
+ * accumulated over all
+ * processors.
+ *
+ * In either case, included in
+ * the returned number are those
+ * DoFs which are constrained by
+ * hanging nodes, see @ref constraints.
+ */
- unsigned int n_dofs () const;
++ types::global_dof_index n_dofs () const;
++
++ types::global_dof_index n_dofs (const unsigned int level) const;
+
+ /**
+ * Return the number of degrees of freedom
+ * located on the boundary.
+ */
- unsigned int n_boundary_dofs () const;
++ types::global_dof_index n_boundary_dofs () const;
+
+ /**
+ * Return the number of degrees
+ * of freedom located on those
+ * parts of the boundary which
+ * have a boundary indicator
+ * listed in the given set. The
+ * reason that a @p map rather
+ * than a @p set is used is the
+ * same as described in the
+ * section on the
+ * @p make_boundary_sparsity_pattern
+ * function.
+ */
- unsigned int
++ types::global_dof_index
+ n_boundary_dofs (const FunctionMap &boundary_indicators) const;
+
+ /**
+ * Same function, but with
+ * different data type of the
+ * argument, which is here simply
+ * a list of the boundary
+ * indicators under
+ * consideration.
+ */
- unsigned int
++ types::global_dof_index
+ n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const;
+
+ /**
+ * Access to an object informing
+ * of the block structure of the
+ * dof handler.
+ *
+ * If an FESystem is used in
+ * distribute_dofs(), degrees of
+ * freedom naturally split into
+ * several @ref GlossBlock
+ * "blocks". For each base element
+ * as many blocks appear as its
+ * multiplicity.
+ *
+ * At the end of
+ * distribute_dofs(), the number
+ * of degrees of freedom in each
+ * block is counted, and stored
+ * in a BlockInfo object, which
+ * can be accessed here. In an
+ * MGDoFHandler, the same is done
+ * on each level. Additionally,
+ * the block structure on each
+ * cell can be generated in this
+ * object by calling
+ * initialize_local_block_info().
+ */
+ const BlockInfo &block_info() const;
+
+
+ /**
+ * Return the number of
+ * degrees of freedom that
+ * belong to this
+ * process.
+ *
+ * If this is a sequential job,
+ * then the result equals that
+ * produced by n_dofs(). On the
+ * other hand, if we are
+ * operating on a
+ * parallel::distributed::Triangulation,
+ * then it includes only the
+ * degrees of freedom that the
+ * current processor owns. Note
+ * that in this case this does
+ * not include all degrees of
+ * freedom that have been
+ * distributed on the current
+ * processor's image of the mesh:
+ * in particular, some of the
+ * degrees of freedom on the
+ * interface between the cells
+ * owned by this processor and
+ * cells owned by other
+ * processors may be theirs, and
+ * degrees of freedom on ghost
+ * cells are also not necessarily
+ * included.
+ */
+ unsigned int n_locally_owned_dofs() const;
+
+ /**
+ * Return an IndexSet describing
+ * the set of locally owned DoFs
+ * as a subset of
+ * 0..n_dofs(). The number of
+ * elements of this set equals
+ * n_locally_owned_dofs().
+ */
+ const IndexSet &locally_owned_dofs() const;
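A hedged sketch of how the two queries above interact in a parallel::distributed computation (not part of the patch; some_dof is a placeholder index):

#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_handler.h>

template <int dim>
void inspect_ownership (const dealii::DoFHandler<dim> &dof_handler)
{
  const dealii::IndexSet &owned = dof_handler.locally_owned_dofs ();
  // By construction, owned.n_elements() equals n_locally_owned_dofs().

  const dealii::types::global_dof_index some_dof = 0; // placeholder index
  if (owned.is_element (some_dof))
    {
      // Position of this DoF inside the locally owned range; this is
      // also the position at which renumber_dofs() expects the new
      // global index for it in its argument array.
      const dealii::types::global_dof_index position
        = owned.index_within_set (some_dof);
      (void) position;
    }
}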
+
+
+ /**
+ * Returns a vector that
+ * stores the locally owned
+ * DoFs of each processor. If
+ * you are only interested in
+ * the number of elements
+ * each processor owns then
+ * n_locally_owned_dofs_per_processor() is
+ * a better choice.
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element that equals the
+ * IndexSet representing the
+ * entire range [0,n_dofs()].
+ */
+ const std::vector<IndexSet> &
+ locally_owned_dofs_per_processor () const;
+
+ /**
+ * Return a vector that
+ * stores the number of
+ * degrees of freedom each
+ * processor that
+ * participates in this
+ * triangulation owns
+ * locally. The sum of all
+ * these numbers equals the
+ * number of degrees of
+ * freedom that exist
+ * globally, i.e. what
+ * n_dofs() returns.
+ *
+ * Each element of the vector
+ * returned by this function
+ * equals the number of
+ * elements of the
+ * corresponding sets
+ * returned by
+ * global_dof_indices().
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element equal to n_dofs().
+ */
+ const std::vector<unsigned int> &
+ n_locally_owned_dofs_per_processor () const;
+
+ /**
+ * Return a constant reference to
+ * the selected finite element
+ * object.
+ */
+ const FiniteElement<dim,spacedim> &get_fe () const;
+
+ /**
+ * Return a constant reference to
+ * the triangulation underlying
+ * this object.
+ */
+ const Triangulation<dim,spacedim> &get_tria () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ *
+ * This function is made virtual,
+ * since a dof handler object
+ * might be accessed through a
+ * pointer to this base class,
+ * although the actual object
+ * might be a derived class.
+ */
+ virtual std::size_t memory_consumption () const;
+
+ /**
+ * Write the data of this object to a
+ * stream for the purpose of
+ * serialization.
+ */
+ template <class Archive>
+ void save (Archive &ar, const unsigned int version) const;
+
+ /**
+ * Read the data of this object from a
+ * stream for the purpose of
+ * serialization.
+ */
+ template <class Archive>
+ void load (Archive &ar, const unsigned int version);
+
+ BOOST_SERIALIZATION_SPLIT_MEMBER()
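A hedged sketch of save()/load() through Boost text archives (not part of the patch); in practice these members are usually reached through the archive operators, and load() assumes the receiving handler is already attached to an identically refined triangulation and the same finite element:

#include <deal.II/dofs/dof_handler.h>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <sstream>

template <int dim>
void round_trip (const dealii::DoFHandler<dim> &dof_handler,
                 dealii::DoFHandler<dim>       &restored_dof_handler)
{
  // Serialize into a string ...
  std::ostringstream out;
  {
    boost::archive::text_oarchive archive (out);
    dof_handler.save (archive, 0);
  }

  // ... and read it back into the (identically set up) second handler.
  std::istringstream in (out.str ());
  {
    boost::archive::text_iarchive archive (in);
    restored_dof_handler.load (archive, 0);
  }
}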
+
+ /**
+ * We are trying to renumber the
+ * degrees of freedom, but
+ * somehow did not count
+ * correctly.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcRenumberingIncomplete);
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcGridsDoNotMatch);
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcInvalidBoundaryIndicator);
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcNewNumbersNotConsecutive,
+ int,
+ << "The given list of new dof indices is not consecutive: "
+ << "the index " << arg1 << " does not exist.");
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcInvalidLevel,
+ int,
+ << "The given level " << arg1
+ << " is not in the valid range!");
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcFacesHaveNoLevel);
+ /**
+ * The triangulation level you
+ * accessed is empty.
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcEmptyLevel,
+ int,
+ << "You tried to do something on level " << arg1
+ << ", but this level is empty.");
+
+
+ protected:
+ /**
+ * The object containing
+ * information on the block structure.
+ */
+ BlockInfo block_info_object;
+
+ /**
+ * Array to store the indices for
+ * degrees of freedom located at
+ * vertices.
+ */
- std::vector<unsigned int> vertex_dofs;
++ std::vector<types::global_dof_index> vertex_dofs;
+
+
+
+ /**
+ * Address of the triangulation to
+ * work on.
+ */
+ SmartPointer<const Triangulation<dim,spacedim>,DoFHandler<dim,spacedim> >
+ tria;
+
+ /**
+ * Store a pointer to the finite element
+ * given most recently for the distribution
+ * of dofs. In order to avoid destruction of
+ * the object before the end of the lifetime
+ * of the DoF handler, we subscribe to
+ * the finite element object. To unlock
+ * the FE before the end of the lifetime
+ * of this DoF handler, use the <tt>clear()</tt>
+ * function (this clears all data of
+ * this object as well, though).
+ */
+ SmartPointer<const FiniteElement<dim,spacedim>,DoFHandler<dim,spacedim> >
+ selected_fe;
+
+ /**
+ * An object that describes how degrees
+ * of freedom should be distributed and
+ * renumbered.
+ */
+ std_cxx1x::shared_ptr<dealii::internal::DoFHandler::Policy::PolicyBase<dim,spacedim> > policy;
+
+ /**
+ * A structure that contains all
+ * sorts of numbers that
+ * characterize the degrees of
+ * freedom this object works on.
+ *
+ * For most members of this
+ * structure, there is an
+ * accessor function in this
+ * class that returns its value.
+ */
+ dealii::internal::DoFHandler::NumberCache number_cache;
+
+ private:
+
+ /**
+ * Copy constructor. I can see no reason
+ * why someone might want to use it, so
+ * I don't provide it. Since this class
+ * has pointer members, making it private
+ * prevents the compiler from providing its
+ * own, incorrect one if anyone chose to
+ * copy such an object.
+ */
+ DoFHandler (const DoFHandler &);
+
+ /**
+ * Copy operator. I can see no reason
+ * why someone might want to use it, so
+ * I don't provide it. Since this class
+ * has pointer members, making it private
+ * prevents the compiler from providing its
+ * own, incorrect one if anyone chose to
+ * copy such an object.
+ */
+ DoFHandler &operator = (const DoFHandler &);
+
++ class MGVertexDoFs
++ {
+ private:
++ unsigned int coarsest_level;
++ unsigned int finest_level;
++ types::global_dof_index *indices;
++ types::global_dof_index *indices_offset;
+
- /**
- * Copy constructor. I can see no reason
- * why someone might want to use it, so
- * I don't provide it. Since this class
- * has pointer members, making it private
- * prevents the compiler from providing its
- * own, incorrect one if anyone chose to
- * copy such an object.
- */
- DoFHandler (const DoFHandler &);
-
- /**
- * Copy operator. I can see no reason
- * why someone might want to use it, so
- * I don't provide it. Since this class
- * has pointer members, making it private
- * prevents the compiler from providing its
- * own, incorrect one if anyone chose to
- * copy such an object.
- */
- DoFHandler & operator = (const DoFHandler &);
-
- class MGVertexDoFs {
- private:
- unsigned int coarsest_level;
- unsigned int finest_level;
- types::global_dof_index* indices;
- types::global_dof_index* indices_offset;
-
- public:
- DeclException0 (ExcNoMemory);
- MGVertexDoFs ();
- ~MGVertexDoFs ();
- unsigned int get_coarsest_level () const;
- unsigned int get_finest_level () const;
- types::global_dof_index get_index (const unsigned int level, const unsigned int dof_number) const;
- void init (const unsigned int coarsest_level, const unsigned int finest_level, const unsigned int dofs_per_vertex);
- void set_index (const unsigned int level, const unsigned int dof_number, const types::global_dof_index index);
- };
-
- void clear_mg_space ();
-
- /**
- * Free all used memory.
- */
- void clear_space ();
-
- void reserve_space ();
-
- template <int structdim>
- types::global_dof_index get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const;
-
- template<int structdim>
- void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index) const;
-
- /**
- * Space to store the DoF numbers
- * for the different
- * levels. Analogous to the
- * <tt>levels[]</tt> tree of the
- * Triangulation objects.
- */
- std::vector<dealii::internal::DoFHandler::DoFLevel<dim>*> levels;
-
- std::vector<dealii::internal::DoFHandler::DoFLevel<dim>*> mg_levels;
-
- /**
- * Space to store DoF numbers of
- * faces. They are not stored in
- * <tt>levels</tt> since faces
- * are not organized
- * hierarchically, but in a flat
- * array.
- */
- dealii::internal::DoFHandler::DoFFaces<dim> *faces;
-
- dealii::internal::DoFHandler::DoFFaces<dim>* mg_faces;
-
- std::vector<MGVertexDoFs> mg_vertex_dofs;
-
- std::vector<types::global_dof_index> mg_used_dofs;
-
- /**
- * Make accessor objects friends.
- */
- template <int, class> friend class DoFAccessor;
- template <class> friend class DoFCellAccessor;
- friend struct dealii::internal::DoFAccessor::Implementation;
- friend struct dealii::internal::DoFCellAccessor::Implementation;
-
- friend struct dealii::internal::DoFHandler::Implementation;
- friend struct dealii::internal::DoFHandler::Policy::Implementation;
++ public:
++ DeclException0 (ExcNoMemory);
++ MGVertexDoFs ();
++ ~MGVertexDoFs ();
++ unsigned int get_coarsest_level () const;
++ unsigned int get_finest_level () const;
++ types::global_dof_index get_index (const unsigned int level, const unsigned int dof_number) const;
++ void init (const unsigned int coarsest_level, const unsigned int finest_level, const unsigned int dofs_per_vertex);
++ void set_index (const unsigned int level, const unsigned int dof_number, const types::global_dof_index index);
++ };
++
++ void clear_mg_space ();
++
+ /**
+ * Free all used memory.
+ */
+ void clear_space ();
+
++ void reserve_space ();
++
++ template <int structdim>
++ types::global_dof_index get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const;
++
++ template<int structdim>
++ void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index) const;
++
+ /**
+ * Space to store the DoF numbers
+ * for the different
+ * levels. Analogous to the
+ * <tt>levels[]</tt> tree of the
+ * Triangulation objects.
+ */
+ std::vector<dealii::internal::DoFHandler::DoFLevel<dim>*> levels;
+
++ std::vector<dealii::internal::DoFHandler::DoFLevel<dim>*> mg_levels;
++
+ /**
+ * Space to store DoF numbers of
+ * faces. They are not stored in
+ * <tt>levels</tt> since faces
+ * are not organized
+ * hierarchically, but in a flat
+ * array.
+ */
+ dealii::internal::DoFHandler::DoFFaces<dim> *faces;
+
++ dealii::internal::DoFHandler::DoFFaces<dim> *mg_faces;
++
++ std::vector<MGVertexDoFs> mg_vertex_dofs;
++
++ std::vector<types::global_dof_index> mg_used_dofs;
++
+ /**
+ * Make accessor objects friends.
+ */
+ template <int, class> friend class DoFAccessor;
+ template <class> friend class DoFCellAccessor;
+ friend struct dealii::internal::DoFAccessor::Implementation;
+ friend struct dealii::internal::DoFCellAccessor::Implementation;
+
+ friend struct dealii::internal::DoFHandler::Implementation;
+ friend struct dealii::internal::DoFHandler::Policy::Implementation;
};
return number_cache.n_global_dofs;
}
- DoFHandler<dim, spacedim>::n_dofs (const unsigned int level) const {
+template<int dim, int spacedim>
+inline
+types::global_dof_index
++DoFHandler<dim, spacedim>::n_dofs (const unsigned int level) const
++{
+ Assert (level < mg_used_dofs.size (), ExcInvalidLevel (level));
+ return mg_used_dofs[level];
+}
template <int dim, int spacedim>
-unsigned int
+types::global_dof_index
DoFHandler<dim, spacedim>::n_locally_owned_dofs() const
{
return number_cache.n_locally_owned_dofs;
}
- types::global_dof_index DoFHandler<dim, spacedim>::MGVertexDoFs::get_index (const unsigned int level, const unsigned int dof_number) const {
+template<int dim, int spacedim>
+inline
- void DoFHandler<dim, spacedim>::MGVertexDoFs::set_index (const unsigned int level, const unsigned int dof_number, const types::global_dof_index index) {
++types::global_dof_index DoFHandler<dim, spacedim>::MGVertexDoFs::get_index (const unsigned int level, const unsigned int dof_number) const
++{
+ Assert ((level >= coarsest_level) && (level <= finest_level), ExcInvalidLevel (level));
+ return indices[indices_offset[level - coarsest_level] + dof_number];
+}
+
+
+template<int dim, int spacedim>
+inline
++void DoFHandler<dim, spacedim>::MGVertexDoFs::set_index (const unsigned int level, const unsigned int dof_number, const types::global_dof_index index)
++{
+ Assert ((level >= coarsest_level) && (level <= finest_level), ExcInvalidLevel (level));
+ indices[indices_offset[level - coarsest_level] + dof_number] = index;
+}
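Since MGVertexDoFs and its offset array are private, the following standalone sketch only mimics the flattened per-level storage that get_index()/set_index() rely on; every name in it is hypothetical and not deal.II API:

#include <cstddef>
#include <vector>

// Hypothetical stand-in: the indices for all levels of one vertex live
// in a single flat array, and indices_offset[l] marks where the block
// of dofs_per_vertex entries for level (coarsest_level + l) begins.
struct FlatVertexStorage
{
  unsigned int coarsest_level;
  std::vector<std::size_t> indices_offset;
  std::vector<unsigned long long> indices;

  unsigned long long get (const unsigned int level,
                          const unsigned int dof_number) const
  {
    return indices[indices_offset[level - coarsest_level] + dof_number];
  }

  void set (const unsigned int level,
            const unsigned int dof_number,
            const unsigned long long index)
  {
    indices[indices_offset[level - coarsest_level] + dof_number] = index;
  }
};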
+
#endif // DOXYGEN
DEAL_II_NAMESPACE_CLOSE
template <int dim, int spacedim>
class PolicyBase
{
- public:
- /**
- * Destructor.
- */
- virtual ~PolicyBase ();
-
- /**
- * Distribute degrees of freedom on
- * the object given as last argument.
- */
- virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
-
- /**
- * Renumber degrees of freedom as
- * specified by the first argument.
- */
- virtual
- NumberCache
- renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+ public:
+ /**
+ * Destructor.
+ */
+ virtual ~PolicyBase ();
+
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
};
template <int dim, int spacedim>
class Sequential : public PolicyBase<dim,spacedim>
{
- public:
- /**
- * Distribute degrees of freedom on
- * the object given as last argument.
- */
- virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
-
- /**
- * Renumber degrees of freedom as
- * specified by the first argument.
- */
- virtual
- NumberCache
- renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ public:
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
};
template <int dim, int spacedim>
class ParallelDistributed : public PolicyBase<dim,spacedim>
{
- public:
- /**
- * Distribute degrees of freedom on
- * the object given as last argument.
- */
- virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
-
- /**
- * Renumber degrees of freedom as
- * specified by the first argument.
- */
- virtual
- NumberCache
- renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ public:
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
};
}
}
template <int dim>
class DoFLevel
{
- public:
- /**
- * Cache for the DoF indices
- * on cells. The size of this
- * array equals the number of
- * cells on a given level
- * times
- * selected_fe.dofs_per_cell.
- */
- std::vector<types::global_dof_index> cell_dof_indices_cache;
-
- /**
- * The object containing dof-indices
- * and related access-functions
- */
- DoFObjects<dim> dof_object;
-
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Read or write the data of this object to or
- * from a stream for the purpose of serialization
- */
- template <class Archive>
- void serialize(Archive & ar,
- const unsigned int version);
+ public:
+ /**
+ * Cache for the DoF indices
+ * on cells. The size of this
+ * array equals the number of
+ * cells on a given level
+ * times
+ * selected_fe.dofs_per_cell.
+ */
- std::vector<unsigned int> cell_dof_indices_cache;
++ std::vector<types::global_dof_index> cell_dof_indices_cache;
+
+ /**
+ * The object containing dof-indices
+ * and related access-functions
+ */
+ DoFObjects<dim> dof_object;
+
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Read or write the data of this object to or
+ * from a stream for the purpose of serialization
+ */
+ template <class Archive>
+ void serialize(Archive &ar,
+ const unsigned int version);
};
template <int dim>
class DoFObjects
{
- public:
- /**
- * Store the global indices of
- * the degrees of freedom.
- */
- std::vector<types::global_dof_index> dofs;
-
- public:
- /**
- * Set the global index of
- * the @p local_index-th
- * degree of freedom located
- * on the object with number @p
- * obj_index to the value
- * given by the last
- * argument. The @p
- * dof_handler argument is
- * used to access the finite
- * element that is to be used
- * to compute the location
- * where this data is stored.
- *
- * The third argument, @p
- * fe_index, must equal
- * zero. It is otherwise
- * unused, but we retain the
- * argument so that we can
- * use the same interface for
- * non-hp and hp finite
- * element methods, in effect
- * making it possible to
- * share the DoFAccessor
- * class hierarchy between hp
- * and non-hp classes.
- */
- template <int dh_dim, int spacedim>
- void
- set_dof_index (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const types::global_dof_index global_index);
-
- /**
- * Return the global index of
- * the @p local_index-th
- * degree of freedom located
- * on the object with number @p
- * obj_index. The @p
- * dof_handler argument is
- * used to access the finite
- * element that is to be used
- * to compute the location
- * where this data is stored.
- *
- * The third argument, @p
- * fe_index, must equal
- * zero. It is otherwise
- * unused, but we retain the
- * argument so that we can
- * use the same interface for
- * non-hp and hp finite
- * element methods, in effect
- * making it possible to
- * share the DoFAccessor
- * class hierarchy between hp
- * and non-hp classes.
- */
- template <int dh_dim, int spacedim>
- types::global_dof_index
- get_dof_index (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index) const;
-
- /**
- * Return the value 1. The
- * meaning of this function
- * becomes clear by looking
- * at what the corresponding
- * functions in the classes
- * internal::hp::DoFObjects
- */
- template <int dh_dim, int spacedim>
- unsigned int
- n_active_fe_indices (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
- const types::global_dof_index index) const;
-
- /**
- * Similar to the function
- * above. Assert that the
- * given index is zero, and
- * then return true.
- */
- template <int dh_dim, int spacedim>
- bool
- fe_index_is_active (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
- const types::global_dof_index index,
- const unsigned int fe_index) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Read or write the data of this object to or
- * from a stream for the purpose of serialization
- */
- template <class Archive>
- void serialize(Archive & ar,
- const unsigned int version);
-
- /**
- * Make the DoFHandler and
- * MGDoFHandler classes a
- * friend, so that they can
- * resize arrays as
- * necessary.
- */
- template <int> friend class DoFLevel;
- template <int> friend class DoFFaces;
+ public:
+ /**
+ * Store the global indices of
+ * the degrees of freedom.
+ */
- std::vector<unsigned int> dofs;
++ std::vector<types::global_dof_index> dofs;
+
+ public:
+ /**
+ * Set the global index of
+ * the @p local_index-th
+ * degree of freedom located
+ * on the object with number @p
+ * obj_index to the value
+ * given by the last
+ * argument. The @p
+ * dof_handler argument is
+ * used to access the finite
+ * element that is to be used
+ * to compute the location
+ * where this data is stored.
+ *
+ * The third argument, @p
+ * fe_index, must equal
+ * zero. It is otherwise
+ * unused, but we retain the
+ * argument so that we can
+ * use the same interface for
+ * non-hp and hp finite
+ * element methods, in effect
+ * making it possible to
+ * share the DoFAccessor
+ * class hierarchy between hp
+ * and non-hp classes.
+ */
+ template <int dh_dim, int spacedim>
+ void
+ set_dof_index (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
- const unsigned int global_index);
++ const types::global_dof_index global_index);
+
+ /**
+ * Return the global index of
+ * the @p local_index-th
+ * degree of freedom located
+ * on the object with number @p
+ * obj_index. The @p
+ * dof_handler argument is
+ * used to access the finite
+ * element that is to be used
+ * to compute the location
+ * where this data is stored.
+ *
+ * The third argument, @p
+ * fe_index, must equal
+ * zero. It is otherwise
+ * unused, but we retain the
+ * argument so that we can
+ * use the same interface for
+ * non-hp and hp finite
+ * element methods, in effect
+ * making it possible to
+ * share the DoFAccessor
+ * class hierarchy between hp
+ * and non-hp classes.
+ */
+ template <int dh_dim, int spacedim>
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index) const;
+
+ /**
+ * Return the value 1. The
+ * meaning of this function
+ * becomes clear by looking
+ * at what the corresponding
+ * functions in the classes
+ * internal::hp::DoFObjects
+ */
+ template <int dh_dim, int spacedim>
+ unsigned int
+ n_active_fe_indices (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
- const unsigned int index) const;
++ const types::global_dof_index index) const;
+
+ /**
+ * Similar to the function
+ * above. Assert that the
+ * given index is zero, and
+ * then return true.
+ */
+ template <int dh_dim, int spacedim>
+ bool
+ fe_index_is_active (const dealii::DoFHandler<dh_dim,spacedim> &dof_handler,
- const unsigned int index,
++ const types::global_dof_index index,
+ const unsigned int fe_index) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Read or write the data of this object to or
+ * from a stream for the purpose of serialization
+ */
+ template <class Archive>
+ void serialize(Archive &ar,
+ const unsigned int version);
+
+ /**
+ * Make the DoFHandler and
+ * MGDoFHandler classes a
+ * friend, so that they can
+ * resize arrays as
+ * necessary.
+ */
+ template <int> friend class DoFLevel;
+ template <int> friend class DoFFaces;
};
template <int dim>
struct ComparePointwiseDownstream
{
- /**
- * Constructor.
- */
- ComparePointwiseDownstream (const Point<dim> &dir)
- :
- dir(dir)
- {}
- /**
- * Return true if c1 less c2.
- */
- bool operator () (const std::pair<Point<dim>,types::global_dof_index> &c1,
- const std::pair<Point<dim>,types::global_dof_index> &c2) const
- {
- const Point<dim> diff = c2.first-c1.first;
- return (diff*dir > 0 || (diff*dir==0 && c1.second<c2.second));
- }
-
- private:
- /**
- * Flow direction.
- */
- const Point<dim> dir;
+ /**
+ * Constructor.
+ */
+ ComparePointwiseDownstream (const Point<dim> &dir)
+ :
+ dir(dir)
+ {}
+ /**
+ * Return true if c1 less c2.
+ */
- bool operator () (const std::pair<Point<dim>,unsigned int> &c1,
- const std::pair<Point<dim>,unsigned int> &c2) const
++ bool operator () (const std::pair<Point<dim>,types::global_dof_index> &c1,
++ const std::pair<Point<dim>,types::global_dof_index> &c2) const
+ {
+ const Point<dim> diff = c2.first-c1.first;
+ return (diff*dir > 0 || (diff*dir==0 && c1.second<c2.second));
+ }
+
+ private:
+ /**
+ * Flow direction.
+ */
+ const Point<dim> dir;
};
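A sketch of the comparator in action (not part of the patch), assuming it is reachable as DoFRenumbering::ComparePointwiseDownstream and that the (point, index) pairs have been collected elsewhere, e.g. from DoFTools::map_dofs_to_support_points():

#include <deal.II/base/point.h>
#include <deal.II/base/types.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <algorithm>
#include <utility>
#include <vector>

void sort_downstream (std::vector<std::pair<dealii::Point<2>,
                      dealii::types::global_dof_index> > &support_points)
{
  // Sort (support point, DoF index) pairs along a given flow direction.
  const dealii::Point<2> direction (1., 0.);
  dealii::DoFRenumbering::ComparePointwiseDownstream<2> downstream (direction);
  std::sort (support_points.begin (), support_points.end (), downstream);
}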
- /**
- * A namespace for the
- * implementation of some
- * renumbering algorithms based
- * on algorithms implemented in
- * the Boost Graph Library (BGL)
- * by Jeremy Siek and others.
- *
- * While often slightly slower to
- * compute, the algorithms using
- * BOOST often lead to matrices
- * with smaller bandwidths and
- * sparse ILUs based on this
- * numbering are therefore more
- * efficient.
- *
- * For a comparison of these
- * algorithms with the ones
- * defined in DoFRenumbering, see
- * the comparison section in the
- * documentation of the
- * DoFRenumbering namespace.
- */
+ /**
+ * A namespace for the
+ * implementation of some
+ * renumbering algorithms based
+ * on algorithms implemented in
+ * the Boost Graph Library (BGL)
+ * by Jeremy Siek and others.
+ *
+ * While often slightly slower to
+ * compute, the algorithms using
+ * BOOST often lead to matrices
+ * with smaller bandwidths and
+ * sparse ILUs based on this
+ * numbering are therefore more
+ * efficient.
+ *
+ * For a comparison of these
+ * algorithms with the ones
+ * defined in DoFRenumbering, see
+ * the comparison section in the
+ * documentation of the
+ * DoFRenumbering namespace.
+ */
namespace boost
{
- /**
- * Renumber the degrees of
- * freedom according to the
- * Cuthill-McKee method,
- * eventually using the reverse
- * numbering scheme.
- *
- * See the general
- * documentation of the
- * parent class for details
- * on the different methods.
- *
- * As an example of the
- * results of this algorithm,
- * take a look at the
- * comparison of various
- * algorithms in the
- * documentation of the
- * DoFRenumbering namespace.
- */
+ /**
+ * Renumber the degrees of
+ * freedom according to the
+ * Cuthill-McKee method,
+ * eventually using the reverse
+ * numbering scheme.
+ *
+ * See the general
+ * documentation of the
+ * parent class for details
+ * on the different methods.
+ *
+ * As an example of the
+ * results of this algorithm,
+ * take a look at the
+ * comparison of various
+ * algorithms in the
+ * documentation of the
+ * DoFRenumbering namespace.
+ */
template <class DH>
void
- Cuthill_McKee (DH& dof_handler,
+ Cuthill_McKee (DH &dof_handler,
const bool reversed_numbering = false,
const bool use_constraints = false);
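As a usage sketch (not part of the patch), assuming a handler on which distribute_dofs() has been called; the second half uses the compute_* variant declared just below:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <vector>

template <int dim>
void renumber_with_boost (dealii::DoFHandler<dim> &dof_handler)
{
  // Renumber in place, here with the reverse Cuthill-McKee ordering.
  dealii::DoFRenumbering::boost::Cuthill_McKee (dof_handler,
                                                /*reversed_numbering=*/ true);

  // Alternatively, only compute the permutation and apply it explicitly.
  std::vector<dealii::types::global_dof_index>
    new_indices (dof_handler.n_dofs ());
  dealii::DoFRenumbering::boost::compute_Cuthill_McKee (new_indices, dof_handler);
  dof_handler.renumber_dofs (new_indices);
}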
- /**
- * Computes the renumbering
- * vector needed by the
- * Cuthill_McKee() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * Cuthill_McKee() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH>
void
- compute_Cuthill_McKee (std::vector<types::global_dof_index>& new_dof_indices,
- const DH&,
- compute_Cuthill_McKee (std::vector<unsigned int> &new_dof_indices,
++ compute_Cuthill_McKee (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &,
const bool reversed_numbering = false,
const bool use_constraints = false);
- /**
- * Compute the renumbering
- * for the King algorithm but
- * do not actually renumber
- * the degrees of freedom in
- * the DoF handler argument.
- */
+ /**
+ * Compute the renumbering
+ * for the King algorithm but
+ * do not actually renumber
+ * the degrees of freedom in
+ * the DoF handler argument.
+ */
template <class DH>
void
- compute_king_ordering (std::vector<types::global_dof_index>& new_dof_indices,
- const DH&,
- compute_king_ordering (std::vector<unsigned int> &new_dof_indices,
++ compute_king_ordering (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &,
const bool reversed_numbering = false,
const bool use_constraints = false);
- /**
- * Compute the renumbering
- * for the minimum degree
- * algorithm but do not
- * actually renumber the
- * degrees of freedom in the
- * DoF handler argument.
- */
+ /**
+ * Compute the renumbering
+ * for the minimum degree
+ * algorithm but do not
+ * actually renumber the
+ * degrees of freedom in the
+ * DoF handler argument.
+ */
template <class DH>
void
- compute_minimum_degree (std::vector<types::global_dof_index>& new_dof_indices,
- const DH&,
- compute_minimum_degree (std::vector<unsigned int> &new_dof_indices,
++ compute_minimum_degree (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &,
const bool reversed_numbering = false,
const bool use_constraints = false);
}
- /**
- * Renumber the degrees of
- * freedom according to the
- * Cuthill-McKee method,
- * eventually using the reverse
- * numbering scheme.
- *
- * See the general documentation
- * of this class for details on
- * the different methods.
- *
- * As an example of the results
- * of this algorithm, take a look
- * at the comparison of various
- * algorithms in the
- * documentation of the
- * DoFRenumbering namespace.
- */
+ /**
+ * Renumber the degrees of
+ * freedom according to the
+ * Cuthill-McKee method,
+ * eventually using the reverse
+ * numbering scheme.
+ *
+ * See the general documentation
+ * of this class for details on
+ * the different methods.
+ *
+ * As an example of the results
+ * of this algorithm, take a look
+ * at the comparison of various
+ * algorithms in the
+ * documentation of the
+ * DoFRenumbering namespace.
+ */
template <class DH>
void
- Cuthill_McKee (DH& dof_handler,
+ Cuthill_McKee (DH &dof_handler,
const bool reversed_numbering = false,
const bool use_constraints = false,
- const std::vector<unsigned int> &starting_indices = std::vector<unsigned int>());
+ const std::vector<types::global_dof_index> &starting_indices = std::vector<types::global_dof_index>());
- /**
- * Computes the renumbering
- * vector needed by the
- * Cuthill_McKee() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * Cuthill_McKee() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH>
void
- compute_Cuthill_McKee (std::vector<types::global_dof_index>& new_dof_indices,
- const DH&,
- compute_Cuthill_McKee (std::vector<unsigned int> &new_dof_indices,
++ compute_Cuthill_McKee (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &,
const bool reversed_numbering = false,
const bool use_constraints = false,
- const std::vector<unsigned int> &starting_indices = std::vector<unsigned int>());
+ const std::vector<types::global_dof_index> &starting_indices = std::vector<types::global_dof_index>());
- /**
- * Renumber the degrees of
- * freedom according to the
- * Cuthill-McKee method,
- * eventually using the reverse
- * numbering scheme, in this case
- * for a multigrid numbering of
- * degrees of freedom.
- *
- * You can give a triangulation
- * level to which this function
- * is to be applied. Since with
- * a level-wise numbering there
- * are no hanging nodes, no
- * constraints can be used, so
- * the respective parameter of
- * the previous function is
- * omitted.
- *
- * See the general documentation
- * of this class for details on
- * the different methods.
- */
+ /**
+ * Renumber the degrees of
+ * freedom according to the
+ * Cuthill-McKee method,
+ * eventually using the reverse
+ * numbering scheme, in this case
+ * for a multigrid numbering of
+ * degrees of freedom.
+ *
+ * You can give a triangulation
+ * level to which this function
+ * is to be applied. Since with
+ * a level-wise numbering there
+ * are no hanging nodes, no
+ * constraints can be used, so
+ * the respective parameter of
+ * the previous function is
+ * omitted.
+ *
+ * See the general documentation
+ * of this class for details on
+ * the different methods.
+ */
template <int dim>
void
Cuthill_McKee (MGDoFHandler<dim> &dof_handler,
component_wise (hp::DoFHandler<dim> &dof_handler,
const std::vector<unsigned int> &target_component = std::vector<unsigned int> ());
- /**
- * Sort the degrees of freedom by
- * component. It does the same
- * thing as the above function,
- * only that it does this for one
- * single level of a multi-level
- * discretization. The
- * non-multigrid part of the
- * MGDoFHandler is not touched.
- */
+ /**
+ * Sort the degrees of freedom by
+ * component. It does the same
+ * thing as the above function,
+ * only that it does this for one
+ * single level of a multi-level
+ * discretization. The
+ * non-multigrid part of the
+ * MGDoFHandler is not touched.
+ */
template <int dim>
void
- component_wise (MGDoFHandler<dim>& dof_handler,
+ component_wise (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const std::vector<unsigned int>& target_component = std::vector<unsigned int>());
-
-
- /**
- * Sort the degrees of freedom by
- * component. It does the same
- * thing as the previous
- * functions, but more: it
- * renumbers not only every level
- * of the multigrid part, but
- * also the global,
- * i.e. non-multigrid components.
- */
+ const std::vector<unsigned int> &target_component = std::vector<unsigned int>());
+
+
+ /**
+ * Sort the degrees of freedom by
+ * component. It does the same
+ * thing as the previous
+ * functions, but more: it
+ * renumbers not only every level
+ * of the multigrid part, but
+ * also the global,
+ * i.e. non-multigrid components.
+ */
template <int dim>
void
- component_wise (MGDoFHandler<dim>& dof_handler,
- const std::vector<unsigned int>& target_component = std::vector<unsigned int>());
-
- /**
- * Computes the renumbering
- * vector needed by the
- * component_wise()
- * functions. Does not perform
- * the renumbering on the
- * DoFHandler dofs but returns
- * the renumbering vector.
- */
+ component_wise (MGDoFHandler<dim> &dof_handler,
+ const std::vector<unsigned int> &target_component = std::vector<unsigned int>());
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * component_wise()
+ * functions. Does not perform
+ * the renumbering on the
+ * DoFHandler dofs but returns
+ * the renumbering vector.
+ */
template <int dim, int spacedim, class ITERATOR, class ENDITERATOR>
- unsigned int
- compute_component_wise (std::vector<unsigned int> &new_dof_indices,
+ types::global_dof_index
- compute_component_wise (std::vector<types::global_dof_index>& new_dof_indices,
- const ITERATOR& start,
- const ENDITERATOR& end,
++ compute_component_wise (std::vector<types::global_dof_index> &new_dof_indices,
+ const ITERATOR &start,
+ const ENDITERATOR &end,
const std::vector<unsigned int> &target_component);
/**
void
block_wise (hp::DoFHandler<dim> &dof_handler);
- /**
- * Sort the degrees of freedom by
- * block. It does the same
- * thing as the above function,
- * only that it does this for one
- * single level of a multi-level
- * discretization. The
- * non-multigrid part of the
- * MGDoFHandler is not touched.
- */
+ /**
+ * Sort the degrees of freedom by
+ * block. It does the same
+ * thing as the above function,
+ * only that it does this for one
+ * single level of a multi-level
+ * discretization. The
+ * non-multigrid part of the
+ * MGDoFHandler is not touched.
+ */
template <int dim>
void
- block_wise (MGDoFHandler<dim> &dof_handler,
+ block_wise (MGDoFHandler<dim> &dof_handler,
const unsigned int level);
cell_wise_dg (DH &dof_handler,
const std::vector<typename DH::cell_iterator> &cell_order);
- /**
- * Computes the renumbering
- * vector needed by the
- * cell_wise_dg() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * cell_wise_dg() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH>
void
- compute_cell_wise_dg (std::vector<types::global_dof_index>& renumbering,
- std::vector<types::global_dof_index>& inverse_renumbering,
- compute_cell_wise_dg (std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &inverse_renumbering,
++ compute_cell_wise_dg (std::vector<types::global_dof_index> &renumbering,
++ std::vector<types::global_dof_index> &inverse_renumbering,
const DH &dof_handler,
const std::vector<typename DH::cell_iterator> &cell_order);
- /**
- * Computes the renumbering
- * vector needed by the
- * cell_wise() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * cell_wise() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH>
void
- compute_cell_wise (std::vector<types::global_dof_index>& renumbering,
- std::vector<types::global_dof_index>& inverse_renumbering,
- compute_cell_wise (std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &inverse_renumbering,
++ compute_cell_wise (std::vector<types::global_dof_index> &renumbering,
++ std::vector<types::global_dof_index> &inverse_renumbering,
const DH &dof_handler,
const std::vector<typename DH::cell_iterator> &cell_order);
const unsigned int level,
const std::vector<typename MGDoFHandler<dim>::cell_iterator> &cell_order);
- /**
- * Computes the renumbering
- * vector needed by the
- * cell_wise_dg() level renumbering function. Does
- * not perform the renumbering on
- * the MGDoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * cell_wise_dg() level renumbering function. Does
+ * not perform the renumbering on
+ * the MGDoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <int dim>
void
- compute_cell_wise_dg (std::vector<types::global_dof_index>& renumbering,
- std::vector<types::global_dof_index>& inverse_renumbering,
- const MGDoFHandler<dim>& dof_handler,
- compute_cell_wise_dg (std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &inverse_renumbering,
++ compute_cell_wise_dg (std::vector<types::global_dof_index> &renumbering,
++ std::vector<types::global_dof_index> &inverse_renumbering,
+ const MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const std::vector<typename MGDoFHandler<dim>::cell_iterator>& cell_order);
-
-
- /**
- * Computes the renumbering
- * vector needed by the
- * cell_wise() level renumbering function. Does
- * not perform the renumbering on
- * the MGDoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ const std::vector<typename MGDoFHandler<dim>::cell_iterator> &cell_order);
+
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * cell_wise() level renumbering function. Does
+ * not perform the renumbering on
+ * the MGDoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <int dim>
void
- compute_cell_wise (std::vector<types::global_dof_index>& renumbering,
- std::vector<types::global_dof_index>& inverse_renumbering,
- const MGDoFHandler<dim>& dof_handler,
- compute_cell_wise (std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &inverse_renumbering,
++ compute_cell_wise (std::vector<types::global_dof_index> &renumbering,
++ std::vector<types::global_dof_index> &inverse_renumbering,
+ const MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const std::vector<typename MGDoFHandler<dim>::cell_iterator>& cell_order);
+ const std::vector<typename MGDoFHandler<dim>::cell_iterator> &cell_order);
/**
* @}
void
downstream (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim> &direction,
+ const Point<dim> &direction,
const bool dof_wise_renumbering = false);
- /**
- * @deprecated Use downstream()
- * instead.
- */
+ /**
+ * @deprecated Use downstream()
+ * instead.
+ */
template <int dim>
void
downstream_dg (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim> &direction);
+ const Point<dim> &direction);
- /**
- * @deprecated The new function
- * of this name computes the
- * renumbering and its inverse at
- * the same time. So, at least if
- * you need both, you should use
- * the other one.
- *
- * Computes the renumbering
- * vector needed by the
- * downstream_dg() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+ /**
+ * @deprecated The new function
+ * of this name computes the
+ * renumbering and its inverse at
+ * the same time. So, at least if
+ * you need both, you should use
+ * the other one.
+ *
+ * Computes the renumbering
+ * vector needed by the
+ * downstream_dg() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH, int dim>
void
- compute_downstream_dg (std::vector<types::global_dof_index>& new_dof_indices,
- const DH& dof_handler,
- const Point<dim>& direction);
-
- /**
- * Computes the renumbering
- * vector needed by the
- * downstream_dg() function. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
- compute_downstream_dg (std::vector<unsigned int> &new_dof_indices,
++ compute_downstream_dg (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &dof_handler,
+ const Point<dim> &direction);
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * downstream_dg() function. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH, int dim>
void
- compute_downstream (std::vector<types::global_dof_index>& new_dof_indices,
- std::vector<types::global_dof_index>& reverse,
- const DH& dof_handler,
- const Point<dim>& direction,
- compute_downstream (std::vector<unsigned int> &new_dof_indices,
- std::vector<unsigned int> &reverse,
++ compute_downstream (std::vector<types::global_dof_index> &new_dof_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const DH &dof_handler,
+ const Point<dim> &direction,
const bool dof_wise_renumbering);
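// A sketch for compute_downstream(), with dim fixed to 2 for brevity and
// dof_handler assumed to be an already initialized DoFHandler<2>; the
// namespace is assumed to be DoFRenumbering as in deal.II:

const Point<2> direction (1., 0.25);   // the downstream direction to sort by

std::vector<types::global_dof_index> new_indices (dof_handler.n_dofs());
std::vector<types::global_dof_index> reverse     (dof_handler.n_dofs());
DoFRenumbering::compute_downstream (new_indices, reverse, dof_handler,
                                    direction,
                                    /*dof_wise_renumbering=*/ false);
dof_handler.renumber_dofs (new_indices);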
- /**
- * @deprecated Use
- * compute_downstream() instead
- */
+ /**
+ * @deprecated Use
+ * compute_downstream() instead
+ */
template <class DH, int dim>
void
- compute_downstream_dg (std::vector<types::global_dof_index>& new_dof_indices,
- std::vector<types::global_dof_index>& reverse,
- const DH& dof_handler,
- const Point<dim>& direction);
-
- /**
- * Computes the renumbering
- * vector needed by the
- * downstream_dg() function. Does
- * not perform the renumbering on
- * the MGDoFHandler dofs but
- * returns the renumbering
- * vector.
- */
- compute_downstream_dg (std::vector<unsigned int> &new_dof_indices,
- std::vector<unsigned int> &reverse,
++ compute_downstream_dg (std::vector<types::global_dof_index> &new_dof_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const DH &dof_handler,
+ const Point<dim> &direction);
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * downstream_dg() function. Does
+ * not perform the renumbering on
+ * the MGDoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <int dim>
void
- compute_downstream (std::vector<types::global_dof_index>& new_dof_indices,
- std::vector<types::global_dof_index>& reverse,
- const MGDoFHandler<dim>& dof_handler,
- compute_downstream (std::vector<unsigned int> &new_dof_indices,
- std::vector<unsigned int> &reverse,
++ compute_downstream (std::vector<types::global_dof_index> &new_dof_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim>& direction,
+ const Point<dim> &direction,
const bool dof_wise_renumbering);
- /**
- * @deprecated Use
- * compute_downstream() instead
- */
+ /**
+ * @deprecated Use
+ * compute_downstream() instead
+ */
template <int dim>
void
- compute_downstream_dg (std::vector<types::global_dof_index>& new_dof_indices,
- std::vector<types::global_dof_index>& reverse,
- const MGDoFHandler<dim>& dof_handler,
- compute_downstream_dg (std::vector<unsigned int> &new_dof_indices,
- std::vector<unsigned int> &reverse,
++ compute_downstream_dg (std::vector<types::global_dof_index> &new_dof_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const MGDoFHandler<dim> &dof_handler,
const unsigned int level,
- const Point<dim>& direction);
-
- /**
- * Cell-wise clockwise numbering.
- *
- * This function produces a
- * (counter)clockwise ordering of
- * the mesh cells with respect to
- * the hub @p center and calls
- * cell_wise_dg(). Therefore, it
- * only works with Discontinuous
- * Galerkin Finite Elements,
- * i.e. all degrees of freedom
- * have to be associated with the
- * interior of the cell.
- */
+ const Point<dim> &direction);
+
+ /**
+ * Cell-wise clockwise numbering.
+ *
+ * This function produces a
+ * (counter)clockwise ordering of
+ * the mesh cells with respect to
+ * the hub @p center and calls
+ * cell_wise_dg(). Therefore, it
+ * only works with Discontinuous
+ * Galerkin Finite Elements,
+ * i.e. all degrees of freedom
+ * have to be associated with the
+ * interior of the cell.
+ */
template <class DH, int dim>
void
- clockwise_dg (DH& dof_handler,
- const Point<dim>& center,
+ clockwise_dg (DH &dof_handler,
+ const Point<dim> &center,
const bool counter = false);
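// A sketch for clockwise_dg(), assuming dof_handler uses a discontinuous
// element such as FE_DGQ<2> (all DoFs must be associated with cell interiors,
// as stated above) and the DoFRenumbering namespace as in deal.II:

const Point<2> center (0.5, 0.5);        // hub around which the cells are ordered
DoFRenumbering::clockwise_dg (dof_handler, center,
                              /*counter=*/ false);   // true gives counterclockwise order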
- /**
- * Cell-wise clockwise numbering
- * on one level. See the other
- * function with the same name.
- */
+ /**
+ * Cell-wise clockwise numbering
+ * on one level. See the other
+ * function with the same name.
+ */
template <int dim>
void
- clockwise_dg (MGDoFHandler<dim> &dof_handler,
+ clockwise_dg (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
const Point<dim> &center,
const bool counter = false);
-
- /**
- * Computes the renumbering
- * vector needed by the
- * clockwise_dg() functions. Does
- * not perform the renumbering on
- * the DoFHandler dofs but
- * returns the renumbering
- * vector.
- */
+
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * clockwise_dg() functions. Does
+ * not perform the renumbering on
+ * the DoFHandler dofs but
+ * returns the renumbering
+ * vector.
+ */
template <class DH, int dim>
void
- compute_clockwise_dg (std::vector<types::global_dof_index>& new_dof_indices,
- const DH& dof_handler,
- const Point<dim>& center,
- compute_clockwise_dg (std::vector<unsigned int> &new_dof_indices,
++ compute_clockwise_dg (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &dof_handler,
+ const Point<dim> &center,
const bool counter);
/**
* @{
*/
- /**
- * Sort those degrees of freedom
- * which are tagged with @p true
- * in the @p selected_dofs array
- * to the back of the DoF
- * numbers. The sorting is
- * stable, i.e. the relative
- * order within the tagged
- * degrees of freedom is
- * preserved, as is the relative
- * order within the untagged
- * ones.
- *
- * @pre The @p selected_dofs
- * array must have as many elements as
- * the @p dof_handler has degrees of
- * freedom.
- */
+ /**
+ * Sort those degrees of freedom
+ * which are tagged with @p true
+ * in the @p selected_dofs array
+ * to the back of the DoF
+ * numbers. The sorting is
+ * stable, i.e. the relative
+ * order within the tagged
+ * degrees of freedom is
+ * preserved, as is the relative
+ * order within the untagged
+ * ones.
+ *
+ * @pre The @p selected_dofs
+ * array must have as many elements as
+ * the @p dof_handler has degrees of
+ * freedom.
+ */
template <class DH>
void
- sort_selected_dofs_back (DH& dof_handler,
- const std::vector<bool>& selected_dofs);
-
- /**
- * Sort those degrees of freedom
- * which are tagged with @p true
- * in the @p selected_dofs array
- * on the level #level
- * to the back of the DoF
- * numbers. The sorting is
- * stable, i.e. the relative
- * order within the tagged
- * degrees of freedom is
- * preserved, as is the relative
- * order within the untagged
- * ones.
- *
- * @pre The @p selected_dofs
- * array must have as many elements as
- * the @p dof_handler has degrees of
- * freedom on the given level.
- */
+ sort_selected_dofs_back (DH &dof_handler,
+ const std::vector<bool> &selected_dofs);
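// A sketch for sort_selected_dofs_back(): mark a subset of DoFs (here, purely
// for illustration, the upper half of the current numbering; in practice the
// mask would typically come from a function such as
// DoFTools::extract_boundary_dofs()) and move them to the end:

std::vector<bool> selected_dofs (dof_handler.n_dofs(), false);
for (types::global_dof_index i = dof_handler.n_dofs()/2; i < dof_handler.n_dofs(); ++i)
  selected_dofs[i] = true;

DoFRenumbering::sort_selected_dofs_back (dof_handler, selected_dofs);
// the tagged DoFs now occupy the highest indices, keeping their relative order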
+
+ /**
+ * Sort those degrees of freedom
+ * which are tagged with @p true
+ * in the @p selected_dofs array
+ * on the level #level
+ * to the back of the DoF
+ * numbers. The sorting is
+ * stable, i.e. the relative
+ * order within the tagged
+ * degrees of freedom is
+ * preserved, as is the relative
+ * order within the untagged
+ * ones.
+ *
+ * @pre The @p selected_dofs
+ * array must have as many elements as
+ * the @p dof_handler has degrees of
+ * freedom on the given level.
+ */
template <class DH>
void
- sort_selected_dofs_back (DH& dof_handler,
- const std::vector<bool>& selected_dofs,
+ sort_selected_dofs_back (DH &dof_handler,
+ const std::vector<bool> &selected_dofs,
const unsigned int level);
- /**
- * Computes the renumbering
- * vector needed by the
- * sort_selected_dofs_back()
- * function. Does not perform the
- * renumbering on the DoFHandler
- * dofs but returns the
- * renumbering vector.
- *
- * @pre The @p selected_dofs
- * array must have as many elements as
- * the @p dof_handler has degrees of
- * freedom.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * sort_selected_dofs_back()
+ * function. Does not perform the
+ * renumbering on the DoFHandler
+ * dofs but returns the
+ * renumbering vector.
+ *
+ * @pre The @p selected_dofs
+ * array must have as many elements as
+ * the @p dof_handler has degrees of
+ * freedom.
+ */
template <class DH>
void
- compute_sort_selected_dofs_back (std::vector<types::global_dof_index>& new_dof_indices,
- const DH& dof_handler,
- const std::vector<bool>& selected_dofs);
-
- /**
- * Computes the renumbering
- * vector on each level
- * needed by the
- * sort_selected_dofs_back()
- * function. Does not perform the
- * renumbering on the MGDoFHandler
- * dofs but returns the
- * renumbering vector.
- *
- * @pre The @p selected_dofs
- * array must have as many elements as
- * the @p dof_handler has degrees of
- * freedom on the given level.
- */
- compute_sort_selected_dofs_back (std::vector<unsigned int> &new_dof_indices,
++ compute_sort_selected_dofs_back (std::vector<types::global_dof_index> &new_dof_indices,
+ const DH &dof_handler,
+ const std::vector<bool> &selected_dofs);
+
+ /**
+ * Computes the renumbering
+ * vector on each level
+ * needed by the
+ * sort_selected_dofs_back()
+ * function. Does not perform the
+ * renumbering on the MGDoFHandler
+ * dofs but returns the
+ * renumbering vector.
+ *
+ * @pre The @p selected_dofs
+ * array must have as many elements as
+ * the @p dof_handler has degrees of
+ * freedom on the given level.
+ */
template <class DH>
void
- compute_sort_selected_dofs_back (std::vector<unsigned int>& new_dof_indices,
- const DH& dof_handler,
- const std::vector<bool>& selected_dofs,
+ compute_sort_selected_dofs_back (std::vector<unsigned int> &new_dof_indices,
+ const DH &dof_handler,
+ const std::vector<bool> &selected_dofs,
const unsigned int level);
- /**
- * Renumber the degrees of
- * freedom in a random way.
- */
+ /**
+ * Renumber the degrees of
+ * freedom in a random way.
+ */
template <class DH>
void
- random (DH& dof_handler);
-
- /**
- * Computes the renumbering
- * vector needed by the random()
- * function. Does not perform the
- * renumbering on the DoFHandler
- * dofs but returns the
- * renumbering vector.
- */
+ random (DH &dof_handler);
+
+ /**
+ * Computes the renumbering
+ * vector needed by the random()
+ * function. Does not perform the
+ * renumbering on the DoFHandler
+ * dofs but returns the
+ * renumbering vector.
+ */
template <class DH>
void
- compute_random (std::vector<unsigned int> &new_dof_indices,
+ compute_random (std::vector<types::global_dof_index> &new_dof_indices,
- const DH& dof_handler);
+ const DH &dof_handler);
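// A sketch for compute_random(), e.g. to test the robustness of a solver or
// preconditioner against the DoF ordering (dof_handler and the
// DoFRenumbering namespace are assumed as in the sketches above):

std::vector<types::global_dof_index> new_indices (dof_handler.n_dofs());
DoFRenumbering::compute_random (new_indices, dof_handler);
dof_handler.renumber_dofs (new_indices);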
/**
* @}
void
subdomain_wise (DH &dof_handler);
- /**
- * Computes the renumbering
- * vector needed by the
- * subdomain_wise()
- * function. Does not perform the
- * renumbering on the @p
- * DoFHandler dofs but returns
- * the renumbering vector.
- */
+ /**
+ * Computes the renumbering
+ * vector needed by the
+ * subdomain_wise()
+ * function. Does not perform the
+ * renumbering on the @p
+ * DoFHandler dofs but returns
+ * the renumbering vector.
+ */
template <class DH>
void
- compute_subdomain_wise (std::vector<unsigned int> &new_dof_indices,
+ compute_subdomain_wise (std::vector<types::global_dof_index> &new_dof_indices,
const DH &dof_handler);
/**
template <class DH, class SparsityPattern>
void
make_boundary_sparsity_pattern (const DH &dof,
- const std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- SparsityPattern &sparsity_pattern);
-
- /**
- * Write the sparsity structure of the
- * matrix composed of the basis functions
- * on the boundary into the
- * matrix structure. In contrast to the
- * previous function, only those parts
- * of the boundary are considered of which
- * the boundary indicator is listed in the
- * set of numbers passed to this function.
- *
- * In fact, rather than a @p set
- * of boundary indicators, a
- * @p map needs to be passed,
- * since most of the functions
- * handling with boundary
- * indicators take a mapping of
- * boundary indicators and the
- * respective boundary
- * functions. The boundary
- * function, however, is ignored
- * in this function. If you have
- * no functions at hand, but only
- * the boundary indicators, set
- * the function pointers to null
- * pointers.
- *
- * For the type of the sparsity
- * pattern, the same holds as
- * said above.
- */
- const std::vector<unsigned int> &dof_to_boundary_mapping,
++ const std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ SparsityPattern &sparsity_pattern);
+
+ /**
+ * Write the sparsity structure of the
+ * matrix composed of the basis functions
+ * on the boundary into the
+ * matrix structure. In contrast to the
+ * previous function, only those parts
+ * of the boundary are considered whose
+ * boundary indicator is listed in the
+ * set of numbers passed to this function.
+ *
+ * In fact, rather than a @p set
+ * of boundary indicators, a
+ * @p map needs to be passed,
+ * since most of the functions
+ * dealing with boundary
+ * indicators take a mapping of
+ * boundary indicators and the
+ * respective boundary
+ * functions. The boundary
+ * function, however, is ignored
+ * in this function. If you have
+ * no functions at hand, but only
+ * the boundary indicators, set
+ * the function pointers to null
+ * pointers.
+ *
+ * For the type of the sparsity
+ * pattern, the same holds as
+ * said above.
+ */
template <class DH, class SparsityPattern>
void
make_boundary_sparsity_pattern (const DH &dof,
- const typename FunctionMap<DH::space_dimension>::type &boundary_indicators,
- const std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- SparsityPattern &sparsity);
-
- /**
- * Generate sparsity pattern for
- * fluxes, i.e. formulations of
- * the discrete problem with
- * discontinuous elements which
- * couple across faces of cells.
- * This is a replacement of the
- * function
- * @p make_sparsity_pattern for
- * discontinuous methods. Since
- * the fluxes include couplings
- * between neighboring elements,
- * the normal couplings and these
- * extra matrix entries are
- * considered.
- */
+ const typename FunctionMap<DH::space_dimension>::type &boundary_indicators,
- const std::vector<unsigned int> &dof_to_boundary_mapping,
++ const std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ SparsityPattern &sparsity);
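// A hedged sketch of the variant above, restricted to the part of the boundary
// with indicator 0. The map value may be a null pointer since the boundary
// function is ignored here; dof_handler is assumed to be a DoFHandler<2>, and
// map_dof_to_boundary_indices(), n_boundary_dofs() and
// max_couplings_between_boundary_dofs() are the corresponding deal.II helpers:

FunctionMap<2>::type boundary_functions;
boundary_functions[0] = 0;                     // boundary_id 0, no function needed

std::set<types::boundary_id> boundary_ids;
boundary_ids.insert (0);
std::vector<types::global_dof_index> dof_to_boundary_mapping;
DoFTools::map_dof_to_boundary_indices (dof_handler, boundary_ids,
                                       dof_to_boundary_mapping);

const types::global_dof_index n_boundary_dofs
  = dof_handler.n_boundary_dofs (boundary_functions);
SparsityPattern boundary_sparsity (n_boundary_dofs, n_boundary_dofs,
                                   dof_handler.max_couplings_between_boundary_dofs());
DoFTools::make_boundary_sparsity_pattern (dof_handler, boundary_functions,
                                          dof_to_boundary_mapping,
                                          boundary_sparsity);
boundary_sparsity.compress ();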
+
+ /**
+ * Generate sparsity pattern for
+ * fluxes, i.e. formulations of
+ * the discrete problem with
+ * discontinuous elements which
+ * couple across faces of cells.
+ * This is a replacement of the
+ * function
+ * @p make_sparsity_pattern for
+ * discontinuous methods. Since
+ * the fluxes include couplings
+ * between neighboring elements,
+ * the normal couplings and these
+ * extra matrix entries are
+ * considered.
+ */
template<class DH, class SparsityPattern>
void
make_flux_sparsity_pattern (const DH &dof_handler,
void
extract_subdomain_dofs (const DH &dof_handler,
const types::subdomain_id subdomain_id,
- std::vector<bool> &selected_dofs);
+ std::vector<bool> &selected_dofs);
- /**
- * Extract the set of global DoF
- * indices that are owned by the
- * current processor. For regular
- * DoFHandler objects, this set
- * is the complete set with all
- * DoF indices. In either case,
- * it equals what
- * DoFHandler::locally_owned_dofs()
- * returns.
- */
+ /**
+ * Extract the set of global DoF
+ * indices that are owned by the
+ * current processor. For regular
+ * DoFHandler objects, this set is
+ * the complete set of all DoF indices;
+ * for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * it contains only the locally owned
+ * ones. In either case,
+ * it equals what
+ * DoFHandler::locally_owned_dofs()
+ * returns.
+ */
template <class DH>
void
- extract_locally_owned_dofs (const DH & dof_handler,
- IndexSet & dof_set);
-
-
- /**
- * Extract the set of global DoF
- * indices that are active on the
- * current DoFHandler. For
- * regular DoFHandlers, these are
- * all DoF indices, but for
- * DoFHandler objects built on
- * parallel::distributed::Triangulation
- * this set is a superset of
- * DoFHandler::locally_owned_dofs()
- * and contains all DoF indices
- * that live on all locally owned
- * cells (including on the
- * interface to ghost
- * cells). However, it does not
- * contain the DoF indices that
- * are exclusively defined on
- * ghost or artificial cells (see
- * @ref GlossArtificialCell "the
- * glossary").
- *
- * The degrees of freedom identified by
- * this function equal those obtained
- * from the
- * dof_indices_with_subdomain_association()
- * function when called with the locally
- * owned subdomain id.
- */
+ extract_locally_owned_dofs (const DH &dof_handler,
+ IndexSet &dof_set);
+
+
+ /**
+ * Extract the set of global DoF
+ * indices that are active on the
+ * current DoFHandler. For
+ * regular DoFHandlers, these are
+ * all DoF indices, but for
+ * DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * this set is a superset of
+ * DoFHandler::locally_owned_dofs()
+ * and contains all DoF indices
+ * that live on all locally owned
+ * cells (including on the
+ * interface to ghost
+ * cells). However, it does not
+ * contain the DoF indices that
+ * are exclusively defined on
+ * ghost or artificial cells (see
+ * @ref GlossArtificialCell "the
+ * glossary").
+ *
+ * The degrees of freedom identified by
+ * this function equal those obtained
+ * from the
+ * dof_indices_with_subdomain_association()
+ * function when called with the locally
+ * owned subdomain id.
+ */
template <class DH>
void
- extract_locally_active_dofs (const DH & dof_handler,
- IndexSet & dof_set);
-
- /**
- * Extract the set of global DoF
- * indices that are active on the
- * current DoFHandler. For
- * regular DoFHandlers, these are
- * all DoF indices, but for
- * DoFHandler objects built on
- * parallel::distributed::Triangulation
- * this set is the union of
- * DoFHandler::locally_owned_dofs()
- * and the DoF indices on all
- * ghost cells. In essence, it is
- * the DoF indices on all cells
- * that are not artificial (see
- * @ref GlossArtificialCell "the glossary").
- */
+ extract_locally_active_dofs (const DH &dof_handler,
+ IndexSet &dof_set);
+
+ /**
+ * Extract the set of global DoF
+ * indices that are locally relevant
+ * for the current DoFHandler. For
+ * regular DoFHandlers, these are
+ * all DoF indices, but for
+ * DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * this set is the union of
+ * DoFHandler::locally_owned_dofs()
+ * and the DoF indices on all
+ * ghost cells. In essence, it is
+ * the DoF indices on all cells
+ * that are not artificial (see
+ * @ref GlossArtificialCell "the glossary").
+ */
template <class DH>
void
- extract_locally_relevant_dofs (const DH & dof_handler,
- IndexSet & dof_set);
-
- /**
- * For each DoF, return in the output
- * array to which subdomain (as given by
- * the <tt>cell->subdomain_id()</tt> function)
- * it belongs. The output array is
- * supposed to have the right size
- * already when calling this function.
- *
- * Note that degrees of freedom
- * associated with faces, edges, and
- * vertices may be associated with
- * multiple subdomains if they are
- * sitting on partition boundaries. In
- * these cases, we put them into one of
- * the associated partitions in an
- * undefined way. This may sometimes lead
- * to different numbers of degrees of
- * freedom in partitions, even if the
- * number of cells is perfectly
- * equidistributed. While this is
- * regrettable, it is not a problem in
- * practice since the number of degrees
- * of freedom on partition boundaries is
- * asymptotically vanishing as we refine
- * the mesh as long as the number of
- * partitions is kept constant.
- *
- * This function returns the association
- * of each DoF with one subdomain. If you
- * are looking for the association of
- * each @em cell with a subdomain, either
- * query the
- * <tt>cell->subdomain_id()</tt>
- * function, or use the
- * <tt>GridTools::get_subdomain_association</tt>
- * function.
- *
- * Note that this function is of
- * questionable use for DoFHandler objects built on
- * parallel::distributed::Triangulation
- * since in that case ownership of
- * individual degrees of freedom by MPI
- * processes is controlled by the DoF
- * handler object, not based on some
- * geometric algorithm in conjunction
- * with subdomain id. In particular, the
- * degrees of freedom identified by the
- * functions in this namespace as
- * associated with a subdomain are not
- * the same the
- * DoFHandler class
- * identifies as those it owns.
- */
+ extract_locally_relevant_dofs (const DH &dof_handler,
+ IndexSet &dof_set);
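// A typical use of the extract_locally_*_dofs() functions above in a parallel
// program (the DoFTools namespace and an already distributed dof_handler are
// assumed): the resulting IndexSets are what one usually passes to ghosted
// parallel vectors and to constraint objects.

IndexSet locally_owned_dofs;
IndexSet locally_relevant_dofs;
DoFTools::extract_locally_owned_dofs    (dof_handler, locally_owned_dofs);
DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);
// locally_owned_dofs is always a subset of locally_relevant_dofs; the latter
// additionally contains the DoFs living on ghost cells.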
+
+ /**
+ * For each DoF, return in the output
+ * array to which subdomain (as given by
+ * the <tt>cell->subdomain_id()</tt> function)
+ * it belongs. The output array is
+ * supposed to have the right size
+ * already when calling this function.
+ *
+ * Note that degrees of freedom
+ * associated with faces, edges, and
+ * vertices may be associated with
+ * multiple subdomains if they are
+ * sitting on partition boundaries. In
+ * these cases, we put them into one of
+ * the associated partitions in an
+ * undefined way. This may sometimes lead
+ * to different numbers of degrees of
+ * freedom in partitions, even if the
+ * number of cells is perfectly
+ * equidistributed. While this is
+ * regrettable, it is not a problem in
+ * practice since the number of degrees
+ * of freedom on partition boundaries is
+ * asymptotically vanishing as we refine
+ * the mesh as long as the number of
+ * partitions is kept constant.
+ *
+ * This function returns the association
+ * of each DoF with one subdomain. If you
+ * are looking for the association of
+ * each @em cell with a subdomain, either
+ * query the
+ * <tt>cell->subdomain_id()</tt>
+ * function, or use the
+ * <tt>GridTools::get_subdomain_association</tt>
+ * function.
+ *
+ * Note that this function is of
+ * questionable use for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * since in that case ownership of
+ * individual degrees of freedom by MPI
+ * processes is controlled by the DoF
+ * handler object, not based on some
+ * geometric algorithm in conjunction
+ * with subdomain id. In particular, the
+ * degrees of freedom identified by the
+ * functions in this namespace as
+ * associated with a subdomain are not
+ * the same as the ones the
+ * DoFHandler class
+ * identifies as those it owns.
+ */
template <class DH>
void
get_subdomain_association (const DH &dof_handler,
IndexSet
dof_indices_with_subdomain_association (const DH &dof_handler,
const types::subdomain_id subdomain);
- // @}
- /**
- * @name Dof indices for patches
- *
- * Create structures containing a
- * large set of degrees of freedom
- * for small patches of cells. The
- * resulting objects can be used in
- * RelaxationBlockSOR and related
- * classes to implement Schwarz
- * preconditioners and smoothers,
- * where the subdomains consist of
- * small numbers of cells only.
- */
- //@{
- /**
- * Create an incidence matrix that
- * for every cell on a given level
- * of a multilevel DoFHandler flags
- * which degrees of freedom are
- * associated with the
- * corresponding cell. This data
- * structure is matrix with as many
- * rows as there are cells on a
- * given level, as many rows as
- * there are degrees of freedom on
- * this level, and entries that are
- * either true or false. This data
- * structure is conveniently
- * represented by a SparsityPattern
- * object.
- *
- * @note The ordering of rows
- * (cells) follows the ordering of
- * the standard cell iterators.
- */
-
+ // @}
+ /**
+ * @name Dof indices for patches
+ *
+ * Create structures containing a
+ * large set of degrees of freedom
+ * for small patches of cells. The
+ * resulting objects can be used in
+ * RelaxationBlockSOR and related
+ * classes to implement Schwarz
+ * preconditioners and smoothers,
+ * where the subdomains consist of
+ * small numbers of cells only.
+ */
+ //@{
+ /**
+ * Create an incidence matrix that
+ * for every cell on a given level
+ * of a multilevel DoFHandler flags
+ * which degrees of freedom are
+ * associated with the
+ * corresponding cell. This data
+ * structure is a matrix with as many
+ * rows as there are cells on a
+ * given level, as many columns as
+ * there are degrees of freedom on
+ * this level, and entries that are
+ * either true or false. This data
+ * structure is conveniently
+ * represented by a SparsityPattern
+ * object.
+ *
+ * @note The ordering of rows
+ * (cells) follows the ordering of
+ * the standard cell iterators.
+ */
template <class DH, class Sparsity>
- void make_cell_patches(Sparsity& block_list,
- const DH& dof_handler,
+ void make_cell_patches(Sparsity &block_list,
+ const DH &dof_handler,
const unsigned int level,
- const std::vector<bool>& selected_dofs = std::vector<bool>(),
+ const std::vector<bool> &selected_dofs = std::vector<bool>(),
unsigned int offset = 0);
- /**
- * Create an incidence matrix that
- * for every vertex on a given level
- * of a multilevel DoFHandler flags
- * which degrees of freedom are
- * associated with the
- * adjacent cells. This data
- * structure is matrix with as many
- * rows as there are vertices on a
- * given level, as many rows as
- * there are degrees of freedom on
- * this level, and entries that are
- * either true or false. This data
- * structure is conveniently
- * represented by a SparsityPattern
- * object.
- * The sparsity pattern
- * may be empty when entering this
- * function and will be
- * reinitialized to the correct
- * size.
- *
- * The function has some boolean
- * arguments (listed below)
- * controlling details of the
- * generated patches. The default
- * settings are those for
- * Arnold-Falk-Winther type
- * smoothers for divergence and
- * curl conforming finite elements
- * with essential boundary
- * conditions. Other applications
- * are possible, in particular
- * changing
- * <tt>boundary_patches</tt> for
- * non-essential boundary conditions.
- *
- * @arg <tt>block_list</tt>: the
- * SparsityPattern into which the
- * patches will be stored.
- * @arg <tt>dof_handler</tt>: The
- * multilevel dof handler
- * providing the topology operated
- * on.
- * @arg
- * <tt>interior_dofs_only</tt>:
- * for each patch of cells around
- * a vertex, collect only the
- * interior degrees of freedom of
- * the patch and disregard those
- * on the boundary of the
- * patch. This is for instance the
- * setting for smoothers of
- * Arnold-Falk-Winther type.
- * @arg <tt>boundary_patches</tt>:
- * include patches around vertices
- * at the boundary of the
- * domain. If not, only patches
- * around interior vertices will
- * be generated.
- * @arg
- * <tt>level_boundary_patches</tt>:
- * same for refinement edges
- * towards coarser cells.
- * @arg
- * <tt>single_cell_patches</tt>:
- * if not true, patches containing
- * a single cell are eliminated.
- */
- template <class DH>
- void make_vertex_patches(SparsityPattern& block_list,
- const DH& dof_handler,
- const unsigned int level,
- const bool interior_dofs_only,
- const bool boundary_patches = false,
- const bool level_boundary_patches = false,
- const bool single_cell_patches = false);
-
- /**
- * Create an incidence matrix that
- * for every cell on a given level
- * of a multilevel DoFHandler flags
- * which degrees of freedom are
- * associated with children of this
- * cell. This data
- * structure is conveniently
- * represented by a SparsityPattern
- * object.
-
- * Create a sparsity pattern which
- * in each row lists the degrees of
- * freedom associated to the
- * cells which are the children of
- * the same cell. The
- * sparsity pattern may be empty
- * when entering this function and
- * will be reinitialized to the
- * correct size.
- *
- * The function has some boolean
- * arguments (lsited below)
- * controlling details of the
- * generated patches. The default
- * settings are those for
- * Arnold-Falk-Winther type
- * smoothers for divergence and
- * curl conforming finite elements
- * with essential boundary
- * conditions. Other applications
- * are possible, in particular
- * changing
- * <tt>boundary_dofs</tt> for
- * non-essential boundary
- * conditions.
- *
- * Since the patches are defined
- * through refinement, th
- *
- * @arg <tt>block_list</tt>: the
- * SparsityPattern into which the
- * patches will be stored.
- * @arg <tt>dof_handler</tt>: The
- * multilevel dof handler
- * providing the topology operated
- * on.
- * @arg
- * <tt>interior_dofs_only</tt>:
- * for each patch of cells around
- * a vertex, collect only the
- * interior degrees of freedom of
- * the patch and disregard those
- * on the boundary of the
- * patch. This is for instance the
- * setting for smoothers of
- * Arnold-Falk-Winther type.
- * @arg <tt>boundary_dofs</tt>:
- * include degrees of freedom,
- * which would have excluded by
- * <tt>interior_dofs_only</tt>,
- * but are lying on the boundary
- * of the domain, and thus need
- * smoothing. This parameter has
- * no effect if
- * <tt>interior_dofs_only</tt> is false.
- */
- template <class DH>
- void make_child_patches(SparsityPattern& block_list,
- const DH& dof_handler,
- const unsigned int level,
- const bool interior_dofs_only,
- const bool boundary_dofs = false);
-
- /**
- * Create a block list with only a
- * single patch, which in turn
- * contains all degrees of freedom
- * on the given level.
- *
- * This function is mostly a
- * closure on level 0 for functions
- * like make_child_patches() and
- * make_vertex_patches(), which may
- * produce an empty patch list.
- *
- * @arg <tt>block_list</tt>: the
- * SparsityPattern into which the
- * patches will be stored.
- * @arg <tt>dof_handler</tt>: The
- * multilevel dof handler
- * providing the topology operated
- * on.
- * @arg <tt>level</tt> The grid
- * level used for building the list.
- * @arg
- * <tt>interior_dofs_only</tt>:
- * if true, exclude degrees of freedom on
- * the boundary of the domain.
- */
+ /**
+ * Create an incidence matrix that
+ * for every vertex on a given level
+ * of a multilevel DoFHandler flags
+ * which degrees of freedom are
+ * associated with the
+ * adjacent cells. This data
+ * structure is a matrix with as many
+ * rows as there are vertices on a
+ * given level, as many columns as
+ * there are degrees of freedom on
+ * this level, and entries that are
+ * either true or false. This data
+ * structure is conveniently
+ * represented by a SparsityPattern
+ * object.
+ * The sparsity pattern
+ * may be empty when entering this
+ * function and will be
+ * reinitialized to the correct
+ * size.
+ *
+ * The function has some boolean
+ * arguments (listed below)
+ * controlling details of the
+ * generated patches. The default
+ * settings are those for
+ * Arnold-Falk-Winther type
+ * smoothers for divergence and
+ * curl conforming finite elements
+ * with essential boundary
+ * conditions. Other applications
+ * are possible, in particular
+ * changing
+ * <tt>boundary_patches</tt> for
+ * non-essential boundary conditions.
+ *
+ * @arg <tt>block_list</tt>: the
+ * SparsityPattern into which the
+ * patches will be stored.
+ * @arg <tt>dof_handler</tt>: The
+ * multilevel dof handler
+ * providing the topology operated
+ * on.
+ * @arg
+ * <tt>interior_dofs_only</tt>:
+ * for each patch of cells around
+ * a vertex, collect only the
+ * interior degrees of freedom of
+ * the patch and disregard those
+ * on the boundary of the
+ * patch. This is for instance the
+ * setting for smoothers of
+ * Arnold-Falk-Winther type.
+ * @arg <tt>boundary_patches</tt>:
+ * include patches around vertices
+ * at the boundary of the
+ * domain. If not, only patches
+ * around interior vertices will
+ * be generated.
+ * @arg
+ * <tt>level_boundary_patches</tt>:
+ * same for refinement edges
+ * towards coarser cells.
+ * @arg
+ * <tt>single_cell_patches</tt>:
+ * if not true, patches containing
+ * a single cell are eliminated.
+ */
template <class DH>
- void make_single_patch(SparsityPattern& block_list,
- const DH& dof_handler,
+ void make_vertex_patches(SparsityPattern &block_list,
+ const DH &dof_handler,
+ const unsigned int level,
+ const bool interior_dofs_only,
+ const bool boundary_patches = false,
+ const bool level_boundary_patches = false,
+ const bool single_cell_patches = false);
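// A sketch of how the vertex patches are typically built for a multigrid
// smoother (mg_dof_handler and level are assumed to exist; the DoFTools
// namespace as in deal.II; compressing the pattern before use is an
// assumption of this sketch):

SparsityPattern block_list;
DoFTools::make_vertex_patches (block_list, mg_dof_handler, level,
                               /*interior_dofs_only=*/ true);
block_list.compress ();
// each row of block_list now lists the interior DoFs of one vertex patch on
// the given level and can be handed to RelaxationBlockSOR and related classes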
+
+ /**
+ * Create an incidence matrix that
+ * for every cell on a given level
+ * of a multilevel DoFHandler flags
+ * which degrees of freedom are
+ * associated with children of this
+ * cell. This data
+ * structure is conveniently
+ * represented by a SparsityPattern
+ * object.
+ *
+ * Create a sparsity pattern which
+ * in each row lists the degrees of
+ * freedom associated to the
+ * cells which are the children of
+ * the same cell. The
+ * sparsity pattern may be empty
+ * when entering this function and
+ * will be reinitialized to the
+ * correct size.
+ *
+ * The function has some boolean
+ * arguments (listed below)
+ * controlling details of the
+ * generated patches. The default
+ * settings are those for
+ * Arnold-Falk-Winther type
+ * smoothers for divergence and
+ * curl conforming finite elements
+ * with essential boundary
+ * conditions. Other applications
+ * are possible, in particular
+ * changing
+ * <tt>boundary_dofs</tt> for
+ * non-essential boundary
+ * conditions.
+ *
+ * Since the patches are defined
+ * through refinement, th
+ *
+ * @arg <tt>block_list</tt>: the
+ * SparsityPattern into which the
+ * patches will be stored.
+ * @arg <tt>dof_handler</tt>: The
+ * multilevel dof handler
+ * providing the topology operated
+ * on.
+ * @arg
+ * <tt>interior_dofs_only</tt>:
+ * for each patch of cells around
+ * a vertex, collect only the
+ * interior degrees of freedom of
+ * the patch and disregard those
+ * on the boundary of the
+ * patch. This is for instance the
+ * setting for smoothers of
+ * Arnold-Falk-Winther type.
+ * @arg <tt>boundary_dofs</tt>:
+ * include degrees of freedom,
+ * which would have been excluded by
+ * <tt>interior_dofs_only</tt>,
+ * but are lying on the boundary
+ * of the domain, and thus need
+ * smoothing. This parameter has
+ * no effect if
+ * <tt>interior_dofs_only</tt> is false.
+ */
+ template <class DH>
+ void make_child_patches(SparsityPattern &block_list,
+ const DH &dof_handler,
const unsigned int level,
- const bool interior_dofs_only = false);
-
- //@}
- /**
- * Extract a vector that represents the
- * constant modes of the DoFHandler for the
- * components chosen by
- * <tt>component_mask</tt> (see @ref
- * GlossComponentMask). The constant modes
- * on a discretization are the null space
- * of a Laplace operator on the selected
- * components with Neumann boundary
- * conditions applied. The null space is a
- * necessary ingredient for obtaining a
- * good AMG preconditioner when using the
- * class TrilinosWrappers::PreconditionAMG.
- * Since the ML AMG package only works on
- * algebraic properties of the respective
- * matrix, it has no chance to detect
- * whether the matrix comes from a scalar
- * or a vector valued problem. However, a
- * near null space supplies exactly the
- * needed information about these
- * components. The null space will consist
- * of as many vectors as there are true
- * arguments in <tt>component_mask</tt>
- * (see @ref GlossComponentMask), each of
- * which will be one in one vector
- * component and zero in all others. We
- * store this object in a vector of
- * vectors, where the outer vector is of
- * the size of the number of selected
- * components, and each inner vector has as
- * many components as there are (locally
- * owned) degrees of freedom in the
- * selected components. Note that any
- * matrix associated with this null space
- * must have been constructed using the
- * same <tt>component_mask</tt> argument,
- * since the numbering of DoFs is done
- * relative to the selected dofs, not to
- * all dofs.
- *
- * The main reason for this
- * program is the use of the
- * null space with the
- * AMG preconditioner.
- */
+ const bool interior_dofs_only,
+ const bool boundary_dofs = false);
+
+ /**
+ * Create a block list with only a
+ * single patch, which in turn
+ * contains all degrees of freedom
+ * on the given level.
+ *
+ * This function is mostly a
+ * closure on level 0 for functions
+ * like make_child_patches() and
+ * make_vertex_patches(), which may
+ * produce an empty patch list.
+ *
+ * @arg <tt>block_list</tt>: the
+ * SparsityPattern into which the
+ * patches will be stored.
+ * @arg <tt>dof_handler</tt>: The
+ * multilevel dof handler
+ * providing the topology operated
+ * on.
+ * @arg <tt>level</tt> The grid
+ * level used for building the list.
+ * @arg
+ * <tt>interior_dofs_only</tt>:
+ * if true, exclude degrees of freedom on
+ * the boundary of the domain.
+ */
+ template <class DH>
+ void make_single_patch(SparsityPattern &block_list,
+ const DH &dof_handler,
+ const unsigned int level,
+ const bool interior_dofs_only = false);
+
+ //@}
+ /**
+ * Extract a vector that represents the
+ * constant modes of the DoFHandler for the
+ * components chosen by
+ * <tt>component_mask</tt> (see @ref
+ * GlossComponentMask). The constant modes
+ * on a discretization are the null space
+ * of a Laplace operator on the selected
+ * components with Neumann boundary
+ * conditions applied. The null space is a
+ * necessary ingredient for obtaining a
+ * good AMG preconditioner when using the
+ * class TrilinosWrappers::PreconditionAMG.
+ * Since the ML AMG package only works on
+ * algebraic properties of the respective
+ * matrix, it has no chance to detect
+ * whether the matrix comes from a scalar
+ * or a vector valued problem. However, a
+ * near null space supplies exactly the
+ * needed information about these
+ * components. The null space will consist
+ * of as many vectors as there are true
+ * arguments in <tt>component_mask</tt>
+ * (see @ref GlossComponentMask), each of
+ * which will be one in one vector
+ * component and zero in all others. We
+ * store this object in a vector of
+ * vectors, where the outer vector is of
+ * the size of the number of selected
+ * components, and each inner vector has as
+ * many components as there are (locally
+ * owned) degrees of freedom in the
+ * selected components. Note that any
+ * matrix associated with this null space
+ * must have been constructed using the
+ * same <tt>component_mask</tt> argument,
+ * since the numbering of DoFs is done
+ * relative to the selected dofs, not to
+ * all dofs.
+ *
+ * The main reason for providing this
+ * function is the use of the
+ * null space with the
+ * AMG preconditioner.
+ */
template <class DH>
void
extract_constant_modes (const DH &dof_handler,
get_active_fe_indices (const DH &dof_handler,
std::vector<unsigned int> &active_fe_indices);
- /**
- * Count how many degrees of
- * freedom out of the total
- * number belong to each
- * component. If the number of
- * components the finite element
- * has is one (i.e. you only have
- * one scalar variable), then the
- * number in this component
- * obviously equals the total
- * number of degrees of
- * freedom. Otherwise, the sum of
- * the DoFs in all the components
- * needs to equal the total
- * number.
- *
- * However, the last statement
- * does not hold true if the
- * finite element is not
- * primitive, i.e. some or all of
- * its shape functions are
- * non-zero in more than one
- * vector component. This
- * applies, for example, to the
- * Nedelec or Raviart-Thomas
- * elements. In this case, a
- * degree of freedom is counted
- * in each component in which it
- * is non-zero, so that the sum
- * mentioned above is greater
- * than the total number of
- * degrees of freedom.
- *
- * This behavior can be switched
- * off by the optional parameter
- * <tt>vector_valued_once</tt>. If
- * this is <tt>true</tt>, the
- * number of components of a
- * nonprimitive vector valued
- * element is collected only in
- * the first component. All other
- * components will have a count
- * of zero.
- *
- * The additional optional
- * argument @p target_component
- * allows for a re-sorting and
- * grouping of components. To
- * this end, it contains for each
- * component the component number
- * it shall be counted as. Having
- * the same number entered
- * several times sums up several
- * components as the same. One of
- * the applications of this
- * argument is when you want to
- * form block matrices and
- * vectors, but want to pack
- * several components into the
- * same block (for example, when
- * you have @p dim velocities
- * and one pressure, to put all
- * velocities into one block, and
- * the pressure into another).
- *
- * The result is returned in @p
- * dofs_per_component. Note that
- * the size of @p
- * dofs_per_component needs to be
- * enough to hold all the indices
- * specified in @p
- * target_component. If this is
- * not the case, an assertion is
- * thrown. The indices not
- * targeted by target_components
- * are left untouched.
- */
+ /**
+ * Count how many degrees of
+ * freedom out of the total
+ * number belong to each
+ * component. If the number of
+ * components the finite element
+ * has is one (i.e. you only have
+ * one scalar variable), then the
+ * number in this component
+ * obviously equals the total
+ * number of degrees of
+ * freedom. Otherwise, the sum of
+ * the DoFs in all the components
+ * needs to equal the total
+ * number.
+ *
+ * However, the last statement
+ * does not hold true if the
+ * finite element is not
+ * primitive, i.e. some or all of
+ * its shape functions are
+ * non-zero in more than one
+ * vector component. This
+ * applies, for example, to the
+ * Nedelec or Raviart-Thomas
+ * elements. In this case, a
+ * degree of freedom is counted
+ * in each component in which it
+ * is non-zero, so that the sum
+ * mentioned above is greater
+ * than the total number of
+ * degrees of freedom.
+ *
+ * This behavior can be switched
+ * off by the optional parameter
+ * <tt>vector_valued_once</tt>. If
+ * this is <tt>true</tt>, the
+ * number of components of a
+ * nonprimitive vector valued
+ * element is collected only in
+ * the first component. All other
+ * components will have a count
+ * of zero.
+ *
+ * The additional optional
+ * argument @p target_component
+ * allows for a re-sorting and
+ * grouping of components. To
+ * this end, it contains for each
+ * component the component number
+ * it shall be counted as. Entering
+ * the same number several times
+ * groups those components together
+ * and counts them as one. One of
+ * the applications of this
+ * argument is when you want to
+ * form block matrices and
+ * vectors, but want to pack
+ * several components into the
+ * same block (for example, when
+ * you have @p dim velocities
+ * and one pressure, to put all
+ * velocities into one block, and
+ * the pressure into another).
+ *
+ * The result is returned in @p
+ * dofs_per_component. Note that
+ * the size of @p
+ * dofs_per_component needs to be
+ * enough to hold all the indices
+ * specified in @p
+ * target_component. If this is
+ * not the case, an assertion is
+ * thrown. The indices not
+ * targeted by target_components
+ * are left untouched.
+ */
template <class DH>
void
- count_dofs_per_component (const DH & dof_handler,
- std::vector<types::global_dof_index>& dofs_per_component,
- const bool vector_valued_once = false,
- std::vector<types::global_dof_index> target_component
- = std::vector<types::global_dof_index>());
-
- /**
- * Count the degrees of freedom
- * in each block. This function
- * is similar to
- * count_dofs_per_component(),
- * with the difference that the
- * counting is done by
- * blocks. See @ref GlossBlock
- * "blocks" in the glossary for
- * details. Again the vectors are
- * assumed to have the correct
- * size before calling this
- * function. If this is not the
- * case, an assertion is thrown.
- *
- * This function is used in the
- * step-22,
- * step-31, and
- * step-32 tutorial
- * programs.
- *
- * @pre The dofs_per_block
- * variable has as many
- * components as the finite
- * element used by the
- * dof_handler argument has
- * blocks, or alternatively as
- * many blocks as are enumerated
- * in the target_blocks argument
- * if given.
- */
+ count_dofs_per_component (const DH &dof_handler,
- std::vector<unsigned int> &dofs_per_component,
++ std::vector<types::global_dof_index> &dofs_per_component,
+ const bool vector_valued_once = false,
- std::vector<unsigned int> target_component
- = std::vector<unsigned int>());
++ std::vector<types::global_dof_index> target_component
++ = std::vector<types::global_dof_index>());
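// A sketch of the grouping described above for a vector-valued problem with
// dim velocities and one pressure (dim is the space dimension; dof_handler is
// assumed to use a matching FESystem; DoFTools namespace as in deal.II): all
// velocity components are counted as group 0, the pressure as group 1.

std::vector<types::global_dof_index> target_component (dim+1, 0);
target_component[dim] = 1;

std::vector<types::global_dof_index> dofs_per_component (2);
DoFTools::count_dofs_per_component (dof_handler, dofs_per_component,
                                    false, target_component);
const types::global_dof_index n_u = dofs_per_component[0];   // all velocity DoFs
const types::global_dof_index n_p = dofs_per_component[1];   // pressure DoFs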
+
+ /**
+ * Count the degrees of freedom
+ * in each block. This function
+ * is similar to
+ * count_dofs_per_component(),
+ * with the difference that the
+ * counting is done by
+ * blocks. See @ref GlossBlock
+ * "blocks" in the glossary for
+ * details. Again the vectors are
+ * assumed to have the correct
+ * size before calling this
+ * function. If this is not the
+ * case, an assertion is thrown.
+ *
+ * This function is used in the
+ * step-22,
+ * step-31, and
+ * step-32 tutorial
+ * programs.
+ *
+ * @pre The dofs_per_block
+ * variable has as many
+ * components as the finite
+ * element used by the
+ * dof_handler argument has
+ * blocks, or alternatively as
+ * many blocks as are enumerated
+ * in the target_blocks argument
+ * if given.
+ */
template <class DH>
void
count_dofs_per_block (const DH &dof,
- std::vector<unsigned int> &dofs_per_block,
- const std::vector<unsigned int> &target_block
- = std::vector<unsigned int>());
+ std::vector<types::global_dof_index> &dofs_per_block,
+ const std::vector<types::global_dof_index> &target_block
- = std::vector<types::global_dof_index>());
-
- /**
- * @deprecated See the previous
- * function with the same name
- * for a description. This
- * function exists for
- * compatibility with older
- * versions only.
- */
++ = std::vector<types::global_dof_index>());
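// A sketch along the lines of step-22 (dof_handler is assumed to use an
// FESystem with dim velocity components and one pressure and to have been
// renumbered component- or block-wise already; DoFTools namespace as in
// deal.II):

std::vector<types::global_dof_index> block_component (dim+1, 0);
block_component[dim] = 1;               // velocities -> block 0, pressure -> block 1

std::vector<types::global_dof_index> dofs_per_block (2);
DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, block_component);
// dofs_per_block[0] and dofs_per_block[1] can now be used, for instance, to
// size the blocks of a BlockSparsityPattern or a BlockVector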
+
+ /**
+ * @deprecated See the previous
+ * function with the same name
+ * for a description. This
+ * function exists for
+ * compatibility with older
+ * versions only.
+ */
template <int dim, int spacedim>
void
- count_dofs_per_component (const DoFHandler<dim,spacedim>& dof_handler,
- std::vector<types::global_dof_index>& dofs_per_component,
- std::vector<types::global_dof_index> target_component);
-
- /**
- * This function can be used when
- * different variables shall be
- * discretized on different
- * grids, where one grid is
- * coarser than the other. This
- * idea might seem nonsensical at
- * first, but has reasonable
- * applications in inverse
- * (parameter estimation)
- * problems, where there might
- * not be enough information to
- * recover the parameter on the
- * same grid as the state
- * variable; furthermore, the
- * smoothness properties of state
- * variable and parameter might
- * not be too much related, so
- * using different grids might be
- * an alternative to using
- * stronger regularization of the
- * problem.
- *
- * The basic idea of this
- * function is explained in the
- * following. Let us, for
- * convenience, denote by
- * ``parameter grid'' the coarser
- * of the two grids, and by
- * ``state grid'' the finer of
- * the two. We furthermore assume
- * that the finer grid can be
- * obtained by refinement of the
- * coarser one, i.e. the fine
- * grid is at least as much
- * refined as the coarse grid at
- * each point of the
- * domain. Then, each shape
- * function on the coarse grid
- * can be represented as a linear
- * combination of shape functions
- * on the fine grid (assuming
- * identical ansatz
- * spaces). Thus, if we
- * discretize as usual, using
- * shape functions on the fine
- * grid, we can consider the
- * restriction that the parameter
- * variable shall in fact be
- * discretized by shape functions
- * on the coarse grid as a
- * constraint. These constraints
- * are linear and happen to have
- * the form managed by the
- * ``ConstraintMatrix'' class.
- *
- * The construction of these
- * constraints is done as
- * follows: for each of the
- * degrees of freedom (i.e. shape
- * functions) on the coarse grid,
- * we compute its representation
- * on the fine grid, i.e. how the
- * linear combination of shape
- * functions on the fine grid
- * looks like that resembles the
- * shape function on the coarse
- * grid. From this information,
- * we can then compute the
- * constraints which have to hold
- * if a solution of a linear
- * equation on the fine grid
- * shall be representable on the
- * coarse grid. The exact
- * algorithm how these
- * constraints can be computed is
- * rather complicated and is best
- * understood by reading the
- * source code, which contains
- * many comments.
- *
- * Before explaining the use of
- * this function, we would like
- * to state that the total number
- * of degrees of freedom used for
- * the discretization is not
- * reduced by the use of this
- * function, i.e. even though we
- * discretize one variable on a
- * coarser grid, the total number
- * of degrees of freedom is that
- * of the fine grid. This seems
- * to be counter-productive,
- * since it does not give us a
- * benefit from using a coarser
- * grid. The reason why it may be
- * useful to choose this approach
- * nonetheless is three-fold:
- * first, as stated above, there
- * might not be enough
- * information to recover a
- * parameter on a fine grid,
- * i.e. we chose to discretize it
- * on the coarse grid not to save
- * DoFs, but for other
- * reasons. Second, the
- * ``ConstraintMatrix'' includes
- * the constraints into the
- * linear system of equations, by
- * which constrained nodes become
- * dummy nodes; we may therefore
- * exclude them from the linear
- * algebra, for example by
- * sorting them to the back of
- * the DoF numbers and simply
- * calling the solver for the
- * upper left block of the matrix
- * which works on the
- * non-constrained nodes only,
- * thus actually realizing the
- * savings in numerical effort
- * from the reduced number of
- * actual degrees of freedom. The
- * third reason is that for some
- * or other reason we have chosen
- * to use two different grids, it
- * may be actually quite
- * difficult to write a function
- * that assembles the system
- * matrix for finite element
- * spaces on different grids;
- * using the approach of
- * constraints as with this
- * function allows to use
- * standard techniques when
- * discretizing on only one grid
- * (the finer one) without having
- * to take care of the fact that
- * one or several of the variable
- * actually belong to different
- * grids.
- *
- * The use of this function is as
- * follows: it accepts as
- * parameters two DoF Handlers,
- * the first of which refers to
- * the coarse grid and the second
- * of which is the fine grid. On
- * both, a finite element is
- * represented by the DoF handler
- * objects, which will usually
- * have several components, which
- * may belong to different finite
- * elements. The second and
- * fourth parameter of this
- * function therefore state which
- * variable on the coarse grid
- * shall be used to restrict the
- * stated component on the fine
- * grid. Of course, the finite
- * elements used for the
- * respective components on the
- * two grids need to be the
- * same. An example may clarify
- * this: consider the parameter
- * estimation mentioned briefly
- * above; there, on the fine grid
- * the whole discretization is
- * done, thus the variables are
- * ``u'', ``q'', and the Lagrange
- * multiplier ``lambda'', which
- * are discretized using
- * continuous linear, piecewise
- * constant discontinuous, and
- * continuous linear elements,
- * respectively. Only the
- * parameter ``q'' shall be
- * represented on the coarse
- * grid, thus the DoFHandler
- * object on the coarse grid
- * represents only one variable,
- * discretized using piecewise
- * constant discontinuous
- * elements. Then, the parameter
- * denoting the component on the
- * coarse grid would be zero (the
- * only possible choice, since
- * the variable on the coarse
- * grid is scalar), and one on
- * the fine grid (corresponding
- * to the variable ``q''; zero
- * would be ``u'', two would be
- * ``lambda''). Furthermore, an
- * object of type IntergridMap
- * is needed; this could in
- * principle be generated by the
- * function itself from the two
- * DoFHandler objects, but since
- * it is probably available
- * anyway in programs that use
- * this function, we shall use it
- * instead of re-generating
- * it. Finally, the computed
- * constraints are entered into a
- * variable of type
- * ConstraintMatrix; the
- * constraints are added,
- * i.e. previous contents which
- * may have, for example, be
- * obtained from hanging nodes,
- * are not deleted, so that you
- * only need one object of this
- * type.
- */
+ count_dofs_per_component (const DoFHandler<dim,spacedim> &dof_handler,
- std::vector<unsigned int> &dofs_per_component,
- std::vector<unsigned int> target_component);
++ std::vector<types::global_dof_index> &dofs_per_component,
++ std::vector<types::global_dof_index> target_component);
+
+ /**
+ * This function can be used when
+ * different variables shall be
+ * discretized on different
+ * grids, where one grid is
+ * coarser than the other. This
+ * idea might seem nonsensical at
+ * first, but has reasonable
+ * applications in inverse
+ * (parameter estimation)
+ * problems, where there might
+ * not be enough information to
+ * recover the parameter on the
+ * same grid as the state
+ * variable; furthermore, the
+ * smoothness properties of state
+ * variable and parameter might
+ * not be too much related, so
+ * using different grids might be
+ * an alternative to using
+ * stronger regularization of the
+ * problem.
+ *
+ * The basic idea of this
+ * function is explained in the
+ * following. Let us, for
+ * convenience, denote by
+ * ``parameter grid'' the coarser
+ * of the two grids, and by
+ * ``state grid'' the finer of
+ * the two. We furthermore assume
+ * that the finer grid can be
+ * obtained by refinement of the
+ * coarser one, i.e. the fine
+ * grid is at least as much
+ * refined as the coarse grid at
+ * each point of the
+ * domain. Then, each shape
+ * function on the coarse grid
+ * can be represented as a linear
+ * combination of shape functions
+ * on the fine grid (assuming
+ * identical ansatz
+ * spaces). Thus, if we
+ * discretize as usual, using
+ * shape functions on the fine
+ * grid, we can consider the
+ * restriction that the parameter
+ * variable shall in fact be
+ * discretized by shape functions
+ * on the coarse grid as a
+ * constraint. These constraints
+ * are linear and happen to have
+ * the form managed by the
+ * ``ConstraintMatrix'' class.
+ *
+ * The construction of these
+ * constraints is done as
+ * follows: for each of the
+ * degrees of freedom (i.e. shape
+ * functions) on the coarse grid,
+ * we compute its representation
+ * on the fine grid, i.e. which
+ * linear combination of shape
+ * functions on the fine grid
+ * reproduces the shape function
+ * on the coarse grid. From this
+ * information,
+ * we can then compute the
+ * constraints which have to hold
+ * if a solution of a linear
+ * equation on the fine grid
+ * shall be representable on the
+ * coarse grid. The exact
+ * algorithm how these
+ * constraints can be computed is
+ * rather complicated and is best
+ * understood by reading the
+ * source code, which contains
+ * many comments.
+ *
+ * Before explaining the use of
+ * this function, we would like
+ * to state that the total number
+ * of degrees of freedom used for
+ * the discretization is not
+ * reduced by the use of this
+ * function, i.e. even though we
+ * discretize one variable on a
+ * coarser grid, the total number
+ * of degrees of freedom is that
+ * of the fine grid. This seems
+ * to be counter-productive,
+ * since it does not give us a
+ * benefit from using a coarser
+ * grid. The reason why it may be
+ * useful to choose this approach
+ * nonetheless is three-fold:
+ * first, as stated above, there
+ * might not be enough
+ * information to recover a
+ * parameter on a fine grid,
+ * i.e. we chose to discretize it
+ * on the coarse grid not to save
+ * DoFs, but for other
+ * reasons. Second, the
+ * ``ConstraintMatrix'' includes
+ * the constraints into the
+ * linear system of equations, by
+ * which constrained nodes become
+ * dummy nodes; we may therefore
+ * exclude them from the linear
+ * algebra, for example by
+ * sorting them to the back of
+ * the DoF numbers and simply
+ * calling the solver for the
+ * upper left block of the matrix
+ * which works on the
+ * non-constrained nodes only,
+ * thus actually realizing the
+ * savings in numerical effort
+ * from the reduced number of
+ * actual degrees of freedom. The
+ * third reason is that if, for
+ * one reason or another, we have
+ * chosen to use two different
+ * grids, it may actually be quite
+ * difficult to write a function
+ * that assembles the system
+ * matrix for finite element
+ * spaces on different grids;
+ * using the approach of
+ * constraints as with this
+ * function allows one to use
+ * standard techniques when
+ * discretizing on only one grid
+ * (the finer one) without having
+ * to take care of the fact that
+ * one or several of the variables
+ * actually belong to different
+ * grids.
+ *
+ * The use of this function is as
+ * follows: it accepts as
+ * parameters two DoF Handlers,
+ * the first of which refers to
+ * the coarse grid and the second
+ * of which is the fine grid. On
+ * both, a finite element is
+ * represented by the DoF handler
+ * objects, which will usually
+ * have several components, which
+ * may belong to different finite
+ * elements. The second and
+ * fourth parameter of this
+ * function therefore state which
+ * variable on the coarse grid
+ * shall be used to restrict the
+ * stated component on the fine
+ * grid. Of course, the finite
+ * elements used for the
+ * respective components on the
+ * two grids need to be the
+ * same. An example may clarify
+ * this: consider the parameter
+ * estimation mentioned briefly
+ * above; there, on the fine grid
+ * the whole discretization is
+ * done, thus the variables are
+ * ``u'', ``q'', and the Lagrange
+ * multiplier ``lambda'', which
+ * are discretized using
+ * continuous linear, piecewise
+ * constant discontinuous, and
+ * continuous linear elements,
+ * respectively. Only the
+ * parameter ``q'' shall be
+ * represented on the coarse
+ * grid, thus the DoFHandler
+ * object on the coarse grid
+ * represents only one variable,
+ * discretized using piecewise
+ * constant discontinuous
+ * elements. Then, the parameter
+ * denoting the component on the
+ * coarse grid would be zero (the
+ * only possible choice, since
+ * the variable on the coarse
+ * grid is scalar), and one on
+ * the fine grid (corresponding
+ * to the variable ``q''; zero
+ * would be ``u'', two would be
+ * ``lambda''). Furthermore, an
+ * object of type InterGridMap
+ * is needed; this could in
+ * principle be generated by the
+ * function itself from the two
+ * DoFHandler objects, but since
+ * it is probably available
+ * anyway in programs that use
+ * this function, we shall use it
+ * instead of re-generating
+ * it. Finally, the computed
+ * constraints are entered into a
+ * variable of type
+ * ConstraintMatrix; the
+ * constraints are added,
+ * i.e. previous contents which
+ * may have, for example, been
+ * obtained from hanging nodes,
+ * are not deleted, so that you
+ * only need one object of this
+ * type.
+ */
template <int dim, int spacedim>
void
compute_intergrid_constraints (const DoFHandler<dim,spacedim> &coarse_grid,
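As a usage sketch for the parameter estimation example described above (the names `coarse_dof_handler' and `fine_dof_handler' are assumptions for illustration, not library names):

  // build the intergrid map once; it is usually available anyway
  InterGridMap<DoFHandler<dim> > coarse_to_fine_map;
  coarse_to_fine_map.make_mapping (coarse_dof_handler, fine_dof_handler);

  ConstraintMatrix constraints;
  DoFTools::compute_intergrid_constraints (coarse_dof_handler, 0,  // component q on the coarse grid
                                           fine_dof_handler,   1,  // component q on the fine grid
                                           coarse_to_fine_map,
                                           constraints);
  constraints.close ();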
template <class DH>
void
map_dof_to_boundary_indices (const DH &dof_handler,
- std::vector<types::global_dof_index> &mapping);
-
- /**
- * Same as the previous function,
- * except that only those parts
- * of the boundary are considered
- * for which the boundary
- * indicator is listed in the
- * second argument.
- *
- * See the general doc of this
- * class for more information.
- */
- std::vector<unsigned int> &mapping);
++ std::vector<types::global_dof_index> &mapping);
+
+ /**
+ * Same as the previous function,
+ * except that only those parts
+ * of the boundary are considered
+ * for which the boundary
+ * indicator is listed in the
+ * second argument.
+ *
+ * See the general doc of this
+ * class for more information.
+ */
template <class DH>
void
map_dof_to_boundary_indices (const DH &dof_handler,
- const std::set<types::boundary_id> &boundary_indicators,
- std::vector<types::global_dof_index> &mapping);
-
- /**
- * Return a list of support
- * points (see this
- * @ref GlossSupport "glossary entry")
- * for all the degrees of
- * freedom handled by this DoF
- * handler object. This function,
- * of course, only works if the
- * finite element object used by
- * the DoF handler object
- * actually provides support
- * points, i.e. no edge elements
- * or the like. Otherwise, an
- * exception is thrown.
- *
- * The given array must have a
- * length of as many elements as
- * there are degrees of freedom.
- */
+ const std::set<types::boundary_id> &boundary_indicators,
- std::vector<unsigned int> &mapping);
++ std::vector<types::global_dof_index> &mapping);
+
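A minimal sketch of the boundary-restricted variant (the boundary id 0 and the name `dof_handler' are assumptions):

  std::set<types::boundary_id> boundary_ids;
  boundary_ids.insert (0);

  std::vector<types::global_dof_index> dof_to_boundary (dof_handler.n_dofs());
  DoFTools::map_dof_to_boundary_indices (dof_handler, boundary_ids, dof_to_boundary);
  // DoFs not located on the selected boundary are marked with an invalid index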
+ /**
+ * Return a list of support
+ * points (see this
+ * @ref GlossSupport "glossary entry")
+ * for all the degrees of
+ * freedom handled by this DoF
+ * handler object. This function,
+ * of course, only works if the
+ * finite element object used by
+ * the DoF handler object
+ * actually provides support
+ * points, i.e. no edge elements
+ * or the like. Otherwise, an
+ * exception is thrown.
+ *
- * @pre The given array must have a
++ * The given array must have a
+ * length of as many elements as
+ * there are degrees of freedom.
- *
- * @note The precondition to this function
- * that the output argument needs to have
- * size equal to the total number of degrees
- * of freedom makes this function
- * unsuitable for the case that the given
- * DoFHandler object derives from a
- * parallel::distributed::Triangulation object.
- * Consequently, this function will produce an
- * error if called with such a DoFHandler.
+ */
template <int dim, int spacedim>
void
map_dofs_to_support_points (const Mapping<dim,spacedim> &mapping,
const DoFHandler<dim,spacedim> &dof_handler,
std::vector<Point<spacedim> > &support_points);
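A short usage sketch, assuming a Lagrange element (so that support points exist) and an already distributed `dof_handler':

  MappingQ1<dim> mapping;
  std::vector<Point<dim> > support_points (dof_handler.n_dofs());
  DoFTools::map_dofs_to_support_points (mapping, dof_handler, support_points);
  // support_points[i] now holds the location of degree of freedom i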
- /**
- * Same as above for the hp case.
- */
+ /**
- * Same as the previous function but for the hp case.
- */
- template <int dim, int spacedim>
- void
- map_dofs_to_support_points (const dealii::hp::MappingCollection<dim,spacedim> &mapping,
- const hp::DoFHandler<dim,spacedim> &dof_handler,
- std::vector<Point<spacedim> > &support_points);
-
- /**
- * This function is a version of the above map_dofs_to_support_points
- * function that doesn't simply return a vector of support points (see
- * this @ref GlossSupport "glossary entry") with one
- * entry for each global degree of freedom, but instead a map that
- * maps from the DoFs index to its location. The point of this
- * function is that it is also usable in cases where the DoFHandler
- * is based on a parallel::distributed::Triangulation object. In such cases,
- * each processor will not be able to determine the support point location
- * of all DoFs, and worse no processor may be able to hold a vector that
- * would contain the locations of all DoFs even if they were known. As
- * a consequence, this function constructs a map from those DoFs for which
- * we can know the locations (namely, those DoFs that are
- * locally relevant (see @ref GlossLocallyRelevantDof "locally relevant DoFs")
- * to their locations.
- *
- * For non-distributed triangulations, the map returned as @p support_points
- * is of course dense, i.e., every DoF is to be found in it.
- *
- * @param mapping The mapping from the reference cell to the real cell on
- * which DoFs are defined.
- * @param dof_handler The object that describes which DoF indices live on
- * which cell of the triangulation.
- * @param support_points A map that for every locally relevant DoF index
- * contains the corresponding location in real space coordinates.
- * Previous content of this object is deleted in this function.
++ * Same as above for the hp case.
+ */
- template <int dim, int spacedim>
- void
- map_dofs_to_support_points (const Mapping<dim,spacedim> &mapping,
- const DoFHandler<dim,spacedim> &dof_handler,
- std::map<unsigned int, Point<spacedim> > &support_points);
- /**
- * Same as the previous function but for the hp case.
- */
template <int dim, int spacedim>
void
map_dofs_to_support_points (const dealii::hp::MappingCollection<dim,spacedim> &mapping,
- const hp::DoFHandler<dim,spacedim> &dof_handler,
- std::vector<Point<spacedim> > &support_points);
-
- /**
- * This is the opposite function
- * to the one above. It generates
- * a map where the keys are the
- * support points of the degrees
- * of freedom, while the values
- * are the DoF indices. For a definition
- * of support points, see this
- * @ref GlossSupport "glossary entry".
- *
- * Since there is no natural
- * order in the space of points
- * (except for the 1d case), you
- * have to provide a map with an
- * explicitly specified
- * comparator object. This
- * function is therefore
- * templatized on the comparator
- * object. Previous content of
- * the map object is deleted in
- * this function.
- *
- * Just as with the function
- * above, it is assumed that the
- * finite element in use here
- * actually supports the notion
- * of support points of all its
- * components.
- */
+ const hp::DoFHandler<dim,spacedim> &dof_handler,
- std::map<unsigned int, Point<spacedim> > &support_points);
++ std::vector<Point<spacedim> > &support_points);
+
+ /**
+ * This is the opposite function
+ * to the one above. It generates
+ * a map where the keys are the
+ * support points of the degrees
+ * of freedom, while the values
+ * are the DoF indices. For a definition
+ * of support points, see this
+ * @ref GlossSupport "glossary entry".
+ *
+ * Since there is no natural
+ * order in the space of points
+ * (except for the 1d case), you
+ * have to provide a map with an
+ * explicitly specified
+ * comparator object. This
+ * function is therefore
+ * templatized on the comparator
+ * object. Previous content of
+ * the map object is deleted in
+ * this function.
+ *
+ * Just as with the function
+ * above, it is assumed that the
+ * finite element in use here
+ * actually supports the notion
+ * of support points of all its
+ * components.
+ */
template <class DH, class Comp>
void
map_support_points_to_dofs (const Mapping<DH::dimension, DH::space_dimension> &mapping,
- const DH &dof_handler,
- std::map<Point<DH::space_dimension>, types::global_dof_index, Comp> &point_to_index_map);
-
- /**
- * Map a coupling table from the
- * user friendly organization by
- * components to the organization
- * by blocks. Specializations of
- * this function for DoFHandler
- * and hp::DoFHandler are
- * required due to the different
- * results of their finite
- * element access.
- *
- * The return vector will be
- * initialized to the correct
- * length inside this function.
- */
+ const DH &dof_handler,
- std::map<Point<DH::space_dimension>, unsigned int, Comp> &point_to_index_map);
++ std::map<Point<DH::space_dimension>, types::global_dof_index, Comp> &point_to_index_map);
+
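A sketch with a hand-written lexicographic comparator (the comparator type is an assumption, not part of the library):

  // establishes a strict weak ordering on points, component by component
  struct ComparePoints
  {
    bool operator() (const Point<2> &p, const Point<2> &q) const
    {
      for (unsigned int d=0; d<2; ++d)
        if (p[d] != q[d])
          return p[d] < q[d];
      return false;
    }
  };

  std::map<Point<2>, types::global_dof_index, ComparePoints> point_to_dof;
  DoFTools::map_support_points_to_dofs (mapping, dof_handler, point_to_dof);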
+ /**
+ * Map a coupling table from the
+ * user friendly organization by
+ * components to the organization
+ * by blocks. Specializations of
+ * this function for DoFHandler
+ * and hp::DoFHandler are
+ * required due to the different
+ * results of their finite
+ * element access.
+ *
+ * The return vector will be
+ * initialized to the correct
+ * length inside this function.
+ */
template <int dim, int spacedim>
void
- convert_couplings_to_blocks (const hp::DoFHandler<dim,spacedim>& dof_handler,
- const Table<2, Coupling>& table_by_component,
- std::vector<Table<2,Coupling> >& tables_by_block);
-
- /**
- * Make a constraint matrix for the
- * constraints that result from zero
- * boundary values.
- *
- * This function constrains all
- * degrees of freedom on the
- * boundary. Optionally, you can
- * add a component mask, which
- * restricts this functionality
- * to a subset of an FESystem.
- *
- * For non-@ref GlossPrimitive "primitive"
- * shape functions, any degree of freedom
- * is affected that belongs to a
- * shape function where at least
- * one of its nonzero components
- * is affected.
- *
- * The last argument indicates which
- * components of the solution
- * vector should be constrained to zero
- * (see @ref GlossComponentMask).
- *
- * This function is used
- * in step-36, for
- * example.
- *
- * @ingroup constraints
- */
+ convert_couplings_to_blocks (const hp::DoFHandler<dim,spacedim> &dof_handler,
+ const Table<2, Coupling> &table_by_component,
+ std::vector<Table<2,Coupling> > &tables_by_block);
+
+ /**
+ * Make a constraint matrix for the
+ * constraints that result from zero
+ * boundary values.
+ *
+ * This function constrains all
+ * degrees of freedom on the
+ * boundary. Optionally, you can
+ * add a component mask, which
+ * restricts this functionality
+ * to a subset of an FESystem.
+ *
+ * For non-@ref GlossPrimitive "primitive"
+ * shape functions, any degree of freedom
+ * is affected that belongs to a
+ * shape function where at least
+ * one of its nonzero components
+ * is affected.
+ *
+ * The last argument indicates which
+ * components of the solution
+ * vector should be constrained to zero
+ * (see @ref GlossComponentMask).
+ *
+ * This function is used
+ * in step-36, for
+ * example.
+ *
+ * @ingroup constraints
+ */
template <int dim, int spacedim, template <int, int> class DH>
void
make_zero_boundary_constraints (const DH<dim,spacedim> &dof,
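A minimal usage sketch (assuming the optional component mask argument keeps its default, so all components are constrained):

  ConstraintMatrix boundary_constraints;
  DoFTools::make_zero_boundary_constraints (dof_handler, boundary_constraints);
  boundary_constraints.close ();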
map_support_points_to_dofs (
const Mapping<DH::dimension,DH::space_dimension> &mapping,
const DH &dof_handler,
- std::map<Point<DH::space_dimension>, unsigned int, Comp> &point_to_index_map)
+ std::map<Point<DH::space_dimension>, types::global_dof_index, Comp> &point_to_index_map)
{
- // let the checking of arguments be
- // done by the function first
- // called
+ // let the checking of arguments be
+ // done by the function first
+ // called
std::vector<Point<DH::space_dimension> > support_points (dof_handler.n_dofs());
map_dofs_to_support_points (mapping, dof_handler, support_points);
- // now copy over the results of the
- // previous function into the
- // output arg
+ // now copy over the results of the
+ // previous function into the
+ // output arg
point_to_index_map.clear ();
for (unsigned int i=0; i<dof_handler.n_dofs(); ++i)
point_to_index_map[support_points[i]] = i;
/**
* Default constructor.
*/
- NumberCache ();
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes) of
- * this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * This function resets all the stored information.
- */
- void clear ();
-
- /**
- * Total number of dofs,
- * accumulated over all
- * processors that may
- * participate on this mesh.
- */
- types::global_dof_index n_global_dofs;
-
- /**
- * Number of dofs owned by
- * this MPI process. If this
- * is a sequential
- * computation, then this
- * equals n_global_dofs.
- */
- types::global_dof_index n_locally_owned_dofs;
-
- /**
- * An index set denoting the
- * set of locally owned
- * dofs. If this is a
- * sequential computation,
- * then it contains the
- * entire range
- * [0,n_global_dofs).
- */
- IndexSet locally_owned_dofs;
-
- /**
- * The number of dofs owned
- * by each of the various MPI
- * processes. If this is a
- * sequential job, then the
- * vector contains a single
- * element equal to
- * n_global_dofs.
- */
- std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor;
-
- /**
- * The dofs owned by each of
- * the various MPI
- * processes. If this is a
- * sequential job, then the
- * vector has a single
- * element equal to
- * locally_owned_dofs.
- */
- std::vector<IndexSet> locally_owned_dofs_per_processor;
-
- /**
- * Read or write the data of this object to or
- * from a stream for the purpose of serialization
- */
- template <class Archive>
- void serialize (Archive & ar,
- const unsigned int version);
+ NumberCache ();
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes) of
+ * this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * This function resets all the stored information.
+ */
+ void clear ();
+
+ /**
+ * Total number of dofs,
+ * accumulated over all
+ * processors that may
+ * participate on this mesh.
+ */
- unsigned int n_global_dofs;
++ types::global_dof_index n_global_dofs;
+
+ /**
+ * Number of dofs owned by
+ * this MPI process. If this
+ * is a sequential
+ * computation, then this
+ * equals n_global_dofs.
+ */
- unsigned int n_locally_owned_dofs;
++ types::global_dof_index n_locally_owned_dofs;
+
+ /**
+ * An index set denoting the
+ * set of locally owned
+ * dofs. If this is a
+ * sequential computation,
+ * then it contains the
+ * entire range
+ * [0,n_global_dofs).
+ */
+ IndexSet locally_owned_dofs;
+
+ /**
+ * The number of dofs owned
+ * by each of the various MPI
+ * processes. If this is a
+ * sequential job, then the
+ * vector contains a single
+ * element equal to
+ * n_global_dofs.
+ */
- std::vector<unsigned int> n_locally_owned_dofs_per_processor;
++ std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor;
+
+ /**
+ * The dofs owned by each of
+ * the various MPI
+ * processes. If this is a
+ * sequential job, then the
+ * vector has a single
+ * element equal to
+ * locally_owned_dofs.
+ */
+ std::vector<IndexSet> locally_owned_dofs_per_processor;
+
+ /**
+ * Read or write the data of this object to or
+ * from a stream for the purpose of serialization
+ */
+ template <class Archive>
+ void serialize (Archive &ar,
+ const unsigned int version);
};
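The serialize() member is the usual Boost.Serialization hook; a sketch of the typical implementation pattern (not necessarily the library's actual body) looks like this:

  template <class Archive>
  void NumberCache::serialize (Archive &ar, const unsigned int /*version*/)
  {
    // stream every member through the archive, in a fixed order
    ar &n_global_dofs &n_locally_owned_dofs
       &locally_owned_dofs
       &n_locally_owned_dofs_per_processor
       &locally_owned_dofs_per_processor;
  }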
template <int dim>
class FE_Nothing : public FiniteElement<dim>
{
- public:
-
- /**
- * Constructor. Argument denotes the
- * number of components to give this
- * finite element (default = 1).
- */
- FE_Nothing (unsigned int n_components = 1);
-
- /**
- * A sort of virtual copy
- * constructor. Some places in
- * the library, for example the
- * constructors of FESystem as
- * well as the hp::FECollection
- * class, need to make copied of
- * finite elements without
- * knowing their exact type. They
- * do so through this function.
- */
- virtual
- FiniteElement<dim> *
- clone() const;
-
- /**
- * Return a string that uniquely
- * identifies a finite
- * element. In this case it is
- * <code>FE_Nothing@<dim@></code>.
- */
- virtual
- std::string
- get_name() const;
-
- /**
- * Determine the values a finite
- * element should compute on
- * initialization of data for
- * FEValues.
- *
- * Given a set of flags
- * indicating what quantities are
- * requested from a FEValues
- * object, update_once() and
- * update_each() compute which
- * values must really be
- * computed. Then, the
- * <tt>fill_*_values</tt> functions
- * are called with the result of
- * these.
- *
- * In this case, since the element
- * has zero degrees of freedom and
- * no information can be computed on
- * it, this function simply returns
- * the default (empty) set of update
- * flags.
- */
-
- virtual
- UpdateFlags
- update_once (const UpdateFlags flags) const;
-
- /**
- * Complementary function for
- * update_once().
- *
- * While update_once() returns
- * the values to be computed on
- * the unit cell for yielding the
- * required data, this function
- * determines the values that
- * must be recomputed on each
- * cell.
- *
- * Refer to update_once() for
- * more details.
- */
- virtual
- UpdateFlags
- update_each (const UpdateFlags flags) const;
-
- /**
- * Return the value of the
- * @p ith shape function at the
- * point @p p. @p p is a point
- * on the reference element. Because the
- * current element has no degrees of freedom,
- * this function should obviously not be
- * called in practice. All this function
- * really does, therefore, is trigger an
- * exception.
- */
- virtual
- double
- shape_value (const unsigned int i, const Point<dim> &p) const;
-
- /**
- * Fill the fields of
- * FEValues. This function
- * performs all the operations
- * needed to compute the data of an
- * FEValues object.
- *
- * In the current case, this function
- * returns no meaningful information,
- * since the element has no degrees of
- * freedom.
- */
- virtual
- void
- fill_fe_values (const Mapping<dim> & mapping,
- const typename Triangulation<dim>::cell_iterator & cell,
- const Quadrature<dim> & quadrature,
- typename Mapping<dim>::InternalDataBase & mapping_data,
- typename Mapping<dim>::InternalDataBase & fedata,
- FEValuesData<dim,dim> & data,
- CellSimilarity::Similarity & cell_similarity) const;
-
- /**
- * Fill the fields of
- * FEFaceValues. This function
- * performs all the operations
- * needed to compute the data of an
- * FEFaceValues object.
- *
- * In the current case, this function
- * returns no meaningful information,
- * since the element has no degrees of
- * freedom.
- */
- virtual
- void
- fill_fe_face_values (const Mapping<dim> & mapping,
- const typename Triangulation<dim> :: cell_iterator & cell,
- const unsigned int face,
- const Quadrature<dim-1> & quadrature,
- typename Mapping<dim> :: InternalDataBase & mapping_data,
- typename Mapping<dim> :: InternalDataBase & fedata,
- FEValuesData<dim,dim> & data) const;
-
- /**
- * Fill the fields of
- * FESubFaceValues. This function
- * performs all the operations
- * needed to compute the data of an
- * FESubFaceValues object.
- *
- * In the current case, this function
- * returns no meaningful information,
- * since the element has no degrees of
- * freedom.
- */
- virtual
- void
- fill_fe_subface_values (const Mapping<dim> & mapping,
- const typename Triangulation<dim>::cell_iterator & cell,
- const unsigned int face,
- const unsigned int subface,
- const Quadrature<dim-1> & quadrature,
- typename Mapping<dim>::InternalDataBase & mapping_data,
- typename Mapping<dim>::InternalDataBase & fedata,
- FEValuesData<dim,dim> & data) const;
-
- /**
- * Prepare internal data
- * structures and fill in values
- * independent of the
- * cell. Returns a pointer to an
- * object of which the caller of
- * this function then has to
- * assume ownership (which
- * includes destruction when it
- * is no more needed).
- *
- * In the current case, this function
- * just returns a default pointer, since
- * no meaningful data exists for this
- * element.
- */
- virtual
- typename Mapping<dim>::InternalDataBase *
- get_data (const UpdateFlags update_flags,
- const Mapping<dim> & mapping,
- const Quadrature<dim> & quadrature) const;
-
- /**
- * Return whether this element dominates
- * the one given as argument when they
- * meet at a common face,
- * whether it is the other way around,
- * whether neither dominates, or if
- * either could dominate.
- *
- * For a definition of domination, see
- * FiniteElementBase::Domination and in
- * particular the @ref hp_paper "hp paper".
- *
- * In the current case, this element
- * is always assumed to dominate, unless
- * it is also of type FE_Nothing(). In
- * that situation, either element can
- * dominate.
- */
- virtual
- FiniteElementDomination::Domination
- compare_for_face_domination (const FiniteElement<dim> & fe_other) const;
-
-
-
- virtual
- std::vector<std::pair<unsigned int, unsigned int> >
- hp_vertex_dof_identities (const FiniteElement<dim> &fe_other) const;
-
- virtual
- std::vector<std::pair<unsigned int, unsigned int> >
- hp_line_dof_identities (const FiniteElement<dim> &fe_other) const;
-
- virtual
- std::vector<std::pair<unsigned int, unsigned int> >
- hp_quad_dof_identities (const FiniteElement<dim> &fe_other) const;
-
- virtual
- bool
- hp_constraints_are_implemented () const;
-
- /**
- * Return the matrix
- * interpolating from a face of
- * of one element to the face of
- * the neighboring element.
- * The size of the matrix is
- * then <tt>source.#dofs_per_face</tt> times
- * <tt>this->#dofs_per_face</tt>.
- *
- * Since the current finite element has no
- * degrees of freedom, the interpolation
- * matrix is necessarily empty.
- */
-
- virtual
- void
- get_face_interpolation_matrix (const FiniteElement<dim> &source_fe,
- FullMatrix<double> &interpolation_matrix) const;
-
-
- /**
- * Return the matrix
- * interpolating from a face of
- * of one element to the subface of
- * the neighboring element.
- * The size of the matrix is
- * then <tt>source.#dofs_per_face</tt> times
- * <tt>this->#dofs_per_face</tt>.
- *
- * Since the current finite element has no
- * degrees of freedom, the interpolation
- * matrix is necessarily empty.
- */
-
- virtual
- void
- get_subface_interpolation_matrix (const FiniteElement<dim> & source_fe,
- const unsigned int index,
- FullMatrix<double> &interpolation_matrix) const;
+ public:
+
+ /**
+ * Constructor. Argument denotes the
+ * number of components to give this
+ * finite element (default = 1).
+ */
+ FE_Nothing (unsigned int n_components = 1);
+
+ /**
+ * A sort of virtual copy
+ * constructor. Some places in
+ * the library, for example the
+ * constructors of FESystem as
+ * well as the hp::FECollection
+ * class, need to make copies of
+ * finite elements without
+ * knowing their exact type. They
+ * do so through this function.
+ */
+ virtual
+ FiniteElement<dim> *
+ clone() const;
+
+ /**
+ * Return a string that uniquely
+ * identifies a finite
+ * element. In this case it is
+ * <code>FE_Nothing@<dim@></code>.
+ */
+ virtual
+ std::string
+ get_name() const;
+
+ /**
+ * Determine the values a finite
+ * element should compute on
+ * initialization of data for
+ * FEValues.
+ *
+ * Given a set of flags
+ * indicating what quantities are
+ * requested from a FEValues
+ * object, update_once() and
+ * update_each() compute which
+ * values must really be
+ * computed. Then, the
+ * <tt>fill_*_values</tt> functions
+ * are called with the result of
+ * these.
+ *
+ * In this case, since the element
+ * has zero degrees of freedom and
+ * no information can be computed on
+ * it, this function simply returns
+ * the default (empty) set of update
+ * flags.
+ */
+
+ virtual
+ UpdateFlags
+ update_once (const UpdateFlags flags) const;
+
+ /**
+ * Complementary function for
+ * update_once().
+ *
+ * While update_once() returns
+ * the values to be computed on
+ * the unit cell for yielding the
+ * required data, this function
+ * determines the values that
+ * must be recomputed on each
+ * cell.
+ *
+ * Refer to update_once() for
+ * more details.
+ */
+ virtual
+ UpdateFlags
+ update_each (const UpdateFlags flags) const;
+
+ /**
+ * Return the value of the
+ * @p ith shape function at the
+ * point @p p. @p p is a point
+ * on the reference element. Because the
+ * current element has no degrees of freedom,
+ * this function should obviously not be
+ * called in practice. All this function
+ * really does, therefore, is trigger an
+ * exception.
+ */
+ virtual
+ double
+ shape_value (const unsigned int i, const Point<dim> &p) const;
+
+ /**
+ * Fill the fields of
+ * FEValues. This function
+ * performs all the operations
+ * needed to compute the data of an
+ * FEValues object.
+ *
+ * In the current case, this function
+ * returns no meaningful information,
+ * since the element has no degrees of
+ * freedom.
+ */
+ virtual
+ void
+ fill_fe_values (const Mapping<dim> &mapping,
+ const typename Triangulation<dim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
+ typename Mapping<dim>::InternalDataBase &mapping_data,
+ typename Mapping<dim>::InternalDataBase &fedata,
+ FEValuesData<dim,dim> &data,
+ CellSimilarity::Similarity &cell_similarity) const;
+
+ /**
+ * Fill the fields of
+ * FEFaceValues. This function
+ * performs all the operations
+ * needed to compute the data of an
+ * FEFaceValues object.
+ *
+ * In the current case, this function
+ * returns no meaningful information,
+ * since the element has no degrees of
+ * freedom.
+ */
+ virtual
+ void
+ fill_fe_face_values (const Mapping<dim> &mapping,
+ const typename Triangulation<dim> :: cell_iterator &cell,
+ const unsigned int face,
+ const Quadrature<dim-1> & quadrature,
+ typename Mapping<dim> :: InternalDataBase &mapping_data,
+ typename Mapping<dim> :: InternalDataBase &fedata,
+ FEValuesData<dim,dim> &data) const;
+
+ /**
+ * Fill the fields of
+ * FESubFaceValues. This function
+ * performs all the operations
+ * needed to compute the data of an
+ * FESubFaceValues object.
+ *
+ * In the current case, this function
+ * returns no meaningful information,
+ * since the element has no degrees of
+ * freedom.
+ */
+ virtual
+ void
+ fill_fe_subface_values (const Mapping<dim> &mapping,
+ const typename Triangulation<dim>::cell_iterator &cell,
+ const unsigned int face,
+ const unsigned int subface,
+ const Quadrature<dim-1> & quadrature,
+ typename Mapping<dim>::InternalDataBase &mapping_data,
+ typename Mapping<dim>::InternalDataBase &fedata,
+ FEValuesData<dim,dim> &data) const;
+
+ /**
+ * Prepare internal data
+ * structures and fill in values
+ * independent of the
+ * cell. Returns a pointer to an
+ * object of which the caller of
+ * this function then has to
+ * assume ownership (which
+ * includes destruction when it
+ * is no longer needed).
+ *
+ * In the current case, this function
+ * just returns a default pointer, since
+ * no meaningful data exists for this
+ * element.
+ */
+ virtual
+ typename Mapping<dim>::InternalDataBase *
+ get_data (const UpdateFlags update_flags,
+ const Mapping<dim> &mapping,
+ const Quadrature<dim> &quadrature) const;
+
+ /**
+ * Return whether this element dominates
+ * the one given as argument when they
+ * meet at a common face,
+ * whether it is the other way around,
+ * whether neither dominates, or if
+ * either could dominate.
+ *
+ * For a definition of domination, see
+ * FiniteElementBase::Domination and in
+ * particular the @ref hp_paper "hp paper".
+ *
+ * In the current case, this element
+ * is always assumed to dominate, unless
+ * it is also of type FE_Nothing(). In
+ * that situation, either element can
+ * dominate.
+ */
+ virtual
+ FiniteElementDomination::Domination
+ compare_for_face_domination (const FiniteElement<dim> &fe_other) const;
+
+
+
+ virtual
+ std::vector<std::pair<unsigned int, unsigned int> >
+ hp_vertex_dof_identities (const FiniteElement<dim> &fe_other) const;
+
+ virtual
+ std::vector<std::pair<unsigned int, unsigned int> >
+ hp_line_dof_identities (const FiniteElement<dim> &fe_other) const;
+
+ virtual
+ std::vector<std::pair<unsigned int, unsigned int> >
+ hp_quad_dof_identities (const FiniteElement<dim> &fe_other) const;
+
+ virtual
+ bool
+ hp_constraints_are_implemented () const;
+
+ /**
+ * Return the matrix
+ * interpolating from a face of
+ * one element to the face of
+ * the neighboring element.
+ * The size of the matrix is
+ * then <tt>source.#dofs_per_face</tt> times
+ * <tt>this->#dofs_per_face</tt>.
+ *
+ * Since the current finite element has no
+ * degrees of freedom, the interpolation
+ * matrix is necessarily empty.
+ */
+
+ virtual
+ void
+ get_face_interpolation_matrix (const FiniteElement<dim> &source_fe,
+ FullMatrix<double> &interpolation_matrix) const;
+
+
+ /**
+ * Return the matrix
+ * interpolating from a face of
+ * one element to the subface of
+ * the neighboring element.
+ * The size of the matrix is
+ * then <tt>source.#dofs_per_face</tt> times
+ * <tt>this->#dofs_per_face</tt>.
+ *
+ * Since the current finite element has no
+ * degrees of freedom, the interpolation
+ * matrix is necessarily empty.
+ */
+
+ virtual
+ void
+ get_subface_interpolation_matrix (const FiniteElement<dim> &source_fe,
+ const unsigned int index,
- FullMatrix<double> &interpolation_matrix) const;
++ FullMatrix<double> &interpolation_matrix) const;
};
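A common usage sketch: pair FE_Nothing with a real element in an hp::FECollection so that some cells carry no degrees of freedom (the `triangulation' object is assumed to exist):

  hp::FECollection<dim> fe_collection;
  fe_collection.push_back (FE_Q<dim>(2));      // active_fe_index 0: cells with unknowns
  fe_collection.push_back (FE_Nothing<dim>()); // active_fe_index 1: cells without unknowns

  hp::DoFHandler<dim> dof_handler (triangulation);
  // ... call cell->set_active_fe_index(0) or (1) on each cell ...
  dof_handler.distribute_dofs (fe_collection);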
template <class POLY, int dim, int spacedim=dim>
class FE_PolyTensor : public FiniteElement<dim,spacedim>
{
- public:
- /**
- * Constructor.
- *
- * @arg @c degree: constructor
- * argument for poly. May be
- * different from @p
- * fe_data.degree.
- */
- FE_PolyTensor (const unsigned int degree,
- const FiniteElementData<dim> &fe_data,
- const std::vector<bool> &restriction_is_additive_flags,
- const std::vector<ComponentMask> &nonzero_components);
+ public:
+ /**
+ * Constructor.
+ *
+ * @arg @c degree: constructor
+ * argument for poly. May be
+ * different from @p
+ * fe_data.degree.
+ */
+ FE_PolyTensor (const unsigned int degree,
+ const FiniteElementData<dim> &fe_data,
+ const std::vector<bool> &restriction_is_additive_flags,
+ const std::vector<ComponentMask> &nonzero_components);
- /**
- * Since these elements are
- * vector valued, an exception is
- * thrown.
- */
- virtual double shape_value (const unsigned int i,
- const Point<dim> &p) const;
+ /**
+ * Since these elements are
+ * vector valued, an exception is
+ * thrown.
+ */
+ virtual double shape_value (const unsigned int i,
+ const Point<dim> &p) const;
- virtual double shape_value_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const;
+ virtual double shape_value_component (const unsigned int i,
+ const Point<dim> &p,
+ const unsigned int component) const;
- /**
- * Since these elements are
- * vector valued, an exception is
- * thrown.
- */
- virtual Tensor<1,dim> shape_grad (const unsigned int i,
- const Point<dim> &p) const;
+ /**
+ * Since these elements are
+ * vector valued, an exception is
+ * thrown.
+ */
+ virtual Tensor<1,dim> shape_grad (const unsigned int i,
+ const Point<dim> &p) const;
- virtual Tensor<1,dim> shape_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const;
+ virtual Tensor<1,dim> shape_grad_component (const unsigned int i,
+ const Point<dim> &p,
+ const unsigned int component) const;
- /**
- * Since these elements are
- * vector valued, an exception is
- * thrown.
- */
- virtual Tensor<2,dim> shape_grad_grad (const unsigned int i,
- const Point<dim> &p) const;
+ /**
+ * Since these elements are
+ * vector valued, an exception is
+ * thrown.
+ */
+ virtual Tensor<2,dim> shape_grad_grad (const unsigned int i,
+ const Point<dim> &p) const;
- virtual Tensor<2,dim> shape_grad_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const;
+ virtual Tensor<2,dim> shape_grad_grad_component (const unsigned int i,
+ const Point<dim> &p,
+ const unsigned int component) const;
- /**
- * Given <tt>flags</tt>,
- * determines the values which
- * must be computed only for the
- * reference cell. Make sure,
- * that #mapping_type is set by
- * the derived class, such that
- * this function can operate
- * correctly.
- */
- virtual UpdateFlags update_once (const UpdateFlags flags) const;
- /**
- * Given <tt>flags</tt>,
- * determines the values which
- * must be computed in each cell
- * cell. Make sure, that
- * #mapping_type is set by the
- * derived class, such that this
- * function can operate
- * correctly.
- */
- virtual UpdateFlags update_each (const UpdateFlags flags) const;
+ /**
+ * Given <tt>flags</tt>,
+ * determines the values which
+ * must be computed only for the
+ * reference cell. Make sure
+ * that #mapping_type is set by
+ * the derived class, such that
+ * this function can operate
+ * correctly.
+ */
+ virtual UpdateFlags update_once (const UpdateFlags flags) const;
+ /**
+ * Given <tt>flags</tt>,
+ * determines the values which
+ * must be computed on each
+ * cell. Make sure that
+ * #mapping_type is set by the
+ * derived class, such that this
+ * function can operate
+ * correctly.
+ */
+ virtual UpdateFlags update_each (const UpdateFlags flags) const;
- protected:
- /**
- * The mapping type to be used to
- * map shape functions from the
- * reference cell to the mesh
- * cell.
- */
- MappingType mapping_type;
+ protected:
+ /**
+ * The mapping type to be used to
+ * map shape functions from the
+ * reference cell to the mesh
+ * cell.
+ */
+ MappingType mapping_type;
- virtual
- typename Mapping<dim,spacedim>::InternalDataBase *
- get_data (const UpdateFlags,
- const Mapping<dim,spacedim>& mapping,
- const Quadrature<dim>& quadrature) const ;
+ virtual
+ typename Mapping<dim,spacedim>::InternalDataBase *
+ get_data (const UpdateFlags,
+ const Mapping<dim,spacedim> &mapping,
+ const Quadrature<dim> &quadrature) const ;
- virtual void
- fill_fe_values (const Mapping<dim,spacedim> &mapping,
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
- FEValuesData<dim,spacedim> &data,
- CellSimilarity::Similarity &cell_similarity) const;
+ virtual void
+ fill_fe_values (const Mapping<dim,spacedim> &mapping,
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
++ typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
++ typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
+ FEValuesData<dim,spacedim> &data,
+ CellSimilarity::Similarity &cell_similarity) const;
- virtual void
- fill_fe_face_values (const Mapping<dim,spacedim> &mapping,
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const Quadrature<dim-1> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
- FEValuesData<dim,spacedim>& data) const ;
+ virtual void
+ fill_fe_face_values (const Mapping<dim,spacedim> &mapping,
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const Quadrature<dim-1> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
+ typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
+ FEValuesData<dim,spacedim> &data) const ;
- virtual void
- fill_fe_subface_values (const Mapping<dim,spacedim> &mapping,
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int sub_no,
- const Quadrature<dim-1> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
- typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
- FEValuesData<dim,spacedim>& data) const ;
+ virtual void
+ fill_fe_subface_values (const Mapping<dim,spacedim> &mapping,
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int sub_no,
+ const Quadrature<dim-1> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_internal,
+ typename Mapping<dim,spacedim>::InternalDataBase &fe_internal,
+ FEValuesData<dim,spacedim> &data) const ;
- /**
- * Fields of cell-independent
- * data for FE_PolyTensor. Stores
- * the values of the shape
- * functions and their
- * derivatives on the reference
- * cell for later use.
- *
- * All tables are organized in a
- * way, that the value for shape
- * function <i>i</i> at
- * quadrature point <i>k</i> is
- * accessed by indices
- * <i>(i,k)</i>.
- */
- class InternalData : public FiniteElement<dim,spacedim>::InternalDataBase
- {
- public:
- /**
- * Array with shape function
- * values in quadrature
- * points. There is one
- * row for each shape
- * function, containing
- * values for each quadrature
- * point.
- */
- std::vector<std::vector<Tensor<1,dim> > > shape_values;
+ /**
+ * Fields of cell-independent
+ * data for FE_PolyTensor. Stores
+ * the values of the shape
+ * functions and their
+ * derivatives on the reference
+ * cell for later use.
+ *
+ * All tables are organized in
+ * such a way that the value for
+ * shape function <i>i</i> at
+ * quadrature point <i>k</i> is
+ * accessed by indices
+ * <i>(i,k)</i>.
+ */
+ class InternalData : public FiniteElement<dim,spacedim>::InternalDataBase
+ {
+ public:
+ /**
+ * Array with shape function
+ * values in quadrature
+ * points. There is one
+ * row for each shape
+ * function, containing
+ * values for each quadrature
+ * point.
+ */
+ std::vector<std::vector<Tensor<1,dim> > > shape_values;
- /**
- * Array with shape function
- * gradients in quadrature
- * points. There is one
- * row for each shape
- * function, containing
- * values for each quadrature
- * point.
- */
- std::vector< std::vector< DerivativeForm<1, dim, spacedim> > > shape_grads;
- };
+ /**
+ * Array with shape function
+ * gradients in quadrature
+ * points. There is one
+ * row for each shape
+ * function, containing
+ * values for each quadrature
+ * point.
+ */
+ std::vector< std::vector< DerivativeForm<1, dim, spacedim> > > shape_grads;
+ };
- /**
- * The polynomial space. Its type
- * is given by the template
- * parameter POLY.
- */
- POLY poly_space;
+ /**
+ * The polynomial space. Its type
+ * is given by the template
+ * parameter POLY.
+ */
+ POLY poly_space;
- /**
- * The inverse of the matrix
- * <i>a<sub>ij</sub></i> of node
- * values <i>N<sub>i</sub></i>
- * applied to polynomial
- * <i>p<sub>j</sub></i>. This
- * matrix is used to convert
- * polynomials in the "raw" basis
- * provided in #poly_space to the
- * basis dual to the node
- * functionals on the reference cell.
- *
- * This object is not filled by
- * FE_PolyTensor, but is a chance
- * for a derived class to allow
- * for reorganization of the
- * basis functions. If it is left
- * empty, the basis in
- * #poly_space is used.
- */
- FullMatrix<double> inverse_node_matrix;
+ /**
+ * The inverse of the matrix
+ * <i>a<sub>ij</sub></i> of node
+ * values <i>N<sub>i</sub></i>
+ * applied to polynomial
+ * <i>p<sub>j</sub></i>. This
+ * matrix is used to convert
+ * polynomials in the "raw" basis
+ * provided in #poly_space to the
+ * basis dual to the node
+ * functionals on the reference cell.
+ *
+ * This object is not filled by
+ * FE_PolyTensor, but is a chance
+ * for a derived class to allow
+ * for reorganization of the
+ * basis functions. If it is left
+ * empty, the basis in
+ * #poly_space is used.
+ */
+ FullMatrix<double> inverse_node_matrix;
- /**
- * If a shape function is
- * computed at a single point, we
- * must compute all of them to
- * apply #inverse_node_matrix. In
- * order to avoid too much
- * overhead, we cache the point
- * and the function values for
- * the next evaluation.
- */
- mutable Point<dim> cached_point;
+ /**
+ * If a shape function is
+ * computed at a single point, we
+ * must compute all of them to
+ * apply #inverse_node_matrix. In
+ * order to avoid too much
+ * overhead, we cache the point
+ * and the function values for
+ * the next evaluation.
+ */
+ mutable Point<dim> cached_point;
- /**
- * Cached shape function values after
- * call to
- * shape_value_component().
- */
- mutable std::vector<Tensor<1,dim> > cached_values;
+ /**
+ * Cached shape function values after
+ * call to
+ * shape_value_component().
+ */
+ mutable std::vector<Tensor<1,dim> > cached_values;
- /**
- * Cached shape function gradients after
- * call to
- * shape_grad_component().
- */
- mutable std::vector<Tensor<2,dim> > cached_grads;
+ /**
+ * Cached shape function gradients after
+ * call to
+ * shape_grad_component().
+ */
+ mutable std::vector<Tensor<2,dim> > cached_grads;
- /**
- * Cached second derivatives of
- * shape functions after call to
- * shape_grad_grad_component().
- */
- mutable std::vector<Tensor<3,dim> > cached_grad_grads;
+ /**
+ * Cached second derivatives of
+ * shape functions after call to
+ * shape_grad_grad_component().
+ */
+ mutable std::vector<Tensor<3,dim> > cached_grad_grads;
};
DEAL_II_NAMESPACE_CLOSE
template <class FE>
class FEFactory : public FEFactoryBase<FE::dimension,FE::dimension>
{
- public:
- /**
- * Create a FiniteElement and
- * return a pointer to it.
- */
- virtual FiniteElement<FE::dimension,FE::dimension>*
- get (const unsigned int degree) const;
-
- /**
- * Create a FiniteElement from a
- * quadrature formula (currently only
- * implemented for FE_Q) and return a
- * pointer to it.
- */
- virtual FiniteElement<FE::dimension,FE::dimension>*
- get (const Quadrature<1> &quad) const;
+ public:
+ /**
+ * Create a FiniteElement and
+ * return a pointer to it.
+ */
+ virtual FiniteElement<FE::dimension,FE::dimension> *
+ get (const unsigned int degree) const;
+
+ /**
+ * Create a FiniteElement from a
+ * quadrature formula (currently only
+ * implemented for FE_Q) and return a
+ * pointer to it.
+ */
+ virtual FiniteElement<FE::dimension,FE::dimension> *
+ get (const Quadrature<1> &quad) const;
};
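A small usage sketch for the factory (assuming FEFactory is reachable in the FETools namespace, as in the library headers):

  FETools::FEFactory<FE_Q<2> > q_factory;
  FiniteElement<2,2> *fe = q_factory.get (2);  // caller assumes ownership of the pointer
  // ... use *fe ...
  delete fe;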
- /**
- * @warning In most cases, you
- * will probably want to use
- * compute_base_renumbering().
- *
- * Compute the vector required to
- * renumber the dofs of a cell by
- * component. Furthermore,
- * compute the vector storing the
- * start indices of each
- * component in the local block
- * vector.
- *
- * @param fe: The finite element
- * used, typically an FESystem.
- *
- * @param renumbering: A vector
- * with as many entries as
- * <tt>fe</tt> has dofs per
- * cell. The indices reference the
- * local dofs on the cell, not the
- * global dofs.
- *
- * @param <tt>start_indice</tt>:
- * This vector is organized such
- * that there is a vector for each
- * base element containing the
- * start index for each component
- * served by this base element.
- *
- * While the first vector is
- * checked to have the correct
- * size, the second one is
- * reinitialized for convenience.
- */
+ /**
+ * @warning In most cases, you
+ * will probably want to use
+ * compute_base_renumbering().
+ *
+ * Compute the vector required to
+ * renumber the dofs of a cell by
+ * component. Furthermore,
+ * compute the vector storing the
+ * start indices of each
+ * component in the local block
+ * vector.
+ *
- * The second vector is organized
- * such that there is a vector
- * for each base element
- * containing the start index for
- * each component served by this
- * base element.
++ * @param fe: The finite element
++ * used, typically an FESystem.
++ *
++ * @param renumbering: A vector
++ * with as many entries as
++ * <tt>fe</tt> has dofs per
++ * cell. The indices reference the
++ * local dofs on the cell, not the
++ * global dofs.
++ *
++ * @param <tt>start_indices</tt>:
++ * This vector is organized such
++ * that there is a vector for each
++ * base element containing the
++ * start index for each component
++ * served by this base element.
+ *
+ * While the first vector is
+ * checked to have the correct
+ * size, the second one is
+ * reinitialized for convenience.
+ */
template<int dim, int spacedim>
void compute_component_wise(
- const FiniteElement<dim,spacedim>& fe,
- std::vector<unsigned int>& renumbering,
- std::vector<std::vector<unsigned int> >& start_indices);
-
- /**
- * Compute the vector required to
- * renumber the dofs of a cell by
- * block. Furthermore, compute
- * the vector storing either the
- * start indices or the size of
- * each local block vector.
- *
- * If the @p bool parameter is
- * true, @p block_data is filled
- * with the start indices of each
- * local block. If it is false,
- * then the block sizes are
- * returned.
- *
- * @todo Which way does this
- * vector map the numbers?
- */
+ const FiniteElement<dim,spacedim> &fe,
+ std::vector<unsigned int> &renumbering,
+ std::vector<std::vector<unsigned int> > &start_indices);
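A sketch for a vector-valued element; as stated above, the renumbering vector has to be sized to fe.dofs_per_cell beforehand, while start_indices is reinitialized by the function:

  FESystem<dim> fe (FE_Q<dim>(2), dim,   // e.g. a vector-valued velocity
                    FE_Q<dim>(1), 1);    // and a scalar pressure
  std::vector<unsigned int> renumbering (fe.dofs_per_cell);
  std::vector<std::vector<unsigned int> > start_indices;
  FETools::compute_component_wise (fe, renumbering, start_indices);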
+
+ /**
+ * Compute the vector required to
+ * renumber the dofs of a cell by
+ * block. Furthermore, compute
+ * the vector storing either the
+ * start indices or the size of
+ * each local block vector.
+ *
+ * If the @p bool parameter is
+ * true, @p block_data is filled
+ * with the start indices of each
+ * local block. If it is false,
+ * then the block sizes are
+ * returned.
+ *
+ * @todo Which way does this
+ * vector map the numbers?
+ */
template<int dim, int spacedim>
void compute_block_renumbering (
- const FiniteElement<dim,spacedim>& fe,
- std::vector<unsigned int>& renumbering,
- std::vector<types::global_dof_index>& block_data,
- const FiniteElement<dim,spacedim> &fe,
++ const FiniteElement<dim,spacedim> &fe,
+ std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &block_data,
++ std::vector<types::global_dof_index> &block_data,
bool return_start_indices = true);
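Similarly, a hedged sketch for the block-wise variant (reusing the fe object from the previous sketch; whether block_data needs to be pre-sized is not spelled out here):

  std::vector<unsigned int> renumbering (fe.dofs_per_cell);
  std::vector<types::global_dof_index> block_data (fe.n_blocks());
  FETools::compute_block_renumbering (fe, renumbering, block_data,
                                      true);   // ask for start indices rather than block sizes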
- /**
- * @name Generation of local matrices
- * @{
- */
- /**
- * Gives the interpolation matrix
- * that interpolates a @p fe1-
- * function to a @p fe2-function on
- * each cell. The interpolation_matrix
- * needs to be of size
- * <tt>(fe2.dofs_per_cell, fe1.dofs_per_cell)</tt>.
- *
- * Note, that if the finite element
- * space @p fe1 is a subset of
- * the finite element space
- * @p fe2 then the @p interpolation_matrix
- * is an embedding matrix.
- */
+ /**
+ * @name Generation of local matrices
+ * @{
+ */
+ /**
+ * Gives the interpolation matrix
+ * that interpolates a @p fe1-
+ * function to a @p fe2-function on
+ * each cell. The interpolation_matrix
+ * needs to be of size
+ * <tt>(fe2.dofs_per_cell, fe1.dofs_per_cell)</tt>.
+ *
+ * Note that if the finite element
+ * space @p fe1 is a subset of
+ * the finite element space
+ * @p fe2 then the @p interpolation_matrix
+ * is an embedding matrix.
+ */
template <int dim, typename number, int spacedim>
void
get_interpolation_matrix(const FiniteElement<dim,spacedim> &fe1,
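For example, to build the matrix that expresses a Q1 field in terms of the Q2 basis on a cell (a minimal sketch):

  FE_Q<dim> fe1 (1), fe2 (2);
  FullMatrix<double> interpolation_matrix (fe2.dofs_per_cell,
                                           fe1.dofs_per_cell);
  FETools::get_interpolation_matrix (fe1, fe2, interpolation_matrix);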
- /**
- * This method implements the
- * FETools::compute_projection_from_quadrature_points_matrix
- * method for faces of a mesh.
- * The matrix that it returns, X, is face specific
- * and its size is fe.dofs_per_cell by
- * rhs_quadrature.size().
- * The dimension, dim must be larger than 1 for this class,
- * since Quadrature<dim-1> objects are required. See the
- * documentation on the Quadrature class for more information.
- */
+ /**
+ * This method implements the
+ * FETools::compute_projection_from_quadrature_points_matrix
+ * method for faces of a mesh.
+ * The matrix that it returns, X, is face specific
+ * and its size is fe.dofs_per_cell by
+ * rhs_quadrature.size().
+ * The dimension, dim must be larger than 1 for this class,
+ * since Quadrature<dim-1> objects are required. See the
+ * documentation on the Quadrature class for more information.
+ */
template <int dim, int spacedim>
void
- compute_projection_from_face_quadrature_points_matrix (const FiniteElement<dim, spacedim> &fe,
- const Quadrature<dim-1> &lhs_quadrature,
- const Quadrature<dim-1> &rhs_quadrature,
- const typename DoFHandler<dim, spacedim>::active_cell_iterator &cell,
- unsigned int face,
- FullMatrix<double> &X);
+ compute_projection_from_face_quadrature_points_matrix (
+ const FiniteElement<dim, spacedim> &fe,
+ const Quadrature<dim-1> &lhs_quadrature,
+ const Quadrature<dim-1> &rhs_quadrature,
- const typename DoFHandler<dim, spacedim>::active_cell_iterator & cell,
++ const typename DoFHandler<dim, spacedim>::active_cell_iterator &cell,
+ unsigned int face,
+ FullMatrix<double> &X);
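A hedged sketch of a call; the finite element `fe', the active cell iterator `cell', and the face number `face_no' are assumed to come from the surrounding code:

  QGauss<dim-1> lhs_quadrature (2);
  QGauss<dim-1> rhs_quadrature (3);
  FullMatrix<double> X (fe.dofs_per_cell, rhs_quadrature.size());
  FETools::compute_projection_from_face_quadrature_points_matrix (fe,
                                                                  lhs_quadrature,
                                                                  rhs_quadrature,
                                                                  cell, face_no,
                                                                  X);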
-
-
-
- //@}
- /**
- * @name Functions which should be in DoFTools
- */
- //@{
- /**
- * Gives the interpolation of a the
- * @p dof1-function @p u1 to a
- * @p dof2-function @p u2. @p dof1 and
- * @p dof2 need to be DoFHandlers
- * based on the same triangulation.
- *
- * If the elements @p fe1 and @p fe2
- * are either both continuous or
- * both discontinuous then this
- * interpolation is the usual point
- * interpolation. The same is true
- * if @p fe1 is a continuous and
- * @p fe2 is a discontinuous finite
- * element. For the case that @p fe1
- * is a discontinuous and @p fe2 is
- * a continuous finite element
- * there is no point interpolation
- * defined at the discontinuities.
- * Therefore the meanvalue is taken
- * at the DoF values on the
- * discontinuities.
- *
- * Note that for continuous
- * elements on grids with hanging
- * nodes (i.e. locally refined
- * grids) this function does not
- * give the expected output.
- * Indeed, the resulting output
- * vector does not necessarily
- * respect continuity
- * requirements at hanging nodes:
- * if, for example, you are
- * interpolating a Q2 field to a
- * Q1 field, then at hanging
- * nodes the output field will
- * have the function value of the
- * input field, which however is
- * not usually the mean value of
- * the two adjacent nodes. It is
- * thus not part of the Q1
- * function space on the whole
- * triangulation, although it is
- * of course Q1 on each cell.
- *
- * For this case (continuous
- * elements on grids with hanging
- * nodes), please use the
- * @p interpolate function with
- * an additional
- * @p ConstraintMatrix argument,
- * see below, or make the field
- * conforming yourself by calling
- * the @p distribute function of
- * your hanging node constraints
- * object.
- */
+
+
+
+ //@}
+ /**
+ * @name Functions which should be in DoFTools
+ */
+ //@{
+ /**
+ * Gives the interpolation of the
+ * @p dof1-function @p u1 to a
+ * @p dof2-function @p u2. @p dof1 and
+ * @p dof2 need to be DoFHandlers
+ * based on the same triangulation.
+ *
+ * If the elements @p fe1 and @p fe2
+ * are either both continuous or
+ * both discontinuous then this
+ * interpolation is the usual point
+ * interpolation. The same is true
+ * if @p fe1 is a continuous and
+ * @p fe2 is a discontinuous finite
+ * element. For the case that @p fe1
+ * is a discontinuous and @p fe2 is
+ * a continuous finite element
+ * there is no point interpolation
+ * defined at the discontinuities.
+ * Therefore the mean value is taken
+ * at the DoF values on the
+ * discontinuities.
+ *
+ * Note that for continuous
+ * elements on grids with hanging
+ * nodes (i.e. locally refined
+ * grids) this function does not
+ * give the expected output.
+ * Indeed, the resulting output
+ * vector does not necessarily
+ * respect continuity
+ * requirements at hanging nodes:
+ * if, for example, you are
+ * interpolating a Q2 field to a
+ * Q1 field, then at hanging
+ * nodes the output field will
+ * have the function value of the
+ * input field, which however is
+ * not usually the mean value of
+ * the two adjacent nodes. It is
+ * thus not part of the Q1
+ * function space on the whole
+ * triangulation, although it is
+ * of course Q1 on each cell.
+ *
+ * For this case (continuous
+ * elements on grids with hanging
+ * nodes), please use the
+ * @p interpolate function with
+ * an additional
+ * @p ConstraintMatrix argument,
+ * see below, or make the field
+ * conforming yourself by calling
+ * the @p distribute function of
+ * your hanging node constraints
+ * object.
+ */
template <int dim, int spacedim,
- template <int,int> class DH1,
- template <int,int> class DH2,
- class InVector, class OutVector>
+ template <int,int> class DH1,
+ template <int,int> class DH2,
+ class InVector, class OutVector>
void
interpolate (const DH1<dim,spacedim> &dof1,
const InVector &u1,
const DH2<dim,spacedim> &dof2,
OutVector &u2);
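
To make the calling sequence concrete, here is a minimal usage sketch; the handler and vector names are placeholders, dof1 and dof2 are assumed to be built on the same triangulation (say with FE_Q<dim>(2) and FE_Q<dim>(1)), and the function is assumed to live in the FETools namespace like the surrounding declarations:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Carry a field from the space of dof1 over to the space of dof2
// by pointwise interpolation.
template <int dim>
void interpolate_between_spaces (const DoFHandler<dim> &dof1,
                                 const Vector<double>  &u1,
                                 const DoFHandler<dim> &dof2,
                                 Vector<double>        &u2)
{
  u2.reinit (dof2.n_dofs());                  // size the target vector
  FETools::interpolate (dof1, u1, dof2, u2);  // pointwise interpolation
}
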
- /**
- * Gives the interpolation of a
- * the @p dof1-function @p u1 to
- * a @p dof2-function @p u2. @p
- * dof1 and @p dof2 need to be
- * DoFHandlers (or
- * hp::DoFHandlers) based on the
- * same triangulation. @p
- * constraints is a hanging node
- * constraints object
- * corresponding to @p dof2. This
- * object is particular important
- * when interpolating onto
- * continuous elements on grids
- * with hanging nodes (locally
- * refined grids).
- *
- * If the elements @p fe1 and @p fe2
- * are either both continuous or
- * both discontinuous then this
- * interpolation is the usual point
- * interpolation. The same is true
- * if @p fe1 is a continuous and
- * @p fe2 is a discontinuous finite
- * element. For the case that @p fe1
- * is a discontinuous and @p fe2 is
- * a continuous finite element
- * there is no point interpolation
- * defined at the discontinuities.
- * Therefore the meanvalue is taken
- * at the DoF values on the
- * discontinuities.
- */
+ /**
+ * Gives the interpolation of
+ * the @p dof1-function @p u1 to
+ * a @p dof2-function @p u2. @p
+ * dof1 and @p dof2 need to be
+ * DoFHandlers (or
+ * hp::DoFHandlers) based on the
+ * same triangulation. @p
+ * constraints is a hanging node
+ * constraints object
+ * corresponding to @p dof2. This
+ * object is particularly important
+ * when interpolating onto
+ * continuous elements on grids
+ * with hanging nodes (locally
+ * refined grids).
+ *
+ * If the elements @p fe1 and @p fe2
+ * are either both continuous or
+ * both discontinuous then this
+ * interpolation is the usual point
+ * interpolation. The same is true
+ * if @p fe1 is a continuous and
+ * @p fe2 is a discontinuous finite
+ * element. For the case that @p fe1
+ * is a discontinuous and @p fe2 is
+ * a continuous finite element
+ * there is no point interpolation
+ * defined at the discontinuities.
+ * Therefore the mean value is taken
+ * at the DoF values on the
+ * discontinuities.
+ */
template <int dim, int spacedim,
- template <int, int> class DH1,
- template <int, int> class DH2,
- class InVector, class OutVector>
+ template <int, int> class DH1,
+ template <int, int> class DH2,
+ class InVector, class OutVector>
- void interpolate (const DH1<dim,spacedim> &dof1,
+ void interpolate (const DH1<dim,spacedim> &dof1,
const InVector &u1,
- const DH2<dim,spacedim> &dof2,
+ const DH2<dim,spacedim> &dof2,
const ConstraintMatrix &constraints,
- OutVector& u2);
-
- /**
- * Gives the interpolation of the
- * @p fe1-function @p u1 to a
- * @p fe2-function, and
- * interpolates this to a second
- * @p fe1-function named
- * @p u1_interpolated.
- *
- * Note, that this function does
- * not work on continuous
- * elements at hanging nodes. For
- * that case use the
- * @p back_interpolate function,
- * below, that takes an
- * additional
- * @p ConstraintMatrix object.
- *
- * Furthermore note, that for the
- * specific case when the finite
- * element space corresponding to
- * @p fe1 is a subset of the
- * finite element space
- * corresponding to @p fe2, this
- * function is simply an identity
- * mapping.
- */
+ OutVector &u2);
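
For the hanging-node variant just declared, a minimal sketch, assuming dof2 uses a continuous element on a locally refined mesh (names are again placeholders):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Interpolate u1 onto dof2 and make the result conforming at hanging nodes.
template <int dim>
void interpolate_conforming (const DoFHandler<dim> &dof1,
                             const Vector<double>  &u1,
                             const DoFHandler<dim> &dof2,
                             Vector<double>        &u2)
{
  ConstraintMatrix constraints2;
  DoFTools::make_hanging_node_constraints (dof2, constraints2);
  constraints2.close ();

  u2.reinit (dof2.n_dofs());
  FETools::interpolate (dof1, u1, dof2, constraints2, u2);
}
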
+
+ /**
+ * Gives the interpolation of the
+ * @p fe1-function @p u1 to a
+ * @p fe2-function, and
+ * interpolates this to a second
+ * @p fe1-function named
+ * @p u1_interpolated.
+ *
+ * Note that this function does
+ * not work on continuous
+ * elements at hanging nodes. For
+ * that case use the
+ * @p back_interpolate function,
+ * below, that takes an
+ * additional
+ * @p ConstraintMatrix object.
+ *
+ * Furthermore, note that for the
+ * specific case when the finite
+ * element space corresponding to
+ * @p fe1 is a subset of the
+ * finite element space
+ * corresponding to @p fe2, this
+ * function is simply an identity
+ * mapping.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
const InVector &u1,
const FiniteElement<dim,spacedim> &fe2,
OutVector &u1_interpolated);
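
A minimal sketch of the back-interpolation just declared, assuming a mesh without hanging nodes and using a Q1 element as the intermediate space fe2 (placeholder names):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Interpolate u1 into an intermediate Q1 space and back onto dof1.
template <int dim>
void back_interpolate_through_q1 (const DoFHandler<dim> &dof1,
                                  const Vector<double>  &u1,
                                  Vector<double>        &u1_interpolated)
{
  const FE_Q<dim> fe2 (1);
  u1_interpolated.reinit (dof1.n_dofs());
  FETools::back_interpolate (dof1, u1, fe2, u1_interpolated);
}
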
- /**
- * Gives the interpolation of the
- * @p dof1-function @p u1 to a
- * @p dof2-function, and
- * interpolates this to a second
- * @p dof1-function named
- * @p u1_interpolated.
- * @p constraints1 and
- * @p constraints2 are the
- * hanging node constraints
- * corresponding to @p dof1 and
- * @p dof2, respectively. These
- * objects are particular
- * important when continuous
- * elements on grids with hanging
- * nodes (locally refined grids)
- * are involved.
- *
- * Furthermore note, that for the
- * specific case when the finite
- * element space corresponding to
- * @p dof1 is a subset of the
- * finite element space
- * corresponding to @p dof2, this
- * function is simply an identity
- * mapping.
- */
+ /**
+ * Gives the interpolation of the
+ * @p dof1-function @p u1 to a
+ * @p dof2-function, and
+ * interpolates this to a second
+ * @p dof1-function named
+ * @p u1_interpolated.
+ * @p constraints1 and
+ * @p constraints2 are the
+ * hanging node constraints
+ * corresponding to @p dof1 and
+ * @p dof2, respectively. These
+ * objects are particularly
+ * important when continuous
+ * elements on grids with hanging
+ * nodes (locally refined grids)
+ * are involved.
+ *
+ * Furthermore note, that for the
+ * specific case when the finite
+ * element space corresponding to
+ * @p dof1 is a subset of the
+ * finite element space
+ * corresponding to @p dof2, this
+ * function is simply an identity
+ * mapping.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void back_interpolate (const DoFHandler<dim,spacedim>& dof1,
- const ConstraintMatrix& constraints1,
- const InVector& u1,
- const DoFHandler<dim,spacedim>& dof2,
- const ConstraintMatrix& constraints2,
- OutVector& u1_interpolated);
-
- /**
- * Gives $(Id-I_h)z_1$ for a given
- * @p dof1-function $z_1$, where $I_h$
- * is the interpolation from @p fe1
- * to @p fe2. The result $(Id-I_h)z_1$ is
- * written into @p z1_difference.
- *
- * Note, that this function does
- * not work for continuous
- * elements at hanging nodes. For
- * that case use the
- * @p interpolation_difference
- * function, below, that takes an
- * additional
- * @p ConstraintMatrix object.
- */
- void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
++ void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
+ const ConstraintMatrix &constraints1,
+ const InVector &u1,
- const DoFHandler<dim,spacedim> &dof2,
++ const DoFHandler<dim,spacedim> &dof2,
+ const ConstraintMatrix &constraints2,
+ OutVector &u1_interpolated);
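
The same operation on a locally refined mesh, sketched with hanging-node constraints built for both spaces (placeholder names):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Back-interpolation through dof2; the two constraint objects keep both
// fields conforming at hanging nodes.
template <int dim>
void back_interpolate_constrained (const DoFHandler<dim> &dof1,
                                   const Vector<double>  &u1,
                                   const DoFHandler<dim> &dof2,
                                   Vector<double>        &u1_interpolated)
{
  ConstraintMatrix constraints1, constraints2;
  DoFTools::make_hanging_node_constraints (dof1, constraints1);
  DoFTools::make_hanging_node_constraints (dof2, constraints2);
  constraints1.close ();
  constraints2.close ();

  u1_interpolated.reinit (dof1.n_dofs());
  FETools::back_interpolate (dof1, constraints1, u1,
                             dof2, constraints2, u1_interpolated);
}
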
+
+ /**
+ * Gives $(Id-I_h)z_1$ for a given
+ * @p dof1-function $z_1$, where $I_h$
+ * is the interpolation from @p fe1
+ * to @p fe2. The result $(Id-I_h)z_1$ is
+ * written into @p z1_difference.
+ *
+ * Note that this function does
+ * not work for continuous
+ * elements at hanging nodes. For
+ * that case use the
+ * @p interpolation_difference
+ * function, below, that takes an
+ * additional
+ * @p ConstraintMatrix object.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
void interpolation_difference(const DoFHandler<dim,spacedim> &dof1,
const InVector &z1,
const FiniteElement<dim,spacedim> &fe2,
OutVector &z1_difference);
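
A minimal sketch, assuming z1 is, say, a dual solution whose interpolation error into a lower-order space is needed, for example as a weight in an error estimator (placeholder names):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Compute (Id - I_h) z1, where I_h interpolates into a Q1 space.
template <int dim>
void interpolation_error_weight (const DoFHandler<dim> &dof1,
                                 const Vector<double>  &z1,
                                 Vector<double>        &z1_difference)
{
  const FE_Q<dim> fe2 (1);
  z1_difference.reinit (dof1.n_dofs());
  FETools::interpolation_difference (dof1, z1, fe2, z1_difference);
}
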
- /**
- * Gives $(Id-I_h)z_1$ for a given
- * @p dof1-function $z_1$, where $I_h$
- * is the interpolation from @p fe1
- * to @p fe2. The result $(Id-I_h)z_1$ is
- * written into @p z1_difference.
- * @p constraints1 and
- * @p constraints2 are the
- * hanging node constraints
- * corresponding to @p dof1 and
- * @p dof2, respectively. These
- * objects are particular
- * important when continuous
- * elements on grids with hanging
- * nodes (locally refined grids)
- * are involved.
- */
+ /**
+ * Gives $(Id-I_h)z_1$ for a given
+ * @p dof1-function $z_1$, where $I_h$
+ * is the interpolation from @p fe1
+ * to @p fe2. The result $(Id-I_h)z_1$ is
+ * written into @p z1_difference.
+ * @p constraints1 and
+ * @p constraints2 are the
+ * hanging node constraints
+ * corresponding to @p dof1 and
+ * @p dof2, respectively. These
+ * objects are particularly
+ * important when continuous
+ * elements on grids with hanging
+ * nodes (locally refined grids)
+ * are involved.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void interpolation_difference(const DoFHandler<dim,spacedim>& dof1,
- const ConstraintMatrix& constraints1,
- const InVector& z1,
- const DoFHandler<dim,spacedim>& dof2,
- const ConstraintMatrix& constraints2,
- OutVector& z1_difference);
-
-
-
- /**
- * $L^2$ projection for
- * discontinuous
- * elements. Operates the same
- * direction as interpolate.
- *
- * The global projection can be
- * computed by local matrices if
- * the finite element spaces are
- * discontinuous. With continuous
- * elements, this is impossible,
- * since a global mass matrix
- * must be inverted.
- */
- void interpolation_difference(const DoFHandler<dim,spacedim> &dof1,
++ void interpolation_difference(const DoFHandler<dim,spacedim> &dof1,
+ const ConstraintMatrix &constraints1,
+ const InVector &z1,
- const DoFHandler<dim,spacedim> &dof2,
++ const DoFHandler<dim,spacedim> &dof2,
+ const ConstraintMatrix &constraints2,
+ OutVector &z1_difference);
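
The constrained variant, sketched for a locally refined mesh; constraints are built for both spaces and all names are placeholders:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// (Id - I_h) z1 on a locally refined mesh, with hanging-node constraints
// for both the dof1 and dof2 spaces.
template <int dim>
void interpolation_difference_constrained (const DoFHandler<dim> &dof1,
                                           const Vector<double>  &z1,
                                           const DoFHandler<dim> &dof2,
                                           Vector<double>        &z1_difference)
{
  ConstraintMatrix constraints1, constraints2;
  DoFTools::make_hanging_node_constraints (dof1, constraints1);
  DoFTools::make_hanging_node_constraints (dof2, constraints2);
  constraints1.close ();
  constraints2.close ();

  z1_difference.reinit (dof1.n_dofs());
  FETools::interpolation_difference (dof1, constraints1, z1,
                                     dof2, constraints2, z1_difference);
}
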
+
+
+
+ /**
+ * $L^2$ projection for
+ * discontinuous
+ * elements. Operates in the same
+ * direction as interpolate.
+ *
+ * The global projection can be
+ * computed by local matrices if
+ * the finite element spaces are
+ * discontinuous. With continuous
+ * elements, this is impossible,
+ * since a global mass matrix
+ * must be inverted.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void project_dg (const DoFHandler<dim,spacedim>& dof1,
- const InVector& u1,
- const DoFHandler<dim,spacedim>& dof2,
- OutVector& u2);
-
- /**
- * Gives the patchwise
- * extrapolation of a @p dof1
- * function @p z1 to a @p dof2
- * function @p z2. @p dof1 and
- * @p dof2 need to be DoFHandler
- * based on the same triangulation.
- *
- * This function is interesting
- * for e.g. extrapolating
- * patchwise a piecewise linear
- * solution to a piecewise
- * quadratic solution.
- *
- * Note that the resulting field
- * does not satisfy continuity
- * requirements of the given
- * finite elements.
- *
- * When you use continuous
- * elements on grids with hanging
- * nodes, please use the
- * @p extrapolate function with
- * an additional
- * ConstraintMatrix argument,
- * see below.
- *
- * Since this function operates
- * on patches of cells, it is
- * required that the underlying
- * grid is refined at least once
- * for every coarse grid cell. If
- * this is not the case, an
- * exception will be raised.
- */
+ void project_dg (const DoFHandler<dim,spacedim> &dof1,
+ const InVector &u1,
+ const DoFHandler<dim,spacedim> &dof2,
+ OutVector &u2);
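
A minimal sketch, assuming both handlers are built with discontinuous elements such as FE_DGQ on the same triangulation (placeholder names):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// L2-project a discontinuous field from dof1 onto dof2; only cell-local
// mass matrices need to be inverted.
template <int dim>
void project_between_dg_spaces (const DoFHandler<dim> &dof1,
                                const Vector<double>  &u1,
                                const DoFHandler<dim> &dof2,
                                Vector<double>        &u2)
{
  u2.reinit (dof2.n_dofs());
  FETools::project_dg (dof1, u1, dof2, u2);
}
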
+
+ /**
+ * Gives the patchwise
+ * extrapolation of a @p dof1
+ * function @p z1 to a @p dof2
+ * function @p z2. @p dof1 and
+ * @p dof2 need to be DoFHandler
+ * based on the same triangulation.
+ *
+ * This function is interesting
+ * for e.g. extrapolating
+ * patchwise a piecewise linear
+ * solution to a piecewise
+ * quadratic solution.
+ *
+ * Note that the resulting field
+ * does not satisfy continuity
+ * requirements of the given
+ * finite elements.
+ *
+ * When you use continuous
+ * elements on grids with hanging
+ * nodes, please use the
+ * @p extrapolate function with
+ * an additional
+ * ConstraintMatrix argument,
+ * see below.
+ *
+ * Since this function operates
+ * on patches of cells, it is
+ * required that the underlying
+ * grid is refined at least once
+ * for every coarse grid cell. If
+ * this is not the case, an
+ * exception will be raised.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void extrapolate (const DoFHandler<dim,spacedim>& dof1,
- const InVector& z1,
- const DoFHandler<dim,spacedim>& dof2,
- OutVector& z2);
-
- /**
- * Gives the patchwise
- * extrapolation of a @p dof1
- * function @p z1 to a @p dof2
- * function @p z2. @p dof1 and
- * @p dof2 need to be DoFHandler
- * based on the same triangulation.
- * @p constraints is a hanging
- * node constraints object
- * corresponding to
- * @p dof2. This object is
- * particular important when
- * interpolating onto continuous
- * elements on grids with hanging
- * nodes (locally refined grids).
- *
- * Otherwise, the same holds as
- * for the other @p extrapolate
- * function.
- */
+ void extrapolate (const DoFHandler<dim,spacedim> &dof1,
+ const InVector &z1,
+ const DoFHandler<dim,spacedim> &dof2,
+ OutVector &z2);
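
A minimal sketch of the patchwise extrapolation, assuming dof1 holds a piecewise linear field, dof2 is a piecewise quadratic space on the same mesh, and the mesh has been refined at least once (placeholder names):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Patchwise extrapolation, e.g. of a Q1 field (dof1) to a Q2 field (dof2).
template <int dim>
void extrapolate_to_higher_order (const DoFHandler<dim> &dof1,
                                  const Vector<double>  &z1,
                                  const DoFHandler<dim> &dof2,
                                  Vector<double>        &z2)
{
  z2.reinit (dof2.n_dofs());
  FETools::extrapolate (dof1, z1, dof2, z2);
}
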
+
+ /**
+ * Gives the patchwise
+ * extrapolation of a @p dof1
+ * function @p z1 to a @p dof2
+ * function @p z2. @p dof1 and
+ * @p dof2 need to be DoFHandler
+ * based on the same triangulation.
+ * @p constraints is a hanging
+ * node constraints object
+ * corresponding to
+ * @p dof2. This object is
+ * particularly important when
+ * interpolating onto continuous
+ * elements on grids with hanging
+ * nodes (locally refined grids).
+ *
+ * Otherwise, the same holds as
+ * for the other @p extrapolate
+ * function.
+ */
template <int dim, class InVector, class OutVector, int spacedim>
- void extrapolate (const DoFHandler<dim,spacedim>& dof1,
- const InVector& z1,
- const DoFHandler<dim,spacedim>& dof2,
- const ConstraintMatrix& constraints,
- OutVector& z2);
- //@}
- /**
- * The numbering of the degrees
- * of freedom in continuous finite
- * elements is hierarchic,
- * i.e. in such a way that we
- * first number the vertex dofs,
- * in the order of the vertices
- * as defined by the
- * triangulation, then the line
- * dofs in the order and
- * respecting the direction of
- * the lines, then the dofs on
- * quads, etc. However, we could
- * have, as well, numbered them
- * in a lexicographic way,
- * i.e. with indices first
- * running in x-direction, then
- * in y-direction and finally in
- * z-direction. Discontinuous
- * elements of class FE_DGQ()
- * are numbered in this way, for
- * example.
- *
- * This function constructs a
- * table which lexicographic
- * index each degree of freedom
- * in the hierarchic numbering
- * would have. It operates on the
- * continuous finite element
- * given as first argument, and
- * outputs the lexicographic
- * indices in the second.
- *
- * Note that since this function
- * uses specifics of the
- * continuous finite elements, it
- * can only operate on
- * FiniteElementData<dim> objects
- * inherent in FE_Q(). However,
- * this function does not take a
- * FE_Q object as it is also
- * invoked by the FE_Q()
- * constructor.
- *
- * It is assumed that the size of
- * the output argument already
- * matches the correct size,
- * which is equal to the number
- * of degrees of freedom in the
- * finite element.
- */
- void extrapolate (const DoFHandler<dim,spacedim> &dof1,
++ void extrapolate (const DoFHandler<dim,spacedim> &dof1,
+ const InVector &z1,
- const DoFHandler<dim,spacedim> &dof2,
++ const DoFHandler<dim,spacedim> &dof2,
+ const ConstraintMatrix &constraints,
+ OutVector &z2);
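
The hanging-node variant, sketched with constraints built for dof2 (placeholder names):

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_tools.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

// Patchwise extrapolation on a locally refined mesh; the constraints built
// for dof2 make the result conforming at hanging nodes.
template <int dim>
void extrapolate_conforming (const DoFHandler<dim> &dof1,
                             const Vector<double>  &z1,
                             const DoFHandler<dim> &dof2,
                             Vector<double>        &z2)
{
  ConstraintMatrix constraints;
  DoFTools::make_hanging_node_constraints (dof2, constraints);
  constraints.close ();

  z2.reinit (dof2.n_dofs());
  FETools::extrapolate (dof1, z1, dof2, constraints, z2);
}
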
+ //@}
+ /**
+ * The numbering of the degrees
+ * of freedom in continuous finite
+ * elements is hierarchic,
+ * i.e. in such a way that we
+ * first number the vertex dofs,
+ * in the order of the vertices
+ * as defined by the
+ * triangulation, then the line
+ * dofs in the order and
+ * respecting the direction of
+ * the lines, then the dofs on
+ * quads, etc. However, we could
+ * have, as well, numbered them
+ * in a lexicographic way,
+ * i.e. with indices first
+ * running in x-direction, then
+ * in y-direction and finally in
+ * z-direction. Discontinuous
+ * elements of class FE_DGQ()
+ * are numbered in this way, for
+ * example.
+ *
+ * This function constructs a
+ * table which lexicographic
+ * index each degree of freedom
+ * in the hierarchic numbering
+ * would have. It operates on the
+ * continuous finite element
+ * given as first argument, and
+ * outputs the lexicographic
+ * indices in the second.
+ *
+ * Note that since this function
+ * uses specifics of the
+ * continuous finite elements, it
+ * can only operate on
+ * FiniteElementData<dim> objects
+ * inherent in FE_Q(). However,
+ * this function does not take a
+ * FE_Q object as it is also
+ * invoked by the FE_Q()
+ * constructor.
+ *
+ * It is assumed that the size of
+ * the output argument already
+ * matches the correct size,
+ * which is equal to the number
+ * of degrees of freedom in the
+ * finite element.
+ */
template <int dim>
void
hierarchic_to_lexicographic_numbering (const FiniteElementData<dim> &fe_data,
std::vector<unsigned int> &h2l);
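
A minimal sketch of how this numbering table might be used, assuming the function lives in the FETools namespace like the surrounding declarations:

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_tools.h>
#include <vector>

using namespace dealii;

// For a Q2 element in 2d, compute for each hierarchically numbered dof
// the lexicographic index it would have.
void show_hierarchic_to_lexicographic ()
{
  const FE_Q<2> fe (2);
  std::vector<unsigned int> h2l (fe.dofs_per_cell);  // must be pre-sized
  FETools::hierarchic_to_lexicographic_numbering (fe, h2l);
  // h2l[i] is now the lexicographic index of hierarchic dof i
}
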
- /**
- * Compute the gradient of the
- * <tt>i</tt>th shape function at the
- * <tt>j</tt>th quadrature point with
- * respect to real cell
- * coordinates. If you want to
- * get the derivative in one of
- * the coordinate directions, use
- * the appropriate function of
- * the Tensor class to
- * extract one component. Since
- * only a reference to the
- * gradient's value is returned,
- * there should be no major
- * performance drawback.
- *
- * If the shape function is
- * vector-valued, then this
- * returns the only non-zero
- * component. If the shape
- * function has more than one
- * non-zero component (i.e. it is
- * not primitive), then throw an
- * exception of type
- * ExcShapeFunctionNotPrimitive. In
- * that case, use the
- * shape_grad_component()
- * function.
- *
- * The same holds for the arguments
- * of this function as for the
- * shape_value() function.
- */
- const Tensor<1,spacedim> &
- shape_grad (const unsigned int function,
- const unsigned int quadrature_point) const;
-
- /**
- * Return one vector component of
- * the gradient of a shape function
- * at a quadrature point. If the
- * finite element is scalar, then
- * only component zero is allowed
- * and the return value equals
- * that of the shape_grad()
- * function. If the finite
- * element is vector valued but
- * all shape functions are
- * primitive (i.e. they are
- * non-zero in only one
- * component), then the value
- * returned by shape_grad()
- * equals that of this function
- * for exactly one
- * component. This function is
- * therefore only of greater
- * interest if the shape function
- * is not primitive, but then it
- * is necessary since the other
- * function cannot be used.
- *
- * The same holds for the arguments
- * of this function as for the
- * shape_value_component() function.
- */
- Tensor<1,spacedim>
- shape_grad_component (const unsigned int function_no,
- const unsigned int point_no,
- const unsigned int component) const;
-
- /**
- * Second derivatives of
- * the <tt>function_no</tt>th shape function at
- * the <tt>point_no</tt>th quadrature point
- * with respect to real cell
- * coordinates. If you want to
- * get the derivatives in one of
- * the coordinate directions, use
- * the appropriate function of
- * the Tensor class to
- * extract one component. Since
- * only a reference to the
- * derivative values is returned,
- * there should be no major
- * performance drawback.
- *
- * If the shape function is
- * vector-valued, then this
- * returns the only non-zero
- * component. If the shape
- * function has more than one
- * non-zero component (i.e. it is
- * not primitive), then throw an
- * exception of type
- * ExcShapeFunctionNotPrimitive. In
- * that case, use the
- * shape_grad_grad_component()
- * function.
- *
- * The same holds for the arguments
- * of this function as for the
- * shape_value() function.
- */
- const Tensor<2,spacedim> &
- shape_hessian (const unsigned int function_no,
- const unsigned int point_no) const;
-
- /**
- * @deprecated Wrapper for shape_hessian()
- */
- const Tensor<2,spacedim> &
- shape_2nd_derivative (const unsigned int function_no,
- const unsigned int point_no) const;
-
-
- /**
- * Return one vector component of
- * the gradient of a shape
- * function at a quadrature
- * point. If the finite element
- * is scalar, then only component
- * zero is allowed and the return
- * value equals that of the
- * shape_hessian()
- * function. If the finite
- * element is vector valued but
- * all shape functions are
- * primitive (i.e. they are
- * non-zero in only one
- * component), then the value
- * returned by
- * shape_hessian()
- * equals that of this function
- * for exactly one
- * component. This function is
- * therefore only of greater
- * interest if the shape function
- * is not primitive, but then it
- * is necessary since the other
- * function cannot be used.
- *
- * The same holds for the arguments
- * of this function as for the
- * shape_value_component() function.
- */
- Tensor<2,spacedim>
- shape_hessian_component (const unsigned int function_no,
- const unsigned int point_no,
- const unsigned int component) const;
-
- /**
- * @deprecated Wrapper for shape_hessian_component()
- */
- Tensor<2,spacedim>
- shape_2nd_derivative_component (const unsigned int function_no,
- const unsigned int point_no,
- const unsigned int component) const;
-
-
- //@}
- /// @name Access to values of global finite element fields
- //@{
-
- /**
- * Returns the values of a finite
- * element function restricted to
- * the current cell, face or
- * subface selected the last time
- * the <tt>reinit</tt> function
- * of the derived class was
- * called, at the quadrature
- * points.
- *
- * If the present cell is not
- * active then values are
- * interpolated to the current
- * cell and point values are
- * computed from that.
- *
- * This function may only be used
- * if the finite element in use
- * is a scalar one, i.e. has only
- * one vector component. To get
- * values of multi-component
- * elements, there is another
- * get_function_values() below,
- * returning a vector of vectors
- * of results.
- *
- * @param[in] fe_function A
- * vector of values that
- * describes (globally) the
- * finite element function that
- * this function should evaluate
- * at the quadrature points of
- * the current cell.
- *
- * @param[out] values The values
- * of the function specified by
- * fe_function at the quadrature
- * points of the current cell.
- * The object is assume to
- * already have the correct size.
- *
- * @post <code>values[q]</code>
- * will contain the value of the
- * field described by fe_function
- * at the $q$th quadrature point.
- *
- * @note The actual data type of the
- * input vector may be either a
- * Vector<T>,
- * BlockVector<T>, or one
- * of the sequential PETSc or
- * Trilinos vector wrapper
- * classes. It represents a
- * global vector of DoF values
- * associated with the DofHandler
- * object with which this
- * FEValues object was last
- * initialized. Alternatively,
- * if the vector argument is of
- * type IndexSet, then the function
- * is represented as one that
- * is either zero or one, depending
- * on whether a DoF index is in
- * the set or not.
- */
- template <class InputVector, typename number>
- void get_function_values (const InputVector& fe_function,
- std::vector<number>& values) const;
-
- /**
- * This function does the same as
- * the other
- * get_function_values(), but
- * applied to multi-component
- * (vector-valued) elements. The
- * meaning of the arguments is as
- * explained there.
- *
- * @post <code>values[q]</code>
- * is a vector of values of the
- * field described by fe_function
- * at the $q$th quadrature
- * point. The size of the vector
- * accessed by
- * <code>values[q]</code> equals
- * the number of components of
- * the finite element,
- * i.e. <code>values[q](c)</code>
- * returns the value of the $c$th
- * vector component at the $q$th
- * quadrature point.
- */
- template <class InputVector, typename number>
- void get_function_values (const InputVector &fe_function,
- std::vector<Vector<number> > &values) const;
-
- /**
- * Generate function values from
- * an arbitrary vector.
- *
- * This function offers the
- * possibility to extract
- * function values in quadrature
- * points from vectors not
- * corresponding to a whole
- * discretization.
- *
- * The vector <tt>indices</tt>
- * corresponds to the degrees of
- * freedom on a single cell. Its
- * length may even be a multiple
- * of the number of dofs per
- * cell. Then, the vectors in
- * <tt>value</tt> should allow
- * for the same multiple of the
- * components of the finite
- * element.
- *
- * You may want to use this
- * function, if you want to
- * access just a single block
- * from a BlockVector, if you
- * have a multi-level vector or
- * if you already have a local
- * representation of your finite
- * element data.
- */
- template <class InputVector, typename number>
- void get_function_values (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<number>& values) const;
-
- /**
- * Generate vector function
- * values from an arbitrary
- * vector.
- *
- * This function offers the
- * possibility to extract
- * function values in quadrature
- * points from vectors not
- * corresponding to a whole
- * discretization.
- *
- * The vector <tt>indices</tt>
- * corresponds to the degrees of
- * freedom on a single cell. Its
- * length may even be a multiple
- * of the number of dofs per
- * cell. Then, the vectors in
- * <tt>value</tt> should allow
- * for the same multiple of the
- * components of the finite
- * element.
- *
- * You may want to use this
- * function, if you want to
- * access just a single block
- * from a BlockVector, if you
- * have a multi-level vector or
- * if you already have a local
- * representation of your finite
- * element data.
- *
- * Since this function allows for
- * fairly general combinations of
- * argument sizes, be aware that
- * the checks on the arguments
- * may not detect errors.
- */
- template <class InputVector, typename number>
- void get_function_values (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Vector<number> >& values) const;
-
-
- /**
- * Generate vector function
- * values from an arbitrary
- * vector.
- *
- * This function offers the
- * possibility to extract
- * function values in quadrature
- * points from vectors not
- * corresponding to a whole
- * discretization.
- *
- * The vector <tt>indices</tt>
- * corresponds to the degrees of
- * freedom on a single cell. Its
- * length may even be a multiple
- * of the number of dofs per
- * cell. Then, the vectors in
- * <tt>value</tt> should allow
- * for the same multiple of the
- * components of the finite
- * element.
- *
- * Depending on the value of the last
- * argument, the outer vector of
- * <tt>values</tt> has either the
- * length of the quadrature rule
- * (<tt>quadrature_points_fastest
- * == false</tt>) or the length
- * of components to be filled
- * <tt>quadrature_points_fastest
- * == true</tt>. If <tt>p</tt> is
- * the current quadrature point
- * number and <tt>i</tt> is the
- * vector component of the
- * solution desired, the access
- * to <tt>values</tt> is
- * <tt>values[p][i]</tt> if
- * <tt>quadrature_points_fastest
- * == false</tt>, and
- * <tt>values[i][p]</tt>
- * otherwise.
- *
- * You may want to use this
- * function, if you want to
- * access just a single block
- * from a BlockVector, if you
- * have a multi-level vector or
- * if you already have a local
- * representation of your finite
- * element data.
- *
- * Since this function allows for
- * fairly general combinations of
- * argument sizes, be aware that
- * the checks on the arguments
- * may not detect errors.
- */
- template <class InputVector>
- void get_function_values (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- VectorSlice<std::vector<std::vector<double> > > values,
- const bool quadrature_points_fastest) const;
-
- //@}
- /// @name Access to derivatives of global finite element fields
- //@{
-
- /**
- * Compute the gradients of a
- * finite element at the
- * quadrature points of a
- * cell. This function is the
- * equivalent of the
- * corresponding
- * get_function_values() function
- * (see there for more
- * information) but evaluates the
- * finite element field's
- * gradient instead of its value.
- *
- * This function may only be used
- * if the finite element in use
- * is a scalar one, i.e. has only
- * one vector component. There is
- * a corresponding function of
- * the same name for
- * vector-valued finite elements.
- *
- * @param[in] fe_function A
- * vector of values that
- * describes (globally) the
- * finite element function that
- * this function should evaluate
- * at the quadrature points of
- * the current cell.
- *
- * @param[out] gradients The gradients
- * of the function specified by
- * fe_function at the quadrature
- * points of the current cell.
- * The gradients are computed
- * in real space (as opposed to
- * on the unit cell).
- * The object is assume to
- * already have the correct size.
- *
- * @post
- * <code>gradients[q]</code> will
- * contain the gradient of the
- * field described by fe_function
- * at the $q$th quadrature
- * point. <code>gradients[q][d]</code>
- * represents the derivative in
- * coordinate direction $d$ at
- * quadrature point $q$.
- *
- * @note The actual data type of the
- * input vector may be either a
- * Vector<T>,
- * BlockVector<T>, or one
- * of the sequential PETSc or
- * Trilinos vector wrapper
- * classes. It represents a
- * global vector of DoF values
- * associated with the DofHandler
- * object with which this
- * FEValues object was last
- * initialized. Alternatively,
- * if the vector argument is of
- * type IndexSet, then the function
- * is represented as one that
- * is either zero or one, depending
- * on whether a DoF index is in
- * the set or not.
- */
- template <class InputVector>
- void get_function_gradients (const InputVector &fe_function,
- std::vector<Tensor<1,spacedim> > &gradients) const;
-
- /**
- * This function does the same as
- * the other
- * get_function_gradients(), but
- * applied to multi-component
- * (vector-valued) elements. The
- * meaning of the arguments is as
- * explained there.
- *
- * @post
- * <code>gradients[q]</code> is a
- * vector of gradients of the
- * field described by fe_function
- * at the $q$th quadrature
- * point. The size of the vector
- * accessed by
- * <code>gradients[q]</code>
- * equals the number of
- * components of the finite
- * element,
- * i.e. <code>gradients[q][c]</code>
- * returns the gradient of the
- * $c$th vector component at the
- * $q$th quadrature
- * point. Consequently,
- * <code>gradients[q][c][d]</code>
- * is the derivative in
- * coordinate direction $d$ of
- * the $c$th vector component of
- * the vector field at quadrature
- * point $q$ of the current cell.
- */
- template <class InputVector>
- void get_function_gradients (const InputVector &fe_function,
- std::vector<std::vector<Tensor<1,spacedim> > > &gradients) const;
-
- /**
- * Function gradient access with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector>
- void get_function_gradients (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Tensor<1,spacedim> >& gradients) const;
-
- /**
- * Function gradient access with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector>
- void get_function_gradients (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- VectorSlice<std::vector<std::vector<Tensor<1,spacedim> > > > gradients,
- bool quadrature_points_fastest = false) const;
-
- /**
- * @deprecated Use
- * get_function_gradients() instead.
- */
- template <class InputVector>
- void get_function_grads (const InputVector &fe_function,
- std::vector<Tensor<1,spacedim> > &gradients) const;
-
- /**
- * @deprecated Use
- * get_function_gradients() instead.
- */
- template <class InputVector>
- void get_function_grads (const InputVector &fe_function,
- std::vector<std::vector<Tensor<1,spacedim> > > &gradients) const;
- /**
- * @deprecated Use
- * get_function_gradients() instead.
- */
- template <class InputVector>
- void get_function_grads (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Tensor<1,spacedim> >& gradients) const;
-
- /**
- * @deprecated Use
- * get_function_gradients() instead.
- */
- template <class InputVector>
- void get_function_grads (const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<std::vector<Tensor<1,spacedim> > >& gradients,
- bool quadrature_points_fastest = false) const;
-
- //@}
- /// @name Access to second derivatives (Hessian matrices and Laplacians) of global finite element fields
- //@{
-
- /**
- * Compute the tensor of second
- * derivatives of a finite
- * element at the quadrature
- * points of a cell. This
- * function is the equivalent of
- * the corresponding
- * get_function_values() function
- * (see there for more
- * information) but evaluates the
- * finite element field's second
- * derivatives instead of its
- * value.
- *
- * This function may only be used
- * if the finite element in use
- * is a scalar one, i.e. has only
- * one vector component. There is
- * a corresponding function of
- * the same name for
- * vector-valued finite elements.
- *
- * @param[in] fe_function A
- * vector of values that
- * describes (globally) the
- * finite element function that
- * this function should evaluate
- * at the quadrature points of
- * the current cell.
- *
- * @param[out] hessians The Hessians
- * of the function specified by
- * fe_function at the quadrature
- * points of the current cell.
- * The Hessians are computed
- * in real space (as opposed to
- * on the unit cell).
- * The object is assume to
- * already have the correct size.
- *
- * @post <code>hessians[q]</code>
- * will contain the Hessian of
- * the field described by
- * fe_function at the $q$th
- * quadrature
- * point. <code>gradients[q][i][j]</code>
- * represents the $(i,j)$th
- * component of the matrix of
- * second derivatives at
- * quadrature point $q$.
- *
- * @note The actual data type of the
- * input vector may be either a
- * Vector<T>,
- * BlockVector<T>, or one
- * of the sequential PETSc or
- * Trilinos vector wrapper
- * classes. It represents a
- * global vector of DoF values
- * associated with the DofHandler
- * object with which this
- * FEValues object was last
- * initialized. Alternatively,
- * if the vector argument is of
- * type IndexSet, then the function
- * is represented as one that
- * is either zero or one, depending
- * on whether a DoF index is in
- * the set or not.
- */
- template <class InputVector>
- void
- get_function_hessians (const InputVector& fe_function,
- std::vector<Tensor<2,spacedim> >& hessians) const;
-
- /**
- * This function does the same as
- * the other
- * get_function_hessians(), but
- * applied to multi-component
- * (vector-valued) elements. The
- * meaning of the arguments is as
- * explained there.
- *
- * @post <code>hessians[q]</code>
- * is a vector of Hessians of the
- * field described by fe_function
- * at the $q$th quadrature
- * point. The size of the vector
- * accessed by
- * <code>hessians[q]</code>
- * equals the number of
- * components of the finite
- * element,
- * i.e. <code>hessians[q][c]</code>
- * returns the Hessian of the
- * $c$th vector component at the
- * $q$th quadrature
- * point. Consequently,
- * <code>values[q][c][i][j]</code>
- * is the $(i,j)$th component of
- * the matrix of second
- * derivatives of the $c$th
- * vector component of the vector
- * field at quadrature point $q$
- * of the current cell.
- */
- template <class InputVector>
- void
- get_function_hessians (const InputVector &fe_function,
- std::vector<std::vector<Tensor<2,spacedim> > > &hessians,
+ //@}
+ /// @name Access to values of global finite element fields
+ //@{
+
+ /**
+ * Returns the values of a finite
+ * element function restricted to
+ * the current cell, face or
+ * subface selected the last time
+ * the <tt>reinit</tt> function
+ * of the derived class was
+ * called, at the quadrature
+ * points.
+ *
+ * If the present cell is not
+ * active then values are
+ * interpolated to the current
+ * cell and point values are
+ * computed from that.
+ *
+ * This function may only be used
+ * if the finite element in use
+ * is a scalar one, i.e. has only
+ * one vector component. To get
+ * values of multi-component
+ * elements, there is another
+ * get_function_values() below,
+ * returning a vector of vectors
+ * of results.
+ *
+ * @param[in] fe_function A
+ * vector of values that
+ * describes (globally) the
+ * finite element function that
+ * this function should evaluate
+ * at the quadrature points of
+ * the current cell.
+ *
+ * @param[out] values The values
+ * of the function specified by
+ * fe_function at the quadrature
+ * points of the current cell.
+ * The object is assumed to
+ * already have the correct size.
+ *
+ * @post <code>values[q]</code>
+ * will contain the value of the
+ * field described by fe_function
+ * at the $q$th quadrature point.
+ *
+ * @note The actual data type of the
+ * input vector may be either a
+ * Vector<T>,
+ * BlockVector<T>, or one
+ * of the sequential PETSc or
+ * Trilinos vector wrapper
+ * classes. It represents a
+ * global vector of DoF values
+ * associated with the DofHandler
+ * object with which this
+ * FEValues object was last
+ * initialized. Alternatively,
+ * if the vector argument is of
+ * type IndexSet, then the function
+ * is represented as one that
+ * is either zero or one, depending
+ * on whether a DoF index is in
+ * the set or not.
+ */
+ template <class InputVector, typename number>
+ void get_function_values (const InputVector &fe_function,
+ std::vector<number> &values) const;
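
As an illustration of the scalar variant, a minimal sketch that loops over all cells of a placeholder dof_handler and reads off the values of a placeholder solution vector at the quadrature points:

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>
#include <vector>

using namespace dealii;

template <int dim>
void evaluate_solution_at_quadrature_points (const DoFHandler<dim> &dof_handler,
                                             const Vector<double>  &solution)
{
  const QGauss<dim> quadrature (2);
  FEValues<dim> fe_values (dof_handler.get_fe(), quadrature, update_values);
  std::vector<double> values (quadrature.size());

  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    {
      fe_values.reinit (cell);
      fe_values.get_function_values (solution, values);
      // values[q] now holds the field value at quadrature point q
    }
}
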
+
+ /**
+ * This function does the same as
+ * the other
+ * get_function_values(), but
+ * applied to multi-component
+ * (vector-valued) elements. The
+ * meaning of the arguments is as
+ * explained there.
+ *
+ * @post <code>values[q]</code>
+ * is a vector of values of the
+ * field described by fe_function
+ * at the $q$th quadrature
+ * point. The size of the vector
+ * accessed by
+ * <code>values[q]</code> equals
+ * the number of components of
+ * the finite element,
+ * i.e. <code>values[q](c)</code>
+ * returns the value of the $c$th
+ * vector component at the $q$th
+ * quadrature point.
+ */
+ template <class InputVector, typename number>
+ void get_function_values (const InputVector &fe_function,
+ std::vector<Vector<number> > &values) const;
+
+ /**
+ * Generate function values from
+ * an arbitrary vector.
+ *
+ * This function offers the
+ * possibility to extract
+ * function values in quadrature
+ * points from vectors not
+ * corresponding to a whole
+ * discretization.
+ *
+ * The vector <tt>indices</tt>
+ * corresponds to the degrees of
+ * freedom on a single cell. Its
+ * length may even be a multiple
+ * of the number of dofs per
+ * cell. Then, the vectors in
+ * <tt>value</tt> should allow
+ * for the same multiple of the
+ * components of the finite
+ * element.
+ *
+ * You may want to use this
+ * function, if you want to
+ * access just a single block
+ * from a BlockVector, if you
+ * have a multi-level vector or
+ * if you already have a local
+ * representation of your finite
+ * element data.
+ */
+ template <class InputVector, typename number>
+ void get_function_values (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<number> &values) const;
+
+ /**
+ * Generate vector function
+ * values from an arbitrary
+ * vector.
+ *
+ * This function offers the
+ * possibility to extract
+ * function values in quadrature
+ * points from vectors not
+ * corresponding to a whole
+ * discretization.
+ *
+ * The vector <tt>indices</tt>
+ * corresponds to the degrees of
+ * freedom on a single cell. Its
+ * length may even be a multiple
+ * of the number of dofs per
+ * cell. Then, the vectors in
+ * <tt>value</tt> should allow
+ * for the same multiple of the
+ * components of the finite
+ * element.
+ *
+ * You may want to use this
+ * function, if you want to
+ * access just a single block
+ * from a BlockVector, if you
+ * have a multi-level vector or
+ * if you already have a local
+ * representation of your finite
+ * element data.
+ *
+ * Since this function allows for
+ * fairly general combinations of
+ * argument sizes, be aware that
+ * the checks on the arguments
+ * may not detect errors.
+ */
+ template <class InputVector, typename number>
+ void get_function_values (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Vector<number> > &values) const;
+
+
+ /**
+ * Generate vector function
+ * values from an arbitrary
+ * vector.
+ *
+ * This function offers the
+ * possibility to extract
+ * function values in quadrature
+ * points from vectors not
+ * corresponding to a whole
+ * discretization.
+ *
+ * The vector <tt>indices</tt>
+ * corresponds to the degrees of
+ * freedom on a single cell. Its
+ * length may even be a multiple
+ * of the number of dofs per
+ * cell. Then, the vectors in
+ * <tt>value</tt> should allow
+ * for the same multiple of the
+ * components of the finite
+ * element.
+ *
+ * Depending on the value of the last
+ * argument, the outer vector of
+ * <tt>values</tt> has either the
+ * length of the quadrature rule
+ * (<tt>quadrature_points_fastest
+ * == false</tt>) or the length
+ * of components to be filled
+ * <tt>quadrature_points_fastest
+ * == true</tt>. If <tt>p</tt> is
+ * the current quadrature point
+ * number and <tt>i</tt> is the
+ * vector component of the
+ * solution desired, the access
+ * to <tt>values</tt> is
+ * <tt>values[p][i]</tt> if
+ * <tt>quadrature_points_fastest
+ * == false</tt>, and
+ * <tt>values[i][p]</tt>
+ * otherwise.
+ *
+ * You may want to use this
+ * function, if you want to
+ * access just a single block
+ * from a BlockVector, if you
+ * have a multi-level vector or
+ * if you already have a local
+ * representation of your finite
+ * element data.
+ *
+ * Since this function allows for
+ * fairly general combinations of
+ * argument sizes, be aware that
+ * the checks on the arguments
+ * may not detect errors.
+ */
+ template <class InputVector>
+ void get_function_values (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ VectorSlice<std::vector<std::vector<double> > > values,
+ const bool quadrature_points_fastest) const;
+
+ //@}
+ /// @name Access to derivatives of global finite element fields
+ //@{
+
+ /**
+ * Compute the gradients of a
+ * finite element at the
+ * quadrature points of a
+ * cell. This function is the
+ * equivalent of the
+ * corresponding
+ * get_function_values() function
+ * (see there for more
+ * information) but evaluates the
+ * finite element field's
+ * gradient instead of its value.
+ *
+ * This function may only be used
+ * if the finite element in use
+ * is a scalar one, i.e. has only
+ * one vector component. There is
+ * a corresponding function of
+ * the same name for
+ * vector-valued finite elements.
+ *
+ * @param[in] fe_function A
+ * vector of values that
+ * describes (globally) the
+ * finite element function that
+ * this function should evaluate
+ * at the quadrature points of
+ * the current cell.
+ *
+ * @param[out] gradients The gradients
+ * of the function specified by
+ * fe_function at the quadrature
+ * points of the current cell.
+ * The gradients are computed
+ * in real space (as opposed to
+ * on the unit cell).
+ * The object is assumed to
+ * already have the correct size.
+ *
+ * @post
+ * <code>gradients[q]</code> will
+ * contain the gradient of the
+ * field described by fe_function
+ * at the $q$th quadrature
+ * point. <code>gradients[q][d]</code>
+ * represents the derivative in
+ * coordinate direction $d$ at
+ * quadrature point $q$.
+ *
+ * @note The actual data type of the
+ * input vector may be either a
+ * Vector<T>,
+ * BlockVector<T>, or one
+ * of the sequential PETSc or
+ * Trilinos vector wrapper
+ * classes. It represents a
+ * global vector of DoF values
+ * associated with the DofHandler
+ * object with which this
+ * FEValues object was last
+ * initialized. Alternatively,
+ * if the vector argument is of
+ * type IndexSet, then the function
+ * is represented as one that
+ * is either zero or one, depending
+ * on whether a DoF index is in
+ * the set or not.
+ */
+ template <class InputVector>
+ void get_function_gradients (const InputVector &fe_function,
+ std::vector<Tensor<1,spacedim> > &gradients) const;
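
Analogously to the values case, a minimal sketch for gradients (placeholder names; note the update_gradients flag):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/tensor.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/vector.h>
#include <vector>

using namespace dealii;

template <int dim>
void evaluate_gradients_at_quadrature_points (const DoFHandler<dim> &dof_handler,
                                              const Vector<double>  &solution)
{
  const QGauss<dim> quadrature (2);
  FEValues<dim> fe_values (dof_handler.get_fe(), quadrature, update_gradients);
  std::vector<Tensor<1,dim> > gradients (quadrature.size());

  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    {
      fe_values.reinit (cell);
      fe_values.get_function_gradients (solution, gradients);
      // gradients[q][d] is the derivative in coordinate direction d
      // at quadrature point q
    }
}
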
+
+ /**
+ * This function does the same as
+ * the other
+ * get_function_gradients(), but
+ * applied to multi-component
+ * (vector-valued) elements. The
+ * meaning of the arguments is as
+ * explained there.
+ *
+ * @post
+ * <code>gradients[q]</code> is a
+ * vector of gradients of the
+ * field described by fe_function
+ * at the $q$th quadrature
+ * point. The size of the vector
+ * accessed by
+ * <code>gradients[q]</code>
+ * equals the number of
+ * components of the finite
+ * element,
+ * i.e. <code>gradients[q][c]</code>
+ * returns the gradient of the
+ * $c$th vector component at the
+ * $q$th quadrature
+ * point. Consequently,
+ * <code>gradients[q][c][d]</code>
+ * is the derivative in
+ * coordinate direction $d$ of
+ * the $c$th vector component of
+ * the vector field at quadrature
+ * point $q$ of the current cell.
+ */
+ template <class InputVector>
+ void get_function_gradients (const InputVector &fe_function,
+ std::vector<std::vector<Tensor<1,spacedim> > > &gradients) const;
+
+ /**
+ * Function gradient access with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector>
+ void get_function_gradients (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Tensor<1,spacedim> > &gradients) const;
+
+ /**
+ * Function gradient access with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector>
+ void get_function_gradients (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ VectorSlice<std::vector<std::vector<Tensor<1,spacedim> > > > gradients,
+ bool quadrature_points_fastest = false) const;
+
+ /**
+ * @deprecated Use
+ * get_function_gradients() instead.
+ */
+ template <class InputVector>
+ void get_function_grads (const InputVector &fe_function,
+ std::vector<Tensor<1,spacedim> > &gradients) const;
+
+ /**
+ * @deprecated Use
+ * get_function_gradients() instead.
+ */
+ template <class InputVector>
+ void get_function_grads (const InputVector &fe_function,
+ std::vector<std::vector<Tensor<1,spacedim> > > &gradients) const;
+
+ /**
+ * @deprecated Use
+ * get_function_gradients() instead.
+ */
+ template <class InputVector>
+ void get_function_grads (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Tensor<1,spacedim> > &gradients) const;
+
+ /**
+ * @deprecated Use
+ * get_function_gradients() instead.
+ */
+ template <class InputVector>
+ void get_function_grads (const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<std::vector<Tensor<1,spacedim> > > &gradients,
bool quadrature_points_fastest = false) const;
- /**
- * Access to the second
- * derivatives of a function with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector>
- void get_function_hessians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Tensor<2,spacedim> >& hessians) const;
-
- /**
- * Access to the second
- * derivatives of a function with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector>
- void get_function_hessians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- VectorSlice<std::vector<std::vector<Tensor<2,spacedim> > > > hessians,
- bool quadrature_points_fastest = false) const;
-
- /**
- * @deprecated Wrapper for get_function_hessians()
- */
- template <class InputVector>
- void
- get_function_2nd_derivatives (const InputVector&,
- std::vector<Tensor<2,spacedim> >&) const;
-
- /**
- * @deprecated Wrapper for get_function_hessians()
- */
- template <class InputVector>
- void
- get_function_2nd_derivatives (const InputVector&,
- std::vector<std::vector<Tensor<2,spacedim> > >&,
- bool = false) const;
-
- /**
- * Compute the (scalar) Laplacian (i.e. the trace of the tensor of second
- * derivatives) of a finite
- * element at the quadrature
- * points of a cell. This
- * function is the equivalent of
- * the corresponding
- * get_function_values() function
- * (see there for more
- * information) but evaluates the
- * finite element field's second
- * derivatives instead of its
- * value.
- *
- * This function may only be used
- * if the finite element in use
- * is a scalar one, i.e. has only
- * one vector component. There is
- * a corresponding function of
- * the same name for
- * vector-valued finite elements.
- *
- * @param[in] fe_function A
- * vector of values that
- * describes (globally) the
- * finite element function that
- * this function should evaluate
- * at the quadrature points of
- * the current cell.
- *
- * @param[out] laplacians The Laplacians
- * of the function specified by
- * fe_function at the quadrature
- * points of the current cell.
- * The Laplacians are computed
- * in real space (as opposed to
- * on the unit cell).
- * The object is assume to
- * already have the correct size.
- *
- * @post <code>laplacians[q]</code>
- * will contain the Laplacian of
- * the field described by
- * fe_function at the $q$th
- * quadrature
- * point. <code>gradients[q][i][j]</code>
- * represents the $(i,j)$th
- * component of the matrix of
- * second derivatives at
- * quadrature point $q$.
- *
- * @post For each component of
- * the output vector, there holds
- * <code>laplacians[q]=trace(hessians[q])</code>,
- * where <tt>hessians</tt> would
- * be the output of the
- * get_function_hessians()
- * function.
- *
- * @note The actual data type of the
- * input vector may be either a
- * Vector<T>,
- * BlockVector<T>, or one
- * of the sequential PETSc or
- * Trilinos vector wrapper
- * classes. It represents a
- * global vector of DoF values
- * associated with the DofHandler
- * object with which this
- * FEValues object was last
- * initialized. Alternatively,
- * if the vector argument is of
- * type IndexSet, then the function
- * is represented as one that
- * is either zero or one, depending
- * on whether a DoF index is in
- * the set or not.
- */
- template <class InputVector, typename number>
- void
- get_function_laplacians (const InputVector& fe_function,
- std::vector<number>& laplacians) const;
-
- /**
- * This function does the same as
- * the other
- * get_function_laplacians(), but
- * applied to multi-component
- * (vector-valued) elements. The
- * meaning of the arguments is as
- * explained there.
- *
- * @post <code>laplacians[q]</code>
- * is a vector of Laplacians of the
- * field described by fe_function
- * at the $q$th quadrature
- * point. The size of the vector
- * accessed by
- * <code>laplacians[q]</code>
- * equals the number of
- * components of the finite
- * element,
- * i.e. <code>laplacians[q][c]</code>
- * returns the Laplacian of the
- * $c$th vector component at the
- * $q$th quadrature
- * point.
- *
- * @post For each component of
- * the output vector, there holds
- * <code>laplacians[q][c]=trace(hessians[q][c])</code>,
- * where <tt>hessians</tt> would
- * be the output of the
- * get_function_hessians()
- * function.
- */
- template <class InputVector, typename number>
- void
- get_function_laplacians (const InputVector &fe_function,
- std::vector<Vector<number> > &laplacians) const;
-
- /**
- * Access to the second
- * derivatives of a function with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector, typename number>
- void get_function_laplacians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<number>& laplacians) const;
-
- /**
- * Access to the second
- * derivatives of a function with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector, typename number>
- void get_function_laplacians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Vector<number> >& laplacians) const;
-
- /**
- * Access to the second
- * derivatives of a function with
- * more flexibility. see
- * get_function_values() with
- * corresponding arguments.
- */
- template <class InputVector, typename number>
- void get_function_laplacians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<std::vector<number> >& laplacians,
- bool quadrature_points_fastest = false) const;
- //@}
-
- /// @name Geometry of the cell
- //@{
-
- /**
- * Position of the <tt>i</tt>th
- * quadrature point in real space.
- */
- const Point<spacedim> & quadrature_point (const unsigned int i) const;
-
- /**
- * Return a pointer to the vector of
- * quadrature points.
- */
- const std::vector<Point<spacedim> > & get_quadrature_points () const;
-
- /**
- * Mapped quadrature weight. If
- * this object refers to a volume
- * evaluation (i.e. the derived
- * class is of type FEValues),
- * then this is the Jacobi
- * determinant times the weight
- * of the *<tt>i</tt>th unit
- * quadrature point.
- *
- * For surface evaluations
- * (i.e. classes FEFaceValues or
- * FESubfaceValues), it is the
- * mapped surface element times
- * the weight of the quadrature
- * point.
- *
- * You can think of the quantity returned
- * by this function as the volume or
- * surface element $dx, ds$ in the
- * integral that we implement here by
- * quadrature.
- */
- double JxW (const unsigned int quadrature_point) const;
-
- /**
- * Pointer to the array holding
- * the values returned by JxW().
- */
- const std::vector<double> & get_JxW_values () const;
-
- /**
- * Return the Jacobian of the
- * transformation at the specified
- * quadrature point, i.e.
- * $J_{ij}=dx_i/d\hat x_j$
- */
- const DerivativeForm<1,dim,spacedim> & jacobian (const unsigned int quadrature_point) const;
-
- /**
- * Pointer to the array holding
- * the values returned by jacobian().
- */
- const std::vector<DerivativeForm<1,dim,spacedim> > & get_jacobians () const;
-
- /**
- * Return the second derivative of the
- * transformation from unit to real cell,
- * i.e. the first derivative of the
- * Jacobian, at the specified quadrature
- * point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.
- */
- const DerivativeForm<2,dim,spacedim> & jacobian_grad (const unsigned int quadrature_point) const;
-
- /**
- * Pointer to the array holding
- * the values returned by
- * jacobian_grads().
- */
- const std::vector<DerivativeForm<2,dim,spacedim> > & get_jacobian_grads () const;
-
- /**
- * Return the inverse Jacobian of the
- * transformation at the specified
- * quadrature point, i.e.
- * $J_{ij}=d\hat x_i/dx_j$
- */
- const DerivativeForm<1,spacedim,dim> & inverse_jacobian (const unsigned int quadrature_point) const;
-
- /**
- * Pointer to the array holding
- * the values returned by
- * inverse_jacobian().
- */
- const std::vector<DerivativeForm<1,spacedim,dim> > & get_inverse_jacobians () const;
- /**
- * For a face, return the outward
- * normal vector to the cell at
- * the <tt>i</tt>th quadrature
- * point.
- *
- * For a cell of codimension one,
- * return the normal vector, as
- * it is specified by the
- * numbering of the vertices.
- *
- * The length of the vector
- * is normalized to one.
- */
- const Point<spacedim> & normal_vector (const unsigned int i) const;
-
- /**
- * Return the normal vectors at
- * the quadrature points. For a
- * face, these are the outward
- * normal vectors to the
- * cell. For a cell of
- * codimension one, the
- * orientation is given by the
- * numbering of vertices.
- */
- const std::vector<Point<spacedim> > & get_normal_vectors () const;
-
- /**
- * Transform a set of vectors,
- * one for each quadrature
- * point. The <tt>mapping</tt>
- * can be any of the ones defined
- * in MappingType.
- */
- void transform (std::vector<Tensor<1,spacedim> >& transformed,
- const std::vector<Tensor<1,dim> >& original,
- MappingType mapping) const;
-
- /**
- * @deprecated Use
- * normal_vector() instead.
- *
- * Return the outward normal vector to
- * the cell at the <tt>i</tt>th quadrature
- * point. The length of the vector
- * is normalized to one.
- */
- const Point<spacedim> & cell_normal_vector (const unsigned int i) const;
-
- /**
- * @deprecated Use
- * get_normal_vectors() instead.
- *
- * Returns the vectors normal to
- * the cell in each of the
- * quadrature points.
- */
- const std::vector<Point<spacedim> > & get_cell_normal_vectors () const;
-
- //@}
-
- /// @name Extractors Methods to extract individual components
- //@{
-
- /**
- * Create a view of the current FEValues
- * object that represents a particular
- * scalar component of the possibly
- * vector-valued finite element. The
- * concept of views is explained in the
- * documentation of the namespace
- * FEValuesViews and in particular
- * in the @ref vector_valued module.
- */
- const FEValuesViews::Scalar<dim,spacedim> &
- operator[] (const FEValuesExtractors::Scalar &scalar) const;
-
- /**
- * Create a view of the current FEValues
- * object that represents a set of
- * <code>dim</code> scalar components
- * (i.e. a vector) of the vector-valued
- * finite element. The concept of views
- * is explained in the documentation of
- * the namespace FEValuesViews and in particular
- * in the @ref vector_valued module.
- */
- const FEValuesViews::Vector<dim,spacedim> &
- operator[] (const FEValuesExtractors::Vector &vector) const;
-
- /**
- * Create a view of the current FEValues
- * object that represents a set of
- * <code>(dim*dim + dim)/2</code> scalar components
- * (i.e. a symmetric 2nd order tensor)
- * of the vector-valued
- * finite element. The concept of views
- * is explained in the documentation of
- * the namespace FEValuesViews and in particular
- * in the @ref vector_valued module.
- */
- const FEValuesViews::SymmetricTensor<2,dim,spacedim> &
- operator[] (const FEValuesExtractors::SymmetricTensor<2> &tensor) const;
-
- //@}
-
- /// @name Access to the raw data
- //@{
-
- /**
- * Constant reference to the
- * selected mapping object.
- */
- const Mapping<dim,spacedim> & get_mapping () const;
-
- /**
- * Constant reference to the
- * selected finite element
- * object.
- */
- const FiniteElement<dim,spacedim> & get_fe () const;
-
- /**
- * Return the update flags set
- * for this object.
- */
- UpdateFlags get_update_flags () const;
-
- /**
- * Return a triangulation
- * iterator to the current cell.
- */
- const typename Triangulation<dim,spacedim>::cell_iterator get_cell () const;
-
- /**
- * Return the relation of the current
- * cell to the previous cell. This
- * allows re-use of some cell data
- * (like local matrices for equations
- * with constant coefficients) if the
- * result is
- * <tt>CellSimilarity::translation</tt>.
- */
- CellSimilarity::Similarity get_cell_similarity () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
- //@}
-
-
- /**
- * This exception is thrown if
- * FEValuesBase is asked to
- * return the value of a field
- * which was not required by the
- * UpdateFlags for this
- * FEValuesBase.
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcAccessToUninitializedField);
- /**
- * @todo Document this
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcCannotInitializeField);
- /**
- * @todo Document this
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcInvalidUpdateFlag);
- /**
- * @todo Document this
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcFEDontMatch);
- /**
- * @todo Document this
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcShapeFunctionNotPrimitive,
- int,
- << "The shape function with index " << arg1
- << " is not primitive, i.e. it is vector-valued and "
- << "has more than one non-zero vector component. This "
- << "function cannot be called for these shape functions. "
- << "Maybe you want to use the same function with the "
- << "_component suffix?");
- /**
- * @todo Document this
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcFENotPrimitive);
-
- protected:
- /**
- * Objects of the FEValues
- * class need to store a pointer
- * (i.e. an iterator) to the
- * present cell in order to be
- * able to extract the values of
- * the degrees of freedom on this
- * cell in the
- * get_function_values() and
- * assorted functions. On the
- * other hand, this class should
- * also work for different
- * iterators, as long as they
- * have the same interface to
- * extract the DoF values (i.e.,
- * for example, they need to have
- * a @p get_interpolated_dof_values
- * function).
- *
- * This calls for a common base
- * class of iterator classes, and
- * making the functions we need
- * here @p virtual. On the other
- * hand, this is the only place
- * in the library where we need
- * this, and introducing a base
- * class of iterators and making
- * a function virtual penalizes
- * <em>all</em> users of the
- * iterators, which are basically
- * intended as very fast accessor
- * functions. So we do not want
- * to do this. Rather, what we do
- * here is making the functions
- * we need virtual only for use
- * with <em>this class</em>. The idea
- * is the following: have a
- * common base class which
- * declares some pure virtual
- * functions, and for each
- * possible iterator type, we
- * have a derived class which
- * stores the iterator to the
- * cell and implements these
- * functions. Since the iterator
- * classes have the same
- * interface, we can make the
- * derived classes a template,
- * templatized on the iterator
- * type.
- *
- * This way, the use of virtual
- * functions is restricted to
- * only this class, and other
- * users of iterators do not have
- * to bear the negative effects.
- *
- * @author Wolfgang Bangerth, 2003
- */
- class CellIteratorBase;
-
- /**
- * Forward declaration of classes derived
- * from CellIteratorBase. Their
- * definition and implementation is given
- * in the .cc file.
- */
- template <typename CI> class CellIterator;
- class TriaCellIterator;
-
- /**
- * Store the cell selected last time the
- * reinit() function was called. This is
- * necessary for the
- * <tt>get_function_*</tt> functions as
- * well as the functions of same name in
- * the extractor classes.
- */
- std::auto_ptr<const CellIteratorBase> present_cell;
-
- /**
- * A signal connection we use to ensure we get informed whenever the
- * triangulation changes. We need to know about that because it
- * invalidates all cell iterators and, as part of that, the
- * 'present_cell' iterator we keep around between subsequent
- * calls to reinit() in order to compute the cell similarity.
- */
- boost::signals2::connection tria_listener;
-
- /**
- * A function that is connected to the triangulation in
- * order to reset the stored 'present_cell' iterator to an invalid
- * one whenever the triangulation is changed and the iterator consequently
- * becomes invalid.
- */
- void invalidate_present_cell ();
-
- /**
- * This function is called by the various reinit() functions in derived
- * classes. Given the cell indicated by the argument, test whether
- * we have to throw away the previously stored present_cell argument
- * because it would require us to compare cells from different
- * triangulations. In checking all this, also make sure that we have
- * tria_listener connected to the triangulation to which we will set
- * present_cell right after calling this function.
- */
- void
- maybe_invalidate_previous_present_cell (const typename Triangulation<dim,spacedim>::cell_iterator &cell);
-
- /**
- * Storage for the mapping object.
- */
- const SmartPointer<const Mapping<dim,spacedim>,FEValuesBase<dim,spacedim> > mapping;
-
- /**
- * Store the finite element for later use.
- */
- const SmartPointer<const FiniteElement<dim,spacedim>,FEValuesBase<dim,spacedim> > fe;
-
-
- /**
- * Internal data of mapping.
- */
- SmartPointer<typename Mapping<dim,spacedim>::InternalDataBase,FEValuesBase<dim,spacedim> > mapping_data;
-
- /**
- * Internal data of finite element.
- */
- SmartPointer<typename Mapping<dim,spacedim>::InternalDataBase,FEValuesBase<dim,spacedim> > fe_data;
-
- /**
- * Initialize some update
- * flags. Called from the
- * @p initialize functions of
- * derived classes, which are in
- * turn called from their
- * constructors.
- *
- * Basically, this function finds
- * out using the finite element
- * and mapping object already
- * stored which flags need to be
- * set to compute everything the
- * user wants, as expressed
- * through the flags passed as
- * argument.
- */
- UpdateFlags compute_update_flags (const UpdateFlags update_flags) const;
-
- /**
- * An enum variable that can store
- * different states of the current cell
- * in comparison to the previously
- * visited cell. If wanted, additional
- * states can be checked here and used
- * in one of the methods used during
- * reinit.
- */
- CellSimilarity::Similarity cell_similarity;
-
- /**
- * A function that checks whether the
- * new cell is similar to the one
- * previously used. Then, a significant
- * amount of the data can be reused,
- * e.g. the derivatives of the basis
- * functions in real space, shape_grad.
- */
- void
- check_cell_similarity (const typename Triangulation<dim,spacedim>::cell_iterator &cell);
-
- private:
- /**
- * Copy constructor. Since
- * objects of this class are not
- * copyable, we make it private,
- * and also do not implement it.
- */
- FEValuesBase (const FEValuesBase &);
-
- /**
- * Copy operator. Since
- * objects of this class are not
- * copyable, we make it private,
- * and also do not implement it.
- */
- FEValuesBase & operator= (const FEValuesBase &);
-
- /**
- * A cache for all possible FEValuesViews
- * objects.
- */
- dealii::internal::FEValuesViews::Cache<dim,spacedim> fe_values_views_cache;
-
- /**
- * Make the view classes friends of this
- * class, since they access internal
- * data.
- */
- template <int, int> friend class FEValuesViews::Scalar;
- template <int, int> friend class FEValuesViews::Vector;
- template <int, int, int> friend class FEValuesViews::SymmetricTensor;
+ //@}
+ /// @name Access to second derivatives (Hessian matrices and Laplacians) of global finite element fields
+ //@{
+
+ /**
+ * Compute the tensor of second
+ * derivatives of a finite
+ * element at the quadrature
+ * points of a cell. This
+ * function is the equivalent of
+ * the corresponding
+ * get_function_values() function
+ * (see there for more
+ * information) but evaluates the
+ * finite element field's second
+ * derivatives instead of its
+ * value.
+ *
+ * This function may only be used
+ * if the finite element in use
+ * is a scalar one, i.e. has only
+ * one vector component. There is
+ * a corresponding function of
+ * the same name for
+ * vector-valued finite elements.
+ *
+ * @param[in] fe_function A
+ * vector of values that
+ * describes (globally) the
+ * finite element function that
+ * this function should evaluate
+ * at the quadrature points of
+ * the current cell.
+ *
+ * @param[out] hessians The Hessians
+ * of the function specified by
+ * fe_function at the quadrature
+ * points of the current cell.
+ * The Hessians are computed
+ * in real space (as opposed to
+ * on the unit cell).
+ * The object is assumed to
+ * already have the correct size.
+ *
+ * @post <code>hessians[q]</code>
+ * will contain the Hessian of
+ * the field described by
+ * fe_function at the $q$th
+ * quadrature
+ * point. <code>hessians[q][i][j]</code>
+ * represents the $(i,j)$th
+ * component of the matrix of
+ * second derivatives at
+ * quadrature point $q$.
+ *
+ * @note The actual data type of the
+ * input vector may be either a
+ * Vector<T>,
+ * BlockVector<T>, or one
+ * of the sequential PETSc or
+ * Trilinos vector wrapper
+ * classes. It represents a
+ * global vector of DoF values
+ * associated with the DoFHandler
+ * object with which this
+ * FEValues object was last
+ * initialized. Alternatively,
+ * if the vector argument is of
+ * type IndexSet, then the function
+ * is represented as one that
+ * is either zero or one, depending
+ * on whether a DoF index is in
+ * the set or not.
+ */
+ template <class InputVector>
+ void
+ get_function_hessians (const InputVector &fe_function,
+ std::vector<Tensor<2,spacedim> > &hessians) const;
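+
+ /* A minimal usage sketch, assuming a scalar element and that
+    `fe`, `quadrature_formula`, `cell`, and the global vector
+    `solution` already exist (all names here are placeholders):
+
+      FEValues<dim> fe_values (fe, quadrature_formula, update_hessians);
+      std::vector<Tensor<2,dim> > hessians (quadrature_formula.size());
+      fe_values.reinit (cell);
+      fe_values.get_function_hessians (solution, hessians);
+      // hessians[q][i][j] is the (i,j)th second derivative at point q
+ */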
+
+ /**
+ * This function does the same as
+ * the other
+ * get_function_hessians(), but
+ * applied to multi-component
+ * (vector-valued) elements. The
+ * meaning of the arguments is as
+ * explained there.
+ *
+ * @post <code>hessians[q]</code>
+ * is a vector of Hessians of the
+ * field described by fe_function
+ * at the $q$th quadrature
+ * point. The size of the vector
+ * accessed by
+ * <code>hessians[q]</code>
+ * equals the number of
+ * components of the finite
+ * element,
+ * i.e. <code>hessians[q][c]</code>
+ * returns the Hessian of the
+ * $c$th vector component at the
+ * $q$th quadrature
+ * point. Consequently,
+ * <code>hessians[q][c][i][j]</code>
+ * is the $(i,j)$th component of
+ * the matrix of second
+ * derivatives of the $c$th
+ * vector component of the vector
+ * field at quadrature point $q$
+ * of the current cell.
+ */
+ template <class InputVector>
+ void
+ get_function_hessians (const InputVector &fe_function,
+ std::vector<std::vector<Tensor<2,spacedim> > > &hessians,
+ bool quadrature_points_fastest = false) const;
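+
+ /* A sketch for the vector-valued case, with placeholder names and
+    the default ordering (quadrature points outermost):
+
+      std::vector<std::vector<Tensor<2,dim> > >
+        hessians (quadrature_formula.size(),
+                  std::vector<Tensor<2,dim> > (fe.n_components()));
+      fe_values.get_function_hessians (solution, hessians);
+      // hessians[q][c]: Hessian of vector component c at quadrature point q
+ */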
+
+ /**
+ * Access to the second
+ * derivatives of a function with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector>
+ void get_function_hessians (
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Tensor<2,spacedim> > &hessians) const;
+
+ /**
+ * Access to the second
+ * derivatives of a function with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector>
+ void get_function_hessians (
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ VectorSlice<std::vector<std::vector<Tensor<2,spacedim> > > > hessians,
+ bool quadrature_points_fastest = false) const;
+
+ /**
+ * @deprecated Wrapper for get_function_hessians()
+ */
+ template <class InputVector>
+ void
+ get_function_2nd_derivatives (const InputVector &,
+ std::vector<Tensor<2,spacedim> > &) const;
+
+ /**
+ * @deprecated Wrapper for get_function_hessians()
+ */
+ template <class InputVector>
+ void
+ get_function_2nd_derivatives (const InputVector &,
+ std::vector<std::vector<Tensor<2,spacedim> > > &,
+ bool = false) const;
+
+ /**
+ * Compute the (scalar) Laplacian (i.e. the trace of the tensor of second
+ * derivatives) of a finite
+ * element at the quadrature
+ * points of a cell. This
+ * function is the equivalent of
+ * the corresponding
+ * get_function_values() function
+ * (see there for more
+ * information) but evaluates the
+ * finite element field's second
+ * derivatives instead of its
+ * value.
+ *
+ * This function may only be used
+ * if the finite element in use
+ * is a scalar one, i.e. has only
+ * one vector component. There is
+ * a corresponding function of
+ * the same name for
+ * vector-valued finite elements.
+ *
+ * @param[in] fe_function A
+ * vector of values that
+ * describes (globally) the
+ * finite element function that
+ * this function should evaluate
+ * at the quadrature points of
+ * the current cell.
+ *
+ * @param[out] laplacians The Laplacians
+ * of the function specified by
+ * fe_function at the quadrature
+ * points of the current cell.
+ * The Laplacians are computed
+ * in real space (as opposed to
+ * on the unit cell).
+ * The object is assumed to
+ * already have the correct size.
+ *
+ * @post <code>laplacians[q]</code>
+ * will contain the Laplacian of
+ * the field described by
+ * fe_function at the $q$th
+ * quadrature point.
+ *
+ * @post For each component of
+ * the output vector, there holds
+ * <code>laplacians[q]=trace(hessians[q])</code>,
+ * where <tt>hessians</tt> would
+ * be the output of the
+ * get_function_hessians()
+ * function.
+ *
+ * @note The actual data type of the
+ * input vector may be either a
+ * Vector<T>,
+ * BlockVector<T>, or one
+ * of the sequential PETSc or
+ * Trilinos vector wrapper
+ * classes. It represents a
+ * global vector of DoF values
+ * associated with the DoFHandler
+ * object with which this
+ * FEValues object was last
+ * initialized. Alternatively,
+ * if the vector argument is of
+ * type IndexSet, then the function
+ * is represented as one that
+ * is either zero or one, depending
+ * on whether a DoF index is in
+ * the set or not.
+ */
+ template <class InputVector, typename number>
+ void
+ get_function_laplacians (const InputVector &fe_function,
+ std::vector<number> &laplacians) const;
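+
+ /* A minimal sketch with placeholder names; update_hessians must be
+    among the update flags, since the Laplacian is derived from the
+    second derivatives:
+
+      std::vector<double> laplacians (quadrature_formula.size());
+      fe_values.get_function_laplacians (solution, laplacians);
+      // laplacians[q] equals the trace of the Hessian at point q
+ */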
+
+ /**
+ * This function does the same as
+ * the other
+ * get_function_laplacians(), but
+ * applied to multi-component
+ * (vector-valued) elements. The
+ * meaning of the arguments is as
+ * explained there.
+ *
+ * @post <code>laplacians[q]</code>
+ * is a vector of Laplacians of the
+ * field described by fe_function
+ * at the $q$th quadrature
+ * point. The size of the vector
+ * accessed by
+ * <code>laplacians[q]</code>
+ * equals the number of
+ * components of the finite
+ * element,
+ * i.e. <code>laplacians[q][c]</code>
+ * returns the Laplacian of the
+ * $c$th vector component at the
+ * $q$th quadrature
+ * point.
+ *
+ * @post For each component of
+ * the output vector, there holds
+ * <code>laplacians[q][c]=trace(hessians[q][c])</code>,
+ * where <tt>hessians</tt> would
+ * be the output of the
+ * get_function_hessians()
+ * function.
+ */
+ template <class InputVector, typename number>
+ void
+ get_function_laplacians (const InputVector &fe_function,
+ std::vector<Vector<number> > &laplacians) const;
+
+ /**
+ * Access to the second
+ * derivatives of a function with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector, typename number>
+ void get_function_laplacians (
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<number> &laplacians) const;
+
+ /**
+ * Access to the second
+ * derivatives of a function with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector, typename number>
+ void get_function_laplacians (
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Vector<number> > &laplacians) const;
+
+ /**
+ * Access to the second
+ * derivatives of a function with
+ * more flexibility. See
+ * get_function_values() with
+ * corresponding arguments.
+ */
+ template <class InputVector, typename number>
+ void get_function_laplacians (
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<std::vector<number> > &laplacians,
+ bool quadrature_points_fastest = false) const;
+ //@}
+
+ /// @name Geometry of the cell
+ //@{
+
+ /**
+ * Position of the <tt>i</tt>th
+ * quadrature point in real space.
+ */
+ const Point<spacedim> &quadrature_point (const unsigned int i) const;
+
+ /**
+ * Return a pointer to the vector of
+ * quadrature points.
+ */
+ const std::vector<Point<spacedim> > &get_quadrature_points () const;
+
+ /**
+ * Mapped quadrature weight. If
+ * this object refers to a volume
+ * evaluation (i.e. the derived
+ * class is of type FEValues),
+ * then this is the Jacobi
+ * determinant times the weight
+ * of the <tt>i</tt>th unit
+ * quadrature point.
+ *
+ * For surface evaluations
+ * (i.e. classes FEFaceValues or
+ * FESubfaceValues), it is the
+ * mapped surface element times
+ * the weight of the quadrature
+ * point.
+ *
+ * You can think of the quantity returned
+ * by this function as the volume or
+ * surface element $dx, ds$ in the
+ * integral that we implement here by
+ * quadrature.
+ */
+ double JxW (const unsigned int quadrature_point) const;
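+
+ /* JxW() typically serves as the discrete integration weight in
+    assembly loops, e.g. for a Laplace cell matrix (placeholder
+    names, following the usual tutorial pattern):
+
+      for (unsigned int q=0; q<n_q_points; ++q)
+        for (unsigned int i=0; i<dofs_per_cell; ++i)
+          for (unsigned int j=0; j<dofs_per_cell; ++j)
+            cell_matrix(i,j) += fe_values.shape_grad(i,q) *
+                                fe_values.shape_grad(j,q) *
+                                fe_values.JxW(q);
+ */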
+
+ /**
+ * Pointer to the array holding
+ * the values returned by JxW().
+ */
+ const std::vector<double> &get_JxW_values () const;
+
+ /**
+ * Return the Jacobian of the
+ * transformation at the specified
+ * quadrature point, i.e.
+ * $J_{ij}=dx_i/d\hat x_j$
+ */
+ const DerivativeForm<1,dim,spacedim> &jacobian (const unsigned int quadrature_point) const;
+
+ /**
+ * Pointer to the array holding
+ * the values returned by jacobian().
+ */
+ const std::vector<DerivativeForm<1,dim,spacedim> > &get_jacobians () const;
+
+ /**
+ * Return the second derivative of the
+ * transformation from unit to real cell,
+ * i.e. the first derivative of the
+ * Jacobian, at the specified quadrature
+ * point, i.e. $G_{ijk}=dJ_{jk}/d\hat x_i$.
+ */
+ const DerivativeForm<2,dim,spacedim> &jacobian_grad (const unsigned int quadrature_point) const;
+
+ /**
+ * Pointer to the array holding
+ * the values returned by
+ * jacobian_grads().
+ */
+ const std::vector<DerivativeForm<2,dim,spacedim> > &get_jacobian_grads () const;
+
+ /**
+ * Return the inverse Jacobian of the
+ * transformation at the specified
+ * quadrature point, i.e.
+ * $J_{ij}=d\hat x_i/dx_j$
+ */
+ const DerivativeForm<1,spacedim,dim> &inverse_jacobian (const unsigned int quadrature_point) const;
+
+ /**
+ * Pointer to the array holding
+ * the values returned by
+ * inverse_jacobian().
+ */
+ const std::vector<DerivativeForm<1,spacedim,dim> > &get_inverse_jacobians () const;
+ /**
+ * For a face, return the outward
+ * normal vector to the cell at
+ * the <tt>i</tt>th quadrature
+ * point.
+ *
+ * For a cell of codimension one,
+ * return the normal vector, as
+ * it is specified by the
+ * numbering of the vertices.
+ *
+ * The length of the vector
+ * is normalized to one.
+ */
+ const Point<spacedim> &normal_vector (const unsigned int i) const;
+
+ /**
+ * Return the normal vectors at
+ * the quadrature points. For a
+ * face, these are the outward
+ * normal vectors to the
+ * cell. For a cell of
+ * codimension one, the
+ * orientation is given by the
+ * numbering of vertices.
+ */
+ const std::vector<Point<spacedim> > &get_normal_vectors () const;
+
+ /**
+ * Transform a set of vectors,
+ * one for each quadrature
+ * point. The <tt>mapping</tt>
+ * can be any of the ones defined
+ * in MappingType.
+ */
+ void transform (std::vector<Tensor<1,spacedim> > &transformed,
+ const std::vector<Tensor<1,dim> > &original,
+ MappingType mapping) const;
+
+ /**
+ * @deprecated Use
+ * normal_vector() instead.
+ *
+ * Return the outward normal vector to
+ * the cell at the <tt>i</tt>th quadrature
+ * point. The length of the vector
+ * is normalized to one.
+ */
+ const Point<spacedim> &cell_normal_vector (const unsigned int i) const;
+
+ /**
+ * @deprecated Use
+ * get_normal_vectors() instead.
+ *
+ * Returns the vectors normal to
+ * the cell in each of the
+ * quadrature points.
+ */
+ const std::vector<Point<spacedim> > &get_cell_normal_vectors () const;
+
+ //@}
+
+ /// @name Extractors Methods to extract individual components
+ //@{
+
+ /**
+ * Create a view of the current FEValues
+ * object that represents a particular
+ * scalar component of the possibly
+ * vector-valued finite element. The
+ * concept of views is explained in the
+ * documentation of the namespace
+ * FEValuesViews and in particular
+ * in the @ref vector_valued module.
+ */
+ const FEValuesViews::Scalar<dim,spacedim> &
+ operator[] (const FEValuesExtractors::Scalar &scalar) const;
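+
+ /* A sketch of the extractor mechanism (placeholder names): if
+    component dim of a vector-valued element holds, say, a pressure,
+    then
+
+      const FEValuesExtractors::Scalar pressure (dim);
+      const double p_value = fe_values[pressure].value (i, q);
+
+    gives the pressure part of shape function i at quadrature point q. */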
+
+ /**
+ * Create a view of the current FEValues
+ * object that represents a set of
+ * <code>dim</code> scalar components
+ * (i.e. a vector) of the vector-valued
+ * finite element. The concept of views
+ * is explained in the documentation of
+ * the namespace FEValuesViews and in particular
+ * in the @ref vector_valued module.
+ */
+ const FEValuesViews::Vector<dim,spacedim> &
+ operator[] (const FEValuesExtractors::Vector &vector) const;
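+
+ /* Similarly, a sketch for the first dim components interpreted as a
+    velocity field (placeholder names, assuming dim==spacedim):
+
+      const FEValuesExtractors::Vector velocities (0);
+      const Tensor<1,dim> u     = fe_values[velocities].value (i, q);
+      const double        div_u = fe_values[velocities].divergence (i, q);
+ */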
+
+ /**
+ * Create a view of the current FEValues
+ * object that represents a set of
+ * <code>(dim*dim + dim)/2</code> scalar components
+ * (i.e. a symmetric 2nd order tensor)
+ * of the vector-valued
+ * finite element. The concept of views
+ * is explained in the documentation of
+ * the namespace FEValuesViews and in particular
+ * in the @ref vector_valued module.
+ */
+ const FEValuesViews::SymmetricTensor<2,dim,spacedim> &
+ operator[] (const FEValuesExtractors::SymmetricTensor<2> &tensor) const;
+
+ //@}
+
+ /// @name Access to the raw data
+ //@{
+
+ /**
+ * Constant reference to the
+ * selected mapping object.
+ */
+ const Mapping<dim,spacedim> &get_mapping () const;
+
+ /**
+ * Constant reference to the
+ * selected finite element
+ * object.
+ */
+ const FiniteElement<dim,spacedim> &get_fe () const;
+
+ /**
+ * Return the update flags set
+ * for this object.
+ */
+ UpdateFlags get_update_flags () const;
+
+ /**
+ * Return a triangulation
+ * iterator to the current cell.
+ */
+ const typename Triangulation<dim,spacedim>::cell_iterator get_cell () const;
+
+ /**
+ * Return the relation of the current
+ * cell to the previous cell. This
+ * allows re-use of some cell data
+ * (like local matrices for equations
+ * with constant coefficients) if the
+ * result is
+ * <tt>CellSimilarity::translation</tt>.
+ */
+ CellSimilarity::Similarity get_cell_similarity () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
+
+ /**
+ * This exception is thrown if
+ * FEValuesBase is asked to
+ * return the value of a field
+ * which was not required by the
+ * UpdateFlags for this
+ * FEValuesBase.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcAccessToUninitializedField);
+ /**
+ * @todo Document this
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcCannotInitializeField);
+ /**
+ * @todo Document this
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcInvalidUpdateFlag);
+ /**
+ * @todo Document this
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcFEDontMatch);
+ /**
+ * @todo Document this
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcShapeFunctionNotPrimitive,
+ int,
+ << "The shape function with index " << arg1
+ << " is not primitive, i.e. it is vector-valued and "
+ << "has more than one non-zero vector component. This "
+ << "function cannot be called for these shape functions. "
+ << "Maybe you want to use the same function with the "
+ << "_component suffix?");
+ /**
+ * @todo Document this
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcFENotPrimitive);
+
+ protected:
+ /**
+ * Objects of the FEValues
+ * class need to store a pointer
+ * (i.e. an iterator) to the
+ * present cell in order to be
+ * able to extract the values of
+ * the degrees of freedom on this
+ * cell in the
+ * get_function_values() and
+ * assorted functions. On the
+ * other hand, this class should
+ * also work for different
+ * iterators, as long as they
+ * have the same interface to
+ * extract the DoF values (i.e.,
+ * for example, they need to have
+ * a @p get_interpolated_dof_values
+ * function).
+ *
+ * This calls for a common base
+ * class of iterator classes, and
+ * making the functions we need
+ * here @p virtual. On the other
+ * hand, this is the only place
+ * in the library where we need
+ * this, and introducing a base
+ * class of iterators and making
+ * a function virtual penalizes
+ * <em>all</em> users of the
+ * iterators, which are basically
+ * intended as very fast accessor
+ * functions. So we do not want
+ * to do this. Rather, what we do
+ * here is making the functions
+ * we need virtual only for use
+ * with <em>this class</em>. The idea
+ * is the following: have a
+ * common base class which
+ * declares some pure virtual
+ * functions, and for each
+ * possible iterator type, we
+ * have a derived class which
+ * stores the iterator to the
+ * cell and implements these
+ * functions. Since the iterator
+ * classes have the same
+ * interface, we can make the
+ * derived classes a template,
+ * templatized on the iterator
+ * type.
+ *
+ * This way, the use of virtual
+ * functions is restricted to
+ * only this class, and other
+ * users of iterators do not have
+ * to bear the negative effects.
+ *
+ * @author Wolfgang Bangerth, 2003
+ */
+ class CellIteratorBase;
+
+ /**
+ * Forward declaration of classes derived
+ * from CellIteratorBase. Their
+ * definition and implementation is given
+ * in the .cc file.
+ */
+ template <typename CI> class CellIterator;
+ class TriaCellIterator;
+
+ /**
+ * Store the cell selected last time the
+ * reinit() function was called. This is
+ * necessary for the
+ * <tt>get_function_*</tt> functions as
+ * well as the functions of same name in
+ * the extractor classes.
+ */
+ std::auto_ptr<const CellIteratorBase> present_cell;
+
+ /**
+ * A signal connection we use to ensure we get informed whenever the
+ * triangulation changes. We need to know about that because it
+ * invalidates all cell iterators and, as part of that, the
+ * 'present_cell' iterator we keep around between subsequent
+ * calls to reinit() in order to compute the cell similarity.
+ */
+ boost::signals2::connection tria_listener;
+
+ /**
+ * A function that is connected to the triangulation in
+ * order to reset the stored 'present_cell' iterator to an invalid
+ * one whenever the triangulation is changed and the iterator consequently
+ * becomes invalid.
+ */
+ void invalidate_present_cell ();
+
+ /**
+ * This function is called by the various reinit() functions in derived
+ * classes. Given the cell indicated by the argument, test whether
+ * we have to throw away the previously stored present_cell argument
+ * because it would require us to compare cells from different
+ * triangulations. In checking all this, also make sure that we have
+ * tria_listener connected to the triangulation to which we will set
+ * present_cell right after calling this function.
+ */
+ void
+ maybe_invalidate_previous_present_cell (const typename Triangulation<dim,spacedim>::cell_iterator &cell);
+
+ /**
+ * Storage for the mapping object.
+ */
+ const SmartPointer<const Mapping<dim,spacedim>,FEValuesBase<dim,spacedim> > mapping;
+
+ /**
+ * Store the finite element for later use.
+ */
+ const SmartPointer<const FiniteElement<dim,spacedim>,FEValuesBase<dim,spacedim> > fe;
+
+
+ /**
+ * Internal data of mapping.
+ */
+ SmartPointer<typename Mapping<dim,spacedim>::InternalDataBase,FEValuesBase<dim,spacedim> > mapping_data;
+
+ /**
+ * Internal data of finite element.
+ */
+ SmartPointer<typename Mapping<dim,spacedim>::InternalDataBase,FEValuesBase<dim,spacedim> > fe_data;
+
+ /**
+ * Initialize some update
+ * flags. Called from the
+ * @p initialize functions of
+ * derived classes, which are in
+ * turn called from their
+ * constructors.
+ *
+ * Basically, this function finds
+ * out using the finite element
+ * and mapping object already
+ * stored which flags need to be
+ * set to compute everything the
+ * user wants, as expressed
+ * through the flags passed as
+ * argument.
+ */
+ UpdateFlags compute_update_flags (const UpdateFlags update_flags) const;
+
+ /**
+ * An enum variable that can store
+ * different states of the current cell
+ * in comparison to the previously
+ * visited cell. If wanted, additional
+ * states can be checked here and used
+ * in one of the methods used during
+ * reinit.
+ */
+ CellSimilarity::Similarity cell_similarity;
+
+ /**
+ * A function that checks whether the
+ * new cell is similar to the one
+ * previously used. Then, a significant
+ * amount of the data can be reused,
+ * e.g. the derivatives of the basis
+ * functions in real space, shape_grad.
+ */
+ void
+ check_cell_similarity (const typename Triangulation<dim,spacedim>::cell_iterator &cell);
+
+ private:
+ /**
+ * Copy constructor. Since
+ * objects of this class are not
+ * copyable, we make it private,
+ * and also do not implement it.
+ */
+ FEValuesBase (const FEValuesBase &);
+
+ /**
+ * Copy operator. Since
+ * objects of this class are not
+ * copyable, we make it private,
+ * and also do not implement it.
+ */
+ FEValuesBase &operator= (const FEValuesBase &);
+
+ /**
+ * A cache for all possible FEValuesViews
+ * objects.
+ */
+ dealii::internal::FEValuesViews::Cache<dim,spacedim> fe_values_views_cache;
+
+ /**
+ * Make the view classes friends of this
+ * class, since they access internal
+ * data.
+ */
+ template <int, int> friend class FEValuesViews::Scalar;
+ template <int, int> friend class FEValuesViews::Vector;
+ template <int, int, int> friend class FEValuesViews::SymmetricTensor;
};
template <int dim, class VECTOR = Vector<double>, int spacedim=dim >
class MappingQ1Eulerian : public MappingQ1<dim,spacedim>
{
- public:
-
- /**
- * Constructor. It takes a
- * <tt>Vector<double> &</tt> as its
- * first argument to specify the
- * transformation of the whole
- * problem from the reference to
- * the current configuration.
- * The organization of the
- * elements in the @p Vector
- * must follow the concept how
- * deal.II stores solutions that
- * are associated to a
- * triangulation. This is
- * automatically the case if the
- * @p Vector represents the
- * solution of the previous step
- * of a nonlinear problem.
- * Alternatively, the @p Vector
- * can be initialized by
- * <tt>DoFAccessor::set_dof_values()</tt>.
- */
- MappingQ1Eulerian (const VECTOR &euler_transform_vectors,
- const DoFHandler<dim,spacedim> &shiftmap_dof_handler);
-
- /**
- * Return a pointer to a copy of the
- * present object. The caller of this
- * copy then assumes ownership of it.
- */
- virtual
- Mapping<dim,spacedim> * clone () const;
-
- /**
- * Always returns @p false because
- * MappingQ1Eulerian does not in general
- * preserve vertex locations (unless the
- * translation vector happens to provide
- * for zero displacements at vertex
- * locations).
- */
- bool preserves_vertex_locations () const;
-
- /**
- * Exception.
- */
- DeclException0 (ExcInactiveCell);
-
-
-
- protected:
- /**
- * Implementation of the interface in
- * MappingQ1. Overrides the function in
- * the base class, since we cannot use
- * any cell similarity for this class.
- */
- virtual void
- fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
- typename std::vector<Point<spacedim> > &quadrature_points,
- std::vector<double> &JxW_values,
- std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
- std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
- std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
- std::vector<Point<spacedim> > &cell_normal_vectors,
- CellSimilarity::Similarity &cell_similarity) const;
-
- /**
- * Reference to the vector of
- * shifts.
- */
- SmartPointer<const VECTOR, MappingQ1Eulerian<dim,VECTOR,spacedim> > euler_transform_vectors;
-
- /**
- * Pointer to the DoFHandler to
- * which the mapping vector is
- * associated.
- */
- SmartPointer<const DoFHandler<dim,spacedim>,MappingQ1Eulerian<dim,VECTOR,spacedim> > shiftmap_dof_handler;
-
-
- private:
- /**
- * Computes the support points of
- * the mapping. For
- * @p MappingQ1Eulerian these
- * are the vertices.
- */
- virtual void compute_mapping_support_points(
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- std::vector<Point<spacedim> > &a) const;
+ public:
+
+ /**
+ * Constructor. It takes a
+ * <tt>Vector<double> &</tt> as its
+ * first argument to specify the
+ * transformation of the whole
+ * problem from the reference to
+ * the current configuration.
+ * The organization of the
+ * elements in the @p Vector
+ * must follow the way
+ * deal.II stores solutions that
+ * are associated with a
+ * triangulation. This is
+ * automatically the case if the
+ * @p Vector represents the
+ * solution of the previous step
+ * of a nonlinear problem.
+ * Alternatively, the @p Vector
+ * can be initialized by
+ * <tt>DoFAccessor::set_dof_values()</tt>.
+ */
- MappingQ1Eulerian (const VECTOR &euler_transform_vectors,
++ MappingQ1Eulerian (const VECTOR &euler_transform_vectors,
+ const DoFHandler<dim,spacedim> &shiftmap_dof_handler);
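+
+ /* A usage sketch with placeholder names: `dof_handler` is built on a
+    finite element with dim displacement components and `displacement`
+    is a Vector<double> laid out on it. The mapping can then be passed
+    wherever a Mapping is accepted:
+
+      MappingQ1Eulerian<dim> mapping (displacement, dof_handler);
+      FEValues<dim> fe_values (mapping, fe, quadrature_formula,
+                               update_values | update_JxW_values);
+ */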
+
+ /**
+ * Return a pointer to a copy of the
+ * present object. The caller of this
+ * copy then assumes ownership of it.
+ */
+ virtual
+ Mapping<dim,spacedim> *clone () const;
+
+ /**
+ * Always returns @p false because
+ * MappingQ1Eulerian does not in general
+ * preserve vertex locations (unless the
+ * translation vector happens to provide
+ * for zero displacements at vertex
+ * locations).
+ */
+ bool preserves_vertex_locations () const;
+
+ /**
+ * Exception.
+ */
+ DeclException0 (ExcInactiveCell);
+
+
+
+ protected:
+ /**
+ * Implementation of the interface in
+ * MappingQ1. Overrides the function in
+ * the base class, since we cannot use
+ * any cell similarity for this class.
+ */
+ virtual void
+ fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
+ typename std::vector<Point<spacedim> > &quadrature_points,
+ std::vector<double> &JxW_values,
+ std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
+ std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
+ std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
+ std::vector<Point<spacedim> > &cell_normal_vectors,
+ CellSimilarity::Similarity &cell_similarity) const;
+
+ /**
+ * Reference to the vector of
+ * shifts.
+ */
+ SmartPointer<const VECTOR, MappingQ1Eulerian<dim,VECTOR,spacedim> > euler_transform_vectors;
+
+ /**
+ * Pointer to the DoFHandler to
+ * which the mapping vector is
+ * associated.
+ */
+ SmartPointer<const DoFHandler<dim,spacedim>,MappingQ1Eulerian<dim,VECTOR,spacedim> > shiftmap_dof_handler;
+
+
+ private:
+ /**
+ * Computes the support points of
+ * the mapping. For
+ * @p MappingQ1Eulerian these
+ * are the vertices.
+ */
+ virtual void compute_mapping_support_points(
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ std::vector<Point<spacedim> > &a) const;
};
template <int dim, class VECTOR = Vector<double>, int spacedim=dim >
class MappingQEulerian : public MappingQ<dim, spacedim>
{
- const VECTOR &euler_vector,
- const DoFHandler<dim,spacedim> &euler_dof_handler);
+ public:
+ /**
+ * Constructor. The first argument is
+ * the polynomial degree of the desired
+ * Qp mapping. It then takes a
+ * <tt>Vector<double> &</tt> to specify the
+ * transformation of the domain
+ * from the reference to
+ * the current configuration.
+ * The organization of the
+ * elements in the @p Vector
+ * must follow the way
+ * deal.II stores solutions that
+ * are associated with a
+ * triangulation. This is
+ * automatically the case if the
+ * @p Vector represents the
+ * solution of the previous step
+ * of a nonlinear problem.
+ * Alternatively, the @p Vector
+ * can be initialized by
+ * <tt>DoFAccessor::set_dof_values()</tt>.
+ */
+
+ MappingQEulerian (const unsigned int degree,
++ const VECTOR &euler_vector,
++ const DoFHandler<dim,spacedim> &euler_dof_handler);
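+
+ /* A sketch analogous to the MappingQ1Eulerian example, but with a
+    higher-order displacement field (placeholder names):
+
+      MappingQEulerian<dim> mapping (2, displacement, dof_handler);
+
+    where 2 is the polynomial degree of the Qp mapping and
+    `displacement` is laid out on `dof_handler`. */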
+
+ /**
+ * Return a pointer to a copy of the
+ * present object. The caller of this
+ * copy then assumes ownership of it.
+ */
+ virtual
+ Mapping<dim,spacedim> *clone () const;
+
+ /**
+ * Always returns @p false because
+ * MappingQEulerian does not in general
+ * preserve vertex locations (unless the
+ * translation vector happens to provide
+ * for zero displacements at vertex
+ * locations).
+ */
+ bool preserves_vertex_locations () const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInactiveCell);
+
+ protected:
+ /**
+ * Implementation of the interface in
+ * MappingQ. Overrides the function in
+ * the base class, since we cannot use
+ * any cell similarity for this class.
+ */
+ virtual void
+ fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const Quadrature<dim> &quadrature,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
+ typename std::vector<Point<spacedim> > &quadrature_points,
+ std::vector<double> &JxW_values,
+ std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
+ std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
+ std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
+ std::vector<Point<spacedim> > &cell_normal_vectors,
+ CellSimilarity::Similarity &cell_similarity) const;
+
+ /**
+ * Reference to the vector of
+ * shifts.
+ */
+
+ SmartPointer<const VECTOR, MappingQEulerian<dim,VECTOR,spacedim> > euler_vector;
+
+ /**
+ * Pointer to the DoFHandler to
+ * which the mapping vector is
+ * associated.
+ */
+
+ SmartPointer<const DoFHandler<dim,spacedim>,MappingQEulerian<dim,VECTOR,spacedim> > euler_dof_handler;
+
+
+ private:
+
+ /**
+ * Special quadrature rule used
+ * to define the support points
+ * in the reference configuration.
+ */
+
+ class SupportQuadrature : public Quadrature<dim>
+ {
public:
- /**
- * Constructor. The first argument is
- * the polynomical degree of the desired
- * Qp mapping. It then takes a
- * <tt>Vector<double> &</tt> to specify the
- * transformation of the domain
- * from the reference to
- * the current configuration.
- * The organization of the
- * elements in the @p Vector
- * must follow the concept how
- * deal.II stores solutions that
- * are associated to a
- * triangulation. This is
- * automatically the case if the
- * @p Vector represents the
- * solution of the previous step
- * of a nonlinear problem.
- * Alternatively, the @p Vector
- * can be initialized by
- * <tt>DoFAccessor::set_dof_values()</tt>.
- */
-
- MappingQEulerian (const unsigned int degree,
- const VECTOR &euler_vector,
- const DoFHandler<dim,spacedim> &euler_dof_handler);
-
- /**
- * Return a pointer to a copy of the
- * present object. The caller of this
- * copy then assumes ownership of it.
- */
- virtual
- Mapping<dim,spacedim> * clone () const;
-
- /**
- * Always returns @p false because
- * MappingQ1Eulerian does not in general
- * preserve vertex locations (unless the
- * translation vector happens to provide
- * for zero displacements at vertex
- * locations).
- */
- bool preserves_vertex_locations () const;
-
- /**
- * Exception
- */
- DeclException0 (ExcInactiveCell);
-
- protected:
- /**
- * Implementation of the interface in
- * MappingQ. Overrides the function in
- * the base class, since we cannot use
- * any cell similarity for this class.
- */
- virtual void
- fill_fe_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const Quadrature<dim> &quadrature,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
- typename std::vector<Point<spacedim> > &quadrature_points,
- std::vector<double> &JxW_values,
- std::vector<DerivativeForm<1,dim,spacedim> > &jacobians,
- std::vector<DerivativeForm<2,dim,spacedim> > &jacobian_grads,
- std::vector<DerivativeForm<1,spacedim,dim> > &inverse_jacobians,
- std::vector<Point<spacedim> > &cell_normal_vectors,
- CellSimilarity::Similarity &cell_similarity) const;
-
- /**
- * Reference to the vector of
- * shifts.
- */
-
- SmartPointer<const VECTOR, MappingQEulerian<dim,VECTOR,spacedim> > euler_vector;
-
- /**
- * Pointer to the DoFHandler to
- * which the mapping vector is
- * associated.
- */
-
- SmartPointer<const DoFHandler<dim,spacedim>,MappingQEulerian<dim,VECTOR,spacedim> > euler_dof_handler;
-
-
- private:
-
- /**
- * Special quadrature rule used
- * to define the support points
- * in the reference configuration.
- */
-
- class SupportQuadrature : public Quadrature<dim>
- {
- public:
- /**
- * Constructor, with an argument
- * defining the desired polynomial
- * degree.
- */
-
- SupportQuadrature (const unsigned int map_degree);
-
- };
-
- /**
- * A member variable holding the
- * quadrature points in the right
- * order.
- */
- const SupportQuadrature support_quadrature;
-
- /**
- * FEValues object used to query the
- * the given finite element field
- * at the support points in the
- * reference configuration.
- *
- * The variable is marked as
- * mutable since we have to call
- * FEValues::reinit from
- * compute_mapping_support_points,
- * a function that is 'const'.
- */
- mutable FEValues<dim,spacedim> fe_values;
-
- /**
- * A variable to guard access to
- * the fe_values variable.
- */
- mutable Threads::ThreadMutex fe_values_mutex;
-
- /**
- * Compute the positions of the
- * support points in the current
- * configuration
- */
- virtual void compute_mapping_support_points(
- const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- std::vector<Point<spacedim> > &a) const;
+ /**
+ * Constructor, with an argument
+ * defining the desired polynomial
+ * degree.
+ */
+
+ SupportQuadrature (const unsigned int map_degree);
+
+ };
+
+ /**
+ * A member variable holding the
+ * quadrature points in the right
+ * order.
+ */
+ const SupportQuadrature support_quadrature;
+
+ /**
+ * FEValues object used to query
+ * the given finite element field
+ * at the support points in the
+ * reference configuration.
+ *
+ * The variable is marked as
+ * mutable since we have to call
+ * FEValues::reinit from
+ * compute_mapping_support_points,
+ * a function that is 'const'.
+ */
+ mutable FEValues<dim,spacedim> fe_values;
+
+ /**
+ * A variable to guard access to
+ * the fe_values variable.
+ */
+ mutable Threads::ThreadMutex fe_values_mutex;
+
+ /**
+ * Compute the positions of the
+ * support points in the current
+ * configuration
+ */
+ virtual void compute_mapping_support_points(
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ std::vector<Point<spacedim> > &a) const;
};
*/
class GridGenerator
{
- public:
- /**
- * Initialize the given triangulation
- * with a hypercube (line in 1D, square
- * in 2D, etc) consisting of exactly one
- * cell. The hypercube volume is the
- * tensor product interval
- * <i>[left,right]<sup>dim</sup></i> in
- * the present number of dimensions,
- * where the limits are given as
- * arguments. They default to zero and
- * unity, then producing the unit
- * hypercube. All boundary indicators are
- * set to zero ("not colorized") for 2d
- * and 3d. In 1d the indicators are
- * colorized, see hyper_rectangle().
- *
- * @image html hyper_cubes.png
- *
- * See also
- * subdivided_hyper_cube() for a
- * coarse mesh consisting of
- * several cells. See
- * hyper_rectangle(), if
- * different lengths in different
- * ordinate directions are
- * required.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim, int spacedim>
- static void hyper_cube (Triangulation<dim,spacedim> &tria,
- const double left = 0.,
- const double right= 1.);
+ public:
+ /**
+ * Initialize the given triangulation
+ * with a hypercube (line in 1D, square
+ * in 2D, etc) consisting of exactly one
+ * cell. The hypercube volume is the
+ * tensor product interval
+ * <i>[left,right]<sup>dim</sup></i> in
+ * the present number of dimensions,
+ * where the limits are given as
+ * arguments. They default to zero and
+ * unity, then producing the unit
+ * hypercube. All boundary indicators are
+ * set to zero ("not colorized") for 2d
+ * and 3d. In 1d the indicators are
+ * colorized, see hyper_rectangle().
+ *
+ * @image html hyper_cubes.png
+ *
+ * See also
+ * subdivided_hyper_cube() for a
+ * coarse mesh consisting of
+ * several cells. See
+ * hyper_rectangle(), if
+ * different lengths in different
+ * ordinate directions are
+ * required.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim, int spacedim>
- static void hyper_cube (Triangulation<dim,spacedim> &tria,
++ static void hyper_cube (Triangulation<dim,spacedim> &tria,
+ const double left = 0.,
+ const double right= 1.);
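+
+ /* A minimal sketch: a single-cell square covering [-1,1]^2, then
+    refined globally:
+
+      Triangulation<2> triangulation;
+      GridGenerator::hyper_cube (triangulation, -1., 1.);
+      triangulation.refine_global (4);
+ */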
- /**
- * Same as hyper_cube(), but
- * with the difference that not
- * only one cell is created but
- * each coordinate direction is
- * subdivided into
- * @p repetitions cells. Thus,
- * the number of cells filling
- * the given volume is
- * <tt>repetitions<sup>dim</sup></tt>.
- *
- * If spacedim=dim+1 the same
- * mesh as in the case
- * spacedim=dim is created, but
- * the vertices have an
- * additional coordinate =0. So,
- * if dim=1 one obtains line
- * along the x axis in the xy
- * plane, and if dim=3 one
- * obtains a square in lying in
- * the xy plane in 3d space.
- *
- * @note The triangulation needs
- * to be void upon calling this
- * function.
- */
- template <int dim>
- static void subdivided_hyper_cube (Triangulation<dim> &tria,
- const unsigned int repetitions,
- const double left = 0.,
- const double right= 1.);
+ /**
+ * Same as hyper_cube(), but
+ * with the difference that not
+ * only one cell is created but
+ * each coordinate direction is
+ * subdivided into
+ * @p repetitions cells. Thus,
+ * the number of cells filling
+ * the given volume is
+ * <tt>repetitions<sup>dim</sup></tt>.
+ *
+ * If spacedim=dim+1 the same
+ * mesh as in the case
+ * spacedim=dim is created, but
+ * the vertices have an
+ * additional coordinate =0. So,
+ * if dim=1 one obtains a line
+ * along the x axis in the xy
+ * plane, and if dim=2 one
+ * obtains a square lying in
+ * the xy plane in 3d space.
+ *
+ * @note The triangulation needs
+ * to be void upon calling this
+ * function.
+ */
+ template <int dim>
- static void subdivided_hyper_cube (Triangulation<dim> &tria,
++ static void subdivided_hyper_cube (Triangulation<dim> &tria,
+ const unsigned int repetitions,
+ const double left = 0.,
+ const double right= 1.);
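+
+ /* A sketch: the unit cube subdivided into 8x8x8 cells in one call:
+
+      Triangulation<3> triangulation;
+      GridGenerator::subdivided_hyper_cube (triangulation, 8);
+ */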
- /**
- * Create a coordinate-parallel
- * brick from the two
- * diagonally opposite corner
- * points @p p1 and @p p2.
- *
- * If the @p colorize flag is
- * set, the
- * @p boundary_indicators of the
- * surfaces are assigned, such
- * that the lower one in
- * @p x-direction is 0, the
- * upper one is 1. The indicators
- * for the surfaces in
- * @p y-direction are 2 and 3,
- * the ones for @p z are 4 and
- * 5. Additionally, material ids
- * are assigned to the cells
- * according to the octant their
- * center is in: being in the right half
- * plane for any coordinate
- * direction <i>x<sub>i</sub></i>
- * adds 2<sup>i</sup>. For
- * instance, the center point
- * (1,-1,1) yields a material id 5.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim, int spacedim>
- static void hyper_rectangle (Triangulation<dim,spacedim> &tria,
- const Point<spacedim> &p1,
- const Point<spacedim> &p2,
- const bool colorize = false);
+ /**
+ * Create a coordinate-parallel
+ * brick from the two
+ * diagonally opposite corner
+ * points @p p1 and @p p2.
+ *
+ * If the @p colorize flag is
+ * set, the
+ * @p boundary_indicators of the
+ * surfaces are assigned, such
+ * that the lower one in
+ * @p x-direction is 0, the
+ * upper one is 1. The indicators
+ * for the surfaces in
+ * @p y-direction are 2 and 3,
+ * the ones for @p z are 4 and
+ * 5. Additionally, material ids
+ * are assigned to the cells
+ * according to the octant their
+ * center is in: being in the right half
+ * plane for any coordinate
+ * direction <i>x<sub>i</sub></i>
+ * adds 2<sup>i</sup>. For
+ * instance, the center point
+ * (1,-1,1) yields a material id 5.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim, int spacedim>
+ static void hyper_rectangle (Triangulation<dim,spacedim> &tria,
+ const Point<spacedim> &p1,
+ const Point<spacedim> &p2,
+ const bool colorize = false);
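+
+ /* A sketch: a brick [0,2] x [0,1] in 2d with colorized boundary
+    indicators as described above:
+
+      Triangulation<2> triangulation;
+      GridGenerator::hyper_rectangle (triangulation,
+                                      Point<2>(0., 0.),
+                                      Point<2>(2., 1.),
+                                      true);
+ */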
- /**
- * Create a coordinate-parallel
- * parallelepiped from the two
- * diagonally opposite corner
- * points @p p1 and @p p2. In
- * dimension @p i,
- * <tt>repetitions[i]</tt> cells are
- * generated.
- *
- * To get cells with an aspect
- * ratio different from that of
- * the domain, use different
- * numbers of subdivisions in
- * different coordinate
- * directions. The minimum number
- * of subdivisions in each
- * direction is
- * 1. @p repetitions is a list
- * of integers denoting the
- * number of subdivisions in each
- * coordinate direction.
- *
- * If the @p colorize flag is
- * set, the
- * @p boundary_indicators of the
- * surfaces are assigned, such
- * that the lower one in
- * @p x-direction is 0, the
- * upper one is 1. The indicators
- * for the surfaces in
- * @p y-direction are 2 and 3,
- * the ones for @p z are 4 and
- * 5. Additionally, material ids
- * are assigned to the cells
- * according to the octant their
- * center is in: being in the right half
- * plane for any coordinate
- * direction <i>x<sub>i</sub></i>
- * adds 2<sup>i</sup>. For
- * instance, the center point
- * (1,-1,1) yields a material id 5.
- *
- * Note that the @p colorize flag is
- * ignored in 1d and is assumed to always
- * be true. That means the boundary
- * indicator is 0 on the left and 1 on
- * the right. See step-15 for details.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- *
- * @note For an example of the
- * use of this function see the
- * step-28
- * tutorial program.
- */
- template <int dim>
- static
- void
- subdivided_hyper_rectangle (Triangulation<dim> &tria,
- const std::vector<unsigned int> &repetitions,
- const Point<dim> &p1,
- const Point<dim> &p2,
- const bool colorize=false);
+ /**
+ * Create a coordinate-parallel
+ * parallelepiped from the two
+ * diagonally opposite corner
+ * points @p p1 and @p p2. In
+ * dimension @p i,
+ * <tt>repetitions[i]</tt> cells are
+ * generated.
+ *
+ * To get cells with an aspect
+ * ratio different from that of
+ * the domain, use different
+ * numbers of subdivisions in
+ * different coordinate
+ * directions. The minimum number
+ * of subdivisions in each
+ * direction is
+ * 1. @p repetitions is a list
+ * of integers denoting the
+ * number of subdivisions in each
+ * coordinate direction.
+ *
+ * If the @p colorize flag is
+ * set, the
+ * @p boundary_indicators of the
+ * surfaces are assigned, such
+ * that the lower one in
+ * @p x-direction is 0, the
+ * upper one is 1. The indicators
+ * for the surfaces in
+ * @p y-direction are 2 and 3,
+ * the ones for @p z are 4 and
+ * 5. Additionally, material ids
+ * are assigned to the cells
+ * according to the octant their
+ * center is in: being in the right half
+ * plane for any coordinate
+ * direction <i>x<sub>i</sub></i>
+ * adds 2<sup>i</sup>. For
+ * instance, the center point
+ * (1,-1,1) yields a material id 5.
+ *
+ * Note that the @p colorize flag is
+ * ignored in 1d and is assumed to always
+ * be true. That means the boundary
+ * indicator is 0 on the left and 1 on
+ * the right. See step-15 for details.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ *
+ * @note For an example of the
+ * use of this function see the
+ * step-28
+ * tutorial program.
+ */
+ template <int dim>
+ static
+ void
+ subdivided_hyper_rectangle (Triangulation<dim> &tria,
+ const std::vector<unsigned int> &repetitions,
+ const Point<dim> &p1,
+ const Point<dim> &p2,
+ const bool colorize=false);
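For illustration, a minimal usage sketch of this overload (assuming the <deal.II/...> include layout of recent library versions and the usual dealii namespace); it builds a 4x1 strip of unit cells with colorized boundary indicators:

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <vector>

using namespace dealii;

// Subdivide the rectangle (0,4)x(0,1) into 4x1 square cells and
// colorize the boundary as documented above.
void make_strip (Triangulation<2> &tria)
{
  std::vector<unsigned int> repetitions (2);
  repetitions[0] = 4;
  repetitions[1] = 1;

  GridGenerator::subdivided_hyper_rectangle (tria,
                                             repetitions,
                                             Point<2> (0., 0.),
                                             Point<2> (4., 1.),
                                             true);   // colorize
}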
- /**
- * Like the previous
- * function. However, here the
- * second argument does not
- * denote the number of
- * subdivisions in each
- * coordinate direction, but a
- * sequence of step sizes for
- * each coordinate direction. The
- * domain will therefore be
- * subdivided into
- * <code>step_sizes[i].size()</code>
- * cells in coordinate direction
- * <code>i</code>, with widths
- * <code>step_sizes[i][j]</code>
- * for the <code>j</code>th cell.
- *
- * This function is therefore the
- * right one to generate graded
- * meshes where cells are
- * concentrated in certain areas,
- * rather than a uniformly
- * subdivided mesh as the
- * previous function generates.
- *
- * The step sizes have to add up
- * to the dimensions of the hyper
- * rectangle specified by the
- * points @p p1 and @p p2.
- */
- template <int dim>
- static
- void
- subdivided_hyper_rectangle(Triangulation<dim> &tria,
- const std::vector<std::vector<double> > &step_sizes,
- const Point<dim> &p_1,
- const Point<dim> &p_2,
- const bool colorize);
+ /**
+ * Like the previous
+ * function. However, here the
+ * second argument does not
+ * denote the number of
+ * subdivisions in each
+ * coordinate direction, but a
+ * sequence of step sizes for
+ * each coordinate direction. The
+ * domain will therefore be
+ * subdivided into
+ * <code>step_sizes[i].size()</code>
+ * cells in coordinate direction
+ * <code>i</code>, with widths
+ * <code>step_sizes[i][j]</code>
+ * for the <code>j</code>th cell.
+ *
+ * This function is therefore the
+ * right one to generate graded
+ * meshes where cells are
+ * concentrated in certain areas,
+ * rather than a uniformly
+ * subdivided mesh as the
+ * previous function generates.
+ *
+ * The step sizes have to add up
+ * to the dimensions of the hyper
+ * rectangle specified by the
+ * points @p p1 and @p p2.
+ */
+ template <int dim>
+ static
+ void
+ subdivided_hyper_rectangle(Triangulation<dim> &tria,
+ const std::vector<std::vector<double> > &step_sizes,
+ const Point<dim> &p_1,
+ const Point<dim> &p_2,
+ const bool colorize);
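A sketch of the graded variant under the same assumptions as above; note that the step sizes in each direction must add up to the corresponding edge length, and that this overload has no default value for @p colorize:

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <vector>

using namespace dealii;

// A (0,1)x(0,1) mesh graded towards x=0: the cell widths in x increase
// from 0.1 to 0.4 (0.1+0.2+0.3+0.4 = 1), with two uniform cells in y.
void make_graded_rectangle (Triangulation<2> &tria)
{
  std::vector<std::vector<double> > step_sizes (2);
  step_sizes[0].push_back (0.1);
  step_sizes[0].push_back (0.2);
  step_sizes[0].push_back (0.3);
  step_sizes[0].push_back (0.4);
  step_sizes[1].push_back (0.5);
  step_sizes[1].push_back (0.5);

  GridGenerator::subdivided_hyper_rectangle (tria,
                                             step_sizes,
                                             Point<2> (0., 0.),
                                             Point<2> (1., 1.),
                                             false);   // no default here
}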
- /**
- * Like the previous function, but with
- * the following twist: the @p
- * material_id argument is a
- * dim-dimensional array that, for each
- * cell, indicates which material_id
- * should be set. In addition, and this
- * is the major new functionality, if the
- * material_id of a cell is <tt>(unsigned
- * char)(-1)</tt>, then that cell is
- * deleted from the triangulation,
- * i.e. the domain will have a void
- * there.
- */
- template <int dim>
- static
- void
- subdivided_hyper_rectangle (Triangulation<dim> &tria,
- const std::vector< std::vector<double> > &spacing,
- const Point<dim> &p,
- const Table<dim,types::material_id> &material_id,
- const bool colorize=false);
+ /**
+ * Like the previous function, but with
+ * the following twist: the @p
+ * material_id argument is a
+ * dim-dimensional array that, for each
+ * cell, indicates which material_id
+ * should be set. In addition, and this
+ * is the major new functionality, if the
+ * material_id of a cell is <tt>(unsigned
+ * char)(-1)</tt>, then that cell is
+ * deleted from the triangulation,
+ * i.e. the domain will have a void
+ * there.
+ */
+ template <int dim>
+ static
+ void
+ subdivided_hyper_rectangle (Triangulation<dim> &tria,
+ const std::vector< std::vector<double> > &spacing,
+ const Point<dim> &p,
+ const Table<dim,types::material_id> &material_id,
+ const bool colorize=false);
- /**
- * A parallelogram. The first
- * corner point is the
- * origin. The <tt>dim</tt>
- * adjacent points are the
- * one-dimensional subtensors of
- * the tensor provided and
- * additional points will be sums
- * of these two vectors.
- * Colorizing is done according
- * to hyper_rectangle().
- *
- * @note This function is
- * implemented in 2d only.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void
- parallelogram(Triangulation<dim>& tria,
- const Tensor<2,dim>& corners,
- const bool colorize=false);
+ /**
+ * A parallelogram. The first
+ * corner point is the
+ * origin. The <tt>dim</tt>
+ * adjacent points are the
+ * one-dimensional subtensors of
+ * the tensor provided and
+ * additional points will be sums
+ * of these two vectors.
+ * Colorizing is done according
+ * to hyper_rectangle().
+ *
+ * @note This function is
+ * implemented in 2d only.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void
- parallelogram(Triangulation<dim> &tria,
++ parallelogram(Triangulation<dim> &tria,
+ const Tensor<2,dim> &corners,
+ const bool colorize=false);
- /**
- * Hypercube with a layer of
- * hypercubes around it. The
- * first two parameters give the
- * lower and upper bound of the
- * inner hypercube in all
- * coordinate directions.
- * @p thickness marks the size of
- * the layer cells.
- *
- * If the flag colorize is set,
- * the outer cells get material
- * id's according to the
- * following scheme: extending
- * over the inner cube in
- * (+/-) x-direction: 1/2. In y-direction
- * 4/8, in z-direction 16/32. The cells
- * at corners and edges (3d) get
- * these values bitwise or'd.
- *
- * Presently only available in 2d
- * and 3d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void enclosed_hyper_cube (Triangulation<dim> &tria,
- const double left = 0.,
- const double right= 1.,
- const double thickness = 1.,
- const bool colorize = false);
+ /**
+ * Hypercube with a layer of
+ * hypercubes around it. The
+ * first two parameters give the
+ * lower and upper bound of the
+ * inner hypercube in all
+ * coordinate directions.
+ * @p thickness marks the size of
+ * the layer cells.
+ *
+ * If the flag colorize is set,
+ * the outer cells get material
+ * ids according to the
+ * following scheme: extending
+ * over the inner cube in
+ * (+/-) x-direction: 1/2. In y-direction
+ * 4/8, in z-direction 16/32. The cells
+ * at corners and edges (3d) get
+ * these values bitwise or'd.
+ *
+ * Presently only available in 2d
+ * and 3d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void enclosed_hyper_cube (Triangulation<dim> &tria,
+ const double left = 0.,
+ const double right= 1.,
+ const double thickness = 1.,
+ const bool colorize = false);
- /**
- * Initialize the given
- * triangulation with a
- * hyperball, i.e. a circle or a
- * ball around <tt>center</tt>
- * with given <tt>radius</tt>.
- *
- * In order to avoid degenerate
- * cells at the boundaries, the
- * circle is triangulated by five
- * cells, the ball by seven
- * cells. The diameter of the
- * center cell is chosen so that
- * the aspect ratio of the
- * boundary cells after one
- * refinement is optimized.
- *
- * This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_ball (Triangulation<dim> &tria,
- const Point<dim> ¢er = Point<dim>(),
- const double radius = 1.);
+ /**
+ * Initialize the given
+ * triangulation with a
+ * hyperball, i.e. a circle or a
+ * ball around <tt>center</tt>
+ * with given <tt>radius</tt>.
+ *
+ * In order to avoid degenerate
+ * cells at the boundaries, the
+ * circle is triangulated by five
+ * cells, the ball by seven
+ * cells. The diameter of the
+ * center cell is chosen so that
+ * the aspect ratio of the
+ * boundary cells after one
+ * refinement is optimized.
+ *
+ * This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void hyper_ball (Triangulation<dim> &tria,
+ const Point<dim> ¢er = Point<dim>(),
+ const double radius = 1.);
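A usage sketch, assuming the HyperBallBoundary class from <deal.II/grid/tria_boundary_lib.h> so that refinement places new boundary vertices on the circle rather than on straight edges:

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/grid/grid_generator.h>

using namespace dealii;

// A disc of radius 2 around (1,0). The boundary object must live at
// least as long as the triangulation uses it, hence 'static' here.
void make_ball (Triangulation<2> &tria)
{
  const Point<2> center (1., 0.);
  static const HyperBallBoundary<2> boundary (center, 2.);

  GridGenerator::hyper_ball (tria, center, 2.);
  tria.set_boundary (0, boundary);
  tria.refine_global (2);
}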
- /**
- * This class produces a half
- * hyper-ball around
- * <tt>center</tt>, which
- * contains four elements in 2d
- * and 6 in 3d. The cut plane is
- * perpendicular to the
- * <i>x</i>-axis.
- *
- * The boundary indicators for the final
- * triangulation are 0 for the curved boundary and
- * 1 for the cut plane.
- *
- * The appropriate
- * boundary class is
- * HalfHyperBallBoundary, or HyperBallBoundary.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void half_hyper_ball (Triangulation<dim> &tria,
- const Point<dim> ¢er = Point<dim>(),
- const double radius = 1.);
+ /**
+ * This function produces a half
+ * hyper-ball around
+ * <tt>center</tt>, which
+ * contains four elements in 2d
+ * and 6 in 3d. The cut plane is
+ * perpendicular to the
+ * <i>x</i>-axis.
+ *
+ * The boundary indicators for the final
+ * triangulation are 0 for the curved boundary and
+ * 1 for the cut plane.
+ *
+ * The appropriate
+ * boundary class is
+ * HalfHyperBallBoundary, or HyperBallBoundary.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void half_hyper_ball (Triangulation<dim> &tria,
+ const Point<dim> ¢er = Point<dim>(),
+ const double radius = 1.);
- /**
- * Create a cylinder around the
- * x-axis. The cylinder extends
- * from <tt>x=-half_length</tt> to
- * <tt>x=+half_length</tt> and its
- * projection into the
- * @p yz-plane is a circle of
- * radius @p radius.
- *
- * In two dimensions, the
- * cylinder is a rectangle from
- * <tt>x=-half_length</tt> to
- * <tt>x=+half_length</tt> and
- * from <tt>y=-radius</tt> to
- * <tt>y=radius</tt>.
- *
- * The boundaries are colored
- * according to the following
- * scheme: 0 for the hull of the
- * cylinder, 1 for the left hand
- * face and 2 for the right hand
- * face.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void cylinder (Triangulation<dim> &tria,
- const double radius = 1.,
- const double half_length = 1.);
+ /**
+ * Create a cylinder around the
+ * x-axis. The cylinder extends
+ * from <tt>x=-half_length</tt> to
+ * <tt>x=+half_length</tt> and its
+ * projection into the
+ * @p yz-plane is a circle of
+ * radius @p radius.
+ *
+ * In two dimensions, the
+ * cylinder is a rectangle from
+ * <tt>x=-half_length</tt> to
+ * <tt>x=+half_length</tt> and
+ * from <tt>y=-radius</tt> to
+ * <tt>y=radius</tt>.
+ *
+ * The boundaries are colored
+ * according to the following
+ * scheme: 0 for the hull of the
+ * cylinder, 1 for the left hand
+ * face and 2 for the right hand
+ * face.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void cylinder (Triangulation<dim> &tria,
+ const double radius = 1.,
+ const double half_length = 1.);
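A minimal sketch; as documented above, the hull of the cylinder receives boundary indicator 0 and the two end faces indicators 1 and 2:

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>

using namespace dealii;

// A 3d cylinder of radius 0.5 along the x-axis, extending from
// x=-2 to x=+2.
void make_cylinder (Triangulation<3> &tria)
{
  GridGenerator::cylinder (tria,
                           0.5,    // radius
                           2.0);   // half_length
}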
- /**
- * Create a cutted cone around
- * the x-axis. The cone extends
- * from <tt>x=-half_length</tt>
- * to <tt>x=half_length</tt> and
- * its projection into the @p
- * yz-plane is a circle of radius
- * @p radius_0 at
- * <tt>x=-half_length</tt> and a
- * circle of radius @p radius_1
- * at <tt>x=+half_length</tt>.
- * In between the radius is
- * linearly decreasing.
- *
- * In two dimensions, the cone is
- * a trapezoid from
- * <tt>x=-half_length</tt> to
- * <tt>x=+half_length</tt> and
- * from <tt>y=-radius_0</tt> to
- * <tt>y=radius_0</tt> at
- * <tt>x=-half_length</tt> and
- * from <tt>y=-radius_1</tt> to
- * <tt>y=radius_1</tt> at
- * <tt>x=+half_length</tt>. In
- * between the range of
- * <tt>y</tt> is linearly
- * decreasing.
- *
- * The boundaries are colored
- * according to the following
- * scheme: 0 for the hull of the
- * cone, 1 for the left hand
- * face and 2 for the right hand
- * face.
- *
- * An example of use can be found in the
- * documentation of the ConeBoundary
- * class, with which you probably want to
- * associate boundary indicator 0 (the
- * hull of the cone).
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- *
- * @author Markus Bürg, 2009
- */
- template <int dim>
- static void
- truncated_cone (Triangulation<dim> &tria,
- const double radius_0 = 1.0,
- const double radius_1 = 0.5,
- const double half_length = 1.0);
+ /**
+ * Create a truncated cone around
+ * the x-axis. The cone extends
+ * from <tt>x=-half_length</tt>
+ * to <tt>x=half_length</tt> and
+ * its projection into the @p
+ * yz-plane is a circle of radius
+ * @p radius_0 at
+ * <tt>x=-half_length</tt> and a
+ * circle of radius @p radius_1
+ * at <tt>x=+half_length</tt>.
+ * In between the radius is
+ * linearly decreasing.
+ *
+ * In two dimensions, the cone is
+ * a trapezoid from
+ * <tt>x=-half_length</tt> to
+ * <tt>x=+half_length</tt> and
+ * from <tt>y=-radius_0</tt> to
+ * <tt>y=radius_0</tt> at
+ * <tt>x=-half_length</tt> and
+ * from <tt>y=-radius_1</tt> to
+ * <tt>y=radius_1</tt> at
+ * <tt>x=+half_length</tt>. In
+ * between the range of
+ * <tt>y</tt> is linearly
+ * decreasing.
+ *
+ * The boundaries are colored
+ * according to the following
+ * scheme: 0 for the hull of the
+ * cone, 1 for the left hand
+ * face and 2 for the right hand
+ * face.
+ *
+ * An example of use can be found in the
+ * documentation of the ConeBoundary
+ * class, with which you probably want to
+ * associate boundary indicator 0 (the
+ * hull of the cone).
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ *
+ * @author Markus Bürg, 2009
+ */
+ template <int dim>
+ static void
+ truncated_cone (Triangulation<dim> &tria,
+ const double radius_0 = 1.0,
+ const double radius_1 = 0.5,
+ const double half_length = 1.0);
- /**
- * Initialize the given
- * triangulation with a hyper-L
- * consisting of exactly
- * <tt>2^dim-1</tt> cells. It
- * produces the hypercube with
- * the interval [<i>left,right</i>] without
- * the hypercube made out of the
- * interval [<i>(a+b)/2,b</i>].
- *
- * @image html hyper_l.png
- *
- * The triangulation needs to be
- * void upon calling this
- * function.
- *
- * This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_L (Triangulation<dim> &tria,
- const double left = -1.,
- const double right= 1.);
+ /**
+ * Initialize the given
+ * triangulation with a hyper-L
+ * consisting of exactly
+ * <tt>2^dim-1</tt> cells. It
+ * produces the hypercube with
+ * the interval [<i>left,right</i>] without
+ * the hypercube made out of the
+ * interval [<i>(left+right)/2,right</i>].
+ *
+ * @image html hyper_l.png
+ *
+ * This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void hyper_L (Triangulation<dim> &tria,
+ const double left = -1.,
+ const double right= 1.);
- /**
- * Initialize the given
- * Triangulation with a hypercube
- * with a slit. In each
- * coordinate direction, the
- * hypercube extends from @p left
- * to @p right.
- *
- * In 2d, the split goes in
- * vertical direction from
- * <tt>x=(left+right)/2,
- * y=left</tt> to the center of
- * the square at
- * <tt>x=y=(left+right)/2</tt>.
- *
- * In 3d, the 2d domain is just
- * extended in the
- * <i>z</i>-direction, such that
- * a plane cuts the lower half of
- * a rectangle in two.
+ /**
+ * Initialize the given
+ * Triangulation with a hypercube
+ * with a slit. In each
+ * coordinate direction, the
+ * hypercube extends from @p left
+ * to @p right.
+ *
+ * In 2d, the split goes in
+ * vertical direction from
+ * <tt>x=(left+right)/2,
+ * y=left</tt> to the center of
+ * the square at
+ * <tt>x=y=(left+right)/2</tt>.
+ *
+ * In 3d, the 2d domain is just
+ * extended in the
+ * <i>z</i>-direction, such that
+ * a plane cuts the lower half of
+ * a rectangle in two.
- * This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_cube_slit (Triangulation<dim> &tria,
- const double left = 0.,
- const double right= 1.,
- const bool colorize = false);
+ * This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void hyper_cube_slit (Triangulation<dim> &tria,
+ const double left = 0.,
+ const double right= 1.,
+ const bool colorize = false);
- /**
- * Produce a hyper-shell,
- * the region between two
- * spheres around <tt>center</tt>,
- * with given
- * <tt>inner_radius</tt> and
- * <tt>outer_radius</tt>. The number
- * <tt>n_cells</tt> indicates the
- * number of cells of the resulting
- * triangulation, i.e., how many cells
- * form the ring (in 2d) or the shell
- * (in 3d).
- *
- * If the flag @p colorize is @p true,
- * then the outer boundary will have the
- * indicator 1, while the inner boundary
- * has id zero. If the flag is @p false,
- * both have indicator zero.
- *
- * In 2D, the number
- * <tt>n_cells</tt> of elements
- * for this initial triangulation
- * can be chosen arbitrarily. If
- * the number of initial cells is
- * zero (as is the default), then
- * it is computed adaptively such
- * that the resulting elements
- * have the least aspect ratio.
- *
- * In 3D, only two different numbers are
- * meaningful, 6 for a surface based on a
- * hexahedron (i.e. 6 panels on the inner
- * sphere extruded in radial direction to
- * form 6 cells) and 12 for the rhombic
- * dodecahedron. These give rise to the
- * following meshes upon one refinement:
- *
- * @image html hypershell3d-6.png
- * @image html hypershell3d-12.png
- *
- * Neither of these meshes is
- * particularly good since one ends up
- * with poorly shaped cells at the inner
- * edge upon refinement. For example,
- * this is the middle plane of the mesh
- * for the <code>n_cells=6</code>:
- *
- * @image html hyper_shell_6_cross_plane.png
- *
- * The mesh generated with
- * <code>n_cells=6</code> is better but
- * still not good. As a consequence, you
- * may also specify
- * <code>n_cells=96</code> as a third
- * option. The mesh generated in this way
- * is based on a once refined version of
- * the one with <code>n_cells=12</code>,
- * where all internal nodes are re-placed
- * along a shell somewhere between the
- * inner and outer boundary of the
- * domain. The following two images
- * compare half of the hyper shell for
- * <code>n_cells=12</code> and
- * <code>n_cells=96</code> (note that the
- * doubled radial lines on the cross
- * section are artifacts of the
- * visualization):
- *
- * @image html hyper_shell_12_cut.png
- * @image html hyper_shell_96_cut.png
- *
- * @note This function is declared to
- * exist for triangulations of
- * all space dimensions, but
- * throws an error if called in
- * 1d.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void hyper_shell (Triangulation<dim> &tria,
- const Point<dim> ¢er,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_cells = 0,
- bool colorize = false);
+ /**
+ * Produce a hyper-shell,
+ * the region between two
+ * spheres around <tt>center</tt>,
+ * with given
+ * <tt>inner_radius</tt> and
+ * <tt>outer_radius</tt>. The number
+ * <tt>n_cells</tt> indicates the
+ * number of cells of the resulting
+ * triangulation, i.e., how many cells
+ * form the ring (in 2d) or the shell
+ * (in 3d).
+ *
+ * If the flag @p colorize is @p true,
+ * then the outer boundary will have the
+ * indicator 1, while the inner boundary
+ * has id zero. If the flag is @p false,
+ * both have indicator zero.
+ *
+ * In 2D, the number
+ * <tt>n_cells</tt> of elements
+ * for this initial triangulation
+ * can be chosen arbitrarily. If
+ * the number of initial cells is
+ * zero (as is the default), then
+ * it is computed adaptively such
+ * that the resulting elements
+ * have the least aspect ratio.
+ *
+ * In 3D, only two different numbers are
+ * meaningful, 6 for a surface based on a
+ * hexahedron (i.e. 6 panels on the inner
+ * sphere extruded in radial direction to
+ * form 6 cells) and 12 for the rhombic
+ * dodecahedron. These give rise to the
+ * following meshes upon one refinement:
+ *
+ * @image html hypershell3d-6.png
+ * @image html hypershell3d-12.png
+ *
+ * Neither of these meshes is
+ * particularly good since one ends up
+ * with poorly shaped cells at the inner
+ * edge upon refinement. For example,
+ * this is the middle plane of the mesh
+ * for the <code>n_cells=6</code>:
+ *
+ * @image html hyper_shell_6_cross_plane.png
+ *
+ * The mesh generated with
+ * <code>n_cells=12</code> is better but
+ * still not good. As a consequence, you
+ * may also specify
+ * <code>n_cells=96</code> as a third
+ * option. The mesh generated in this way
+ * is based on a once refined version of
+ * the one with <code>n_cells=12</code>,
+ * where all internal nodes are re-placed
+ * along a shell somewhere between the
+ * inner and outer boundary of the
+ * domain. The following two images
+ * compare half of the hyper shell for
+ * <code>n_cells=12</code> and
+ * <code>n_cells=96</code> (note that the
+ * doubled radial lines on the cross
+ * section are artifacts of the
+ * visualization):
+ *
+ * @image html hyper_shell_12_cut.png
+ * @image html hyper_shell_96_cut.png
+ *
+ * @note This function is declared to
+ * exist for triangulations of
+ * all space dimensions, but
+ * throws an error if called in
+ * 1d.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_cells = 0,
+ bool colorize = false);
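A 2d sketch, assuming the HyperShellBoundary class from <deal.II/grid/tria_boundary_lib.h> to keep new boundary vertices on the two circles during refinement:

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/grid/grid_generator.h>

using namespace dealii;

// A ring around the origin with inner radius 0.5 and outer radius 1;
// with colorize=true the inner circle gets indicator 0, the outer 1.
void make_ring (Triangulation<2> &tria)
{
  static const HyperShellBoundary<2> boundary;

  GridGenerator::hyper_shell (tria, Point<2>(),
                              0.5,     // inner_radius
                              1.0,     // outer_radius
                              0,       // n_cells: choose automatically in 2d
                              true);   // colorize
  tria.set_boundary (0, boundary);
  tria.set_boundary (1, boundary);
  tria.refine_global (1);
}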
- /**
- * Produce a half hyper-shell,
- * i.e. the space between two
- * circles in two space
- * dimensions and the region
- * between two spheres in 3d,
- * with given inner and outer
- * radius and a given number of
- * elements for this initial
- * triangulation. However,
- * opposed to the previous
- * function, it does not produce
- * a whole shell, but only one
- * half of it, namely that part
- * for which the first component
- * is restricted to non-negative
- * values. The purpose of this
- * class is to enable
- * computations for solutions
- * which have rotational
- * symmetry, in which case the
- * half shell in 2d represents a
- * shell in 3d.
- *
- * If the number of
- * initial cells is zero (as is
- * the default), then it is
- * computed adaptively such that
- * the resulting elements have
- * the least aspect ratio.
- *
- * If colorize is set to true, the
- * inner, outer, left, and right
- * boundary get indicator 0, 1, 2,
- * and 3, respectively. Otherwise
- * all indicators are set to 0.
- *
- * @note The triangulation needs to be
- * void upon calling this
- * function.
- */
- template <int dim>
- static void half_hyper_shell (Triangulation<dim> &tria,
- const Point<dim> ¢er,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_cells = 0,
- const bool colorize = false);
+ /**
+ * Produce a half hyper-shell,
+ * i.e. the space between two
+ * circles in two space
+ * dimensions and the region
+ * between two spheres in 3d,
+ * with given inner and outer
+ * radius and a given number of
+ * elements for this initial
+ * triangulation. However,
+ * in contrast to the previous
+ * function, it does not produce
+ * a whole shell, but only one
+ * half of it, namely that part
+ * for which the first component
+ * is restricted to non-negative
+ * values. The purpose of this
+ * function is to enable
+ * computations for solutions
+ * which have rotational
+ * symmetry, in which case the
+ * half shell in 2d represents a
+ * shell in 3d.
+ *
+ * If the number of
+ * initial cells is zero (as is
+ * the default), then it is
+ * computed adaptively such that
+ * the resulting elements have
+ * the least aspect ratio.
+ *
+ * If colorize is set to true, the
+ * inner, outer, left, and right
+ * boundary get indicator 0, 1, 2,
+ * and 3, respectively. Otherwise
+ * all indicators are set to 0.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void half_hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_cells = 0,
+ const bool colorize = false);
- /**
- * Produce a domain that is the
- * intersection between a
- * hyper-shell with given inner
- * and outer radius, i.e. the
- * space between two circles in
- * two space dimensions and the
- * region between two spheres in
- * 3d, and the positive quadrant
- * (in 2d) or octant (in 3d). In
- * 2d, this is indeed a quarter
- * of the full annulus, while the
- * function is a misnomer in 3d
- * because there the domain is
- * not a quarter but one eighth
- * of the full shell.
- *
- * If the number of initial cells is zero
- * (as is the default), then it is
- * computed adaptively such that the
- * resulting elements have the least
- * aspect ratio in 2d.
- *
- * If colorize is set to true, the inner,
- * outer, left, and right boundary get
- * indicator 0, 1, 2, and 3 in 2d,
- * respectively. Otherwise all indicators
- * are set to 0. In 3d indicator 2 is at
- * the face x=0, 3 at y=0, 4 at z=0.
- *
- * @note The triangulation needs to be
- * void upon calling this function.
- */
- template <int dim>
- static void quarter_hyper_shell (Triangulation<dim> &tria,
- const Point<dim> ¢er,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_cells = 0,
- const bool colorize = false);
+ /**
+ * Produce a domain that is the
+ * intersection between a
+ * hyper-shell with given inner
+ * and outer radius, i.e. the
+ * space between two circles in
+ * two space dimensions and the
+ * region between two spheres in
+ * 3d, and the positive quadrant
+ * (in 2d) or octant (in 3d). In
+ * 2d, this is indeed a quarter
+ * of the full annulus, while the
+ * function is a misnomer in 3d
+ * because there the domain is
+ * not a quarter but one eighth
+ * of the full shell.
+ *
+ * If the number of initial cells is zero
+ * (as is the default), then it is
+ * computed adaptively such that the
+ * resulting elements have the least
+ * aspect ratio in 2d.
+ *
+ * If colorize is set to true, the inner,
+ * outer, left, and right boundary get
+ * indicator 0, 1, 2, and 3 in 2d,
+ * respectively. Otherwise all indicators
+ * are set to 0. In 3d indicator 2 is at
+ * the face x=0, 3 at y=0, 4 at z=0.
+ *
+ * @note The triangulation needs to be
+ * void upon calling this function.
+ */
+ template <int dim>
+ static void quarter_hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_cells = 0,
+ const bool colorize = false);
- /**
- * Produce a domain that is the space
- * between two cylinders in 3d, with
- * given length, inner and outer radius
- * and a given number of elements for
- * this initial triangulation. If @p
- * n_radial_cells is zero (as is the
- * default), then it is computed
- * adaptively such that the resulting
- * elements have the least aspect
- * ratio. The same holds for @p
- * n_axial_cells.
- *
- * @note Although this function
- * is declared as a template, it
- * does not make sense in 1D and
- * 2D.
- *
- * @note The triangulation needs
- * to be void upon calling this
- * function.
- */
- template <int dim>
- static void cylinder_shell (Triangulation<dim> &tria,
- const double length,
- const double inner_radius,
- const double outer_radius,
- const unsigned int n_radial_cells = 0,
- const unsigned int n_axial_cells = 0);
+ /**
+ * Produce a domain that is the space
+ * between two cylinders in 3d, with
+ * given length, inner and outer radius
+ * and a given number of elements for
+ * this initial triangulation. If @p
+ * n_radial_cells is zero (as is the
+ * default), then it is computed
+ * adaptively such that the resulting
+ * elements have the least aspect
+ * ratio. The same holds for @p
+ * n_axial_cells.
+ *
+ * @note Although this function
+ * is declared as a template, it
+ * does not make sense in 1D and
+ * 2D.
+ *
+ * @note The triangulation needs
+ * to be void upon calling this
+ * function.
+ */
+ template <int dim>
+ static void cylinder_shell (Triangulation<dim> &tria,
+ const double length,
+ const double inner_radius,
+ const double outer_radius,
+ const unsigned int n_radial_cells = 0,
+ const unsigned int n_axial_cells = 0);
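A minimal 3d sketch; leaving the two cell counts at their zero defaults lets the function choose them for a reasonable aspect ratio:

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>

using namespace dealii;

// The region between two coaxial cylinders of length 2 with inner
// radius 0.5 and outer radius 1.
void make_pipe (Triangulation<3> &tria)
{
  GridGenerator::cylinder_shell (tria,
                                 2.0,    // length
                                 0.5,    // inner_radius
                                 1.0);   // outer_radius
}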
- /**
- * Produce the surface meshing of the
- * torus. The axis of the torus is the
- * $y$-axis while the plane of the torus
- * is the $x$-$z$ plane. The boundary of
- * this object can be described by the
- * TorusBoundary class.
- *
- * @param tria The triangulation to be
- * filled.
- *
- * @param R The radius of the circle,
- * which forms the middle line of the
- * torus containing the loop of
- * cells. Must be greater than @p r.
- *
- * @param r The inner radius of the
- * torus.
- */
+ /**
+ * Produce the surface mesh of a
+ * torus. The axis of the torus is the
+ * $y$-axis while the plane of the torus
+ * is the $x$-$z$ plane. The boundary of
+ * this object can be described by the
+ * TorusBoundary class.
+ *
+ * @param tria The triangulation to be
+ * filled.
+ *
+ * @param R The radius of the circle,
+ * which forms the middle line of the
+ * torus containing the loop of
+ * cells. Must be greater than @p r.
+ *
+ * @param r The inner radius of the
+ * torus.
+ */
- static void torus (Triangulation<2,3>& tria,
- const double R,
- const double r);
- static void torus (Triangulation<2,3> &tria,
++ static void torus (Triangulation<2,3> &tria,
+ const double R,
+ const double r);
- /**
- * This class produces a square
- * on the <i>xy</i>-plane with a
- * circular hole in the middle,
- * times the interval [0.L]
- * (only in 3d).
- *
- * @image html cubes_hole.png
- *
- * It is implemented in 2d and
- * 3d, and takes the following
- * arguments:
- *
- * @arg @p inner_radius: size of the
- * internal hole
- * @arg @p outer_radius: size of the
- * biggest enclosed cylinder
- * @arg @p L: extension on the @p z-direction
- * @arg @p repetitions: number of subdivisions
- * along the @p z-direction
- * @arg @p colorize: wether to assign different
- * boundary indicators to different faces.
- * The colors are given in lexicographic
- * ordering for the flat faces (0 to 3 in 2d,
- * 0 to 5 in 3d) plus the curved hole
- * (4 in 2d, and 6 in 3d).
- * If @p colorize is set to false, then flat faces
- * get the number 0 and the hole gets number 1.
- */
- template<int dim>
- static void hyper_cube_with_cylindrical_hole (Triangulation<dim> &triangulation,
+ /**
+ * This function produces a square
+ * on the <i>xy</i>-plane with a
+ * circular hole in the middle,
+ * times the interval [0,L]
+ * (only in 3d).
+ *
+ * @image html cubes_hole.png
+ *
+ * It is implemented in 2d and
+ * 3d, and takes the following
+ * arguments:
+ *
+ * @arg @p inner_radius: size of the
+ * internal hole
+ * @arg @p outer_radius: size of the
+ * biggest enclosed cylinder
+ * @arg @p L: extension in the @p z-direction
+ * @arg @p repetitions: number of subdivisions
+ * along the @p z-direction
+ * @arg @p colorize: whether to assign different
+ * boundary indicators to different faces.
+ * The colors are given in lexicographic
+ * ordering for the flat faces (0 to 3 in 2d,
+ * 0 to 5 in 3d) plus the curved hole
+ * (4 in 2d, and 6 in 3d).
+ * If @p colorize is set to false, then flat faces
+ * get the number 0 and the hole gets number 1.
+ */
+ template<int dim>
+ static void hyper_cube_with_cylindrical_hole (Triangulation<dim> &triangulation,
const double inner_radius = .25,
const double outer_radius = .5,
const double L = .5,
const unsigned int repetition = 1,
const bool colorize = false);
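A 2d sketch with the default radii; the @p L and @p repetition arguments only matter in 3d, and with colorize left at false the flat faces get indicator 0 and the hole indicator 1:

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>

using namespace dealii;

// A square plate with a circular hole of radius 0.25 in its middle.
void make_plate_with_hole (Triangulation<2> &tria)
{
  GridGenerator::hyper_cube_with_cylindrical_hole (tria,
                                                   0.25,    // inner_radius
                                                   0.5);    // outer_radius
}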
- /**
- * Produce a ring of cells in 3D that is
- * cut open, twisted and glued together
- * again. This results in a kind of
- * moebius-loop.
- *
- * @param tria The triangulation to be worked on.
- * @param n_cells The number of cells in the loop. Must be greater than 4.
- * @param n_rotations The number of rotations (Pi/2 each) to be performed before glueing the loop together.
- * @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r.
- * @param r The radius of the cylinder bend together as loop.
- */
- static void moebius (Triangulation<3,3>& tria,
- const unsigned int n_cells,
- const unsigned int n_rotations,
- const double R,
- const double r);
+ /**
+ * Produce a ring of cells in 3D that is
+ * cut open, twisted and glued together
+ * again. This results in a kind of
+ * Möbius loop.
+ *
+ * @param tria The triangulation to be worked on.
+ * @param n_cells The number of cells in the loop. Must be greater than 4.
+ * @param n_rotations The number of rotations (Pi/2 each) to be performed before gluing the loop together.
+ * @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r.
+ * @param r The radius of the cylinder bent together into a loop.
+ */
- static void moebius (Triangulation<3,3> &tria,
++ static void moebius (Triangulation<3,3> &tria,
+ const unsigned int n_cells,
+ const unsigned int n_rotations,
+ const double R,
+ const double r);
- /**
- * Given the two triangulations
- * specified as the first two
- * arguments, create the
- * triangulation that contains
- * the cells of both
- * triangulation and store it in
- * the third parameter. Previous
- * content of @p result will be
- * deleted.
- *
- * This function is most often used
- * to compose meshes for more
- * complicated geometries if the
- * geometry can be composed of
- * simpler parts for which functions
- * exist to generate coarse meshes.
- * For example, the channel mesh used
- * in step-35 could in principle be
- * created using a mesh created by the
- * GridGenerator::hyper_cube_with_cylindrical_hole
- * function and several rectangles,
- * and merging them using the current
- * function. The rectangles will
- * have to be translated to the
- * right for this, a task that can
- * be done using the GridTools::shift
- * function (other tools to transform
- * individual mesh building blocks are
- * GridTools::transform, GridTools::rotate,
- * and GridTools::scale).
- *
- * @note The two input triangulations
- * must be coarse meshes that have
- * no refined cells.
- *
- * @note The function copies the material ids
- * of the cells of the two input
- * triangulations into the output
- * triangulation but it currently makes
- * no attempt to do the same for boundary
- * ids. In other words, if the two
- * coarse meshes have anything but
- * the default boundary indicators,
- * then you will currently have to set
- * boundary indicators again by hand
- * in the output triangulation.
- *
- * @note For a related operation
- * on refined meshes when both
- * meshes are derived from the
- * same coarse mesh, see
- * GridTools::create_union_triangulation .
- */
- template <int dim, int spacedim>
- static
- void
- merge_triangulations (const Triangulation<dim, spacedim> &triangulation_1,
- const Triangulation<dim, spacedim> &triangulation_2,
- Triangulation<dim, spacedim> &result);
+ /**
+ * Given the two triangulations
+ * specified as the first two
+ * arguments, create the
+ * triangulation that contains
+ * the cells of both
+ * triangulations and store it in
+ * the third parameter. Previous
+ * content of @p result will be
+ * deleted.
+ *
+ * This function is most often used
+ * to compose meshes for more
+ * complicated geometries if the
+ * geometry can be composed of
+ * simpler parts for which functions
+ * exist to generate coarse meshes.
+ * For example, the channel mesh used
+ * in step-35 could in principle be
+ * created using a mesh created by the
+ * GridGenerator::hyper_cube_with_cylindrical_hole
+ * function and several rectangles,
+ * and merging them using the current
+ * function. The rectangles will
+ * have to be translated to the
+ * right for this, a task that can
+ * be done using the GridTools::shift
+ * function (other tools to transform
+ * individual mesh building blocks are
+ * GridTools::transform, GridTools::rotate,
+ * and GridTools::scale).
+ *
+ * @note The two input triangulations
+ * must be coarse meshes that have
+ * no refined cells.
+ *
+ * @note The function copies the material ids
+ * of the cells of the two input
+ * triangulations into the output
+ * triangulation but it currently makes
+ * no attempt to do the same for boundary
+ * ids. In other words, if the two
+ * coarse meshes have anything but
+ * the default boundary indicators,
+ * then you will currently have to set
+ * boundary indicators again by hand
+ * in the output triangulation.
+ *
+ * @note For a related operation
+ * on refined meshes when both
+ * meshes are derived from the
+ * same coarse mesh, see
+ * GridTools::create_union_triangulation .
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ merge_triangulations (const Triangulation<dim, spacedim> &triangulation_1,
+ const Triangulation<dim, spacedim> &triangulation_2,
+ Triangulation<dim, spacedim> &result);
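A sketch of the composition pattern described above, assuming GridTools::shift from <deal.II/grid/grid_tools.h> (shift vector first, triangulation second); two unit squares are merged into a 2x1 strip:

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>

using namespace dealii;

// Build the coarse mesh of the strip (0,2)x(0,1) from two unit squares.
// The shared vertices along x=1 coincide, so the squares are glued there.
void make_strip_of_two_squares (Triangulation<2> &result)
{
  Triangulation<2> square_1, square_2;
  GridGenerator::hyper_rectangle (square_1, Point<2> (0., 0.), Point<2> (1., 1.));
  GridGenerator::hyper_rectangle (square_2, Point<2> (0., 0.), Point<2> (1., 1.));

  GridTools::shift (Point<2> (1., 0.), square_2);

  GridGenerator::merge_triangulations (square_1, square_2, result);
}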
- /**
- * This function transformes the
- * @p Triangulation @p tria
- * smoothly to a domain that is
- * described by the boundary
- * points in the map
- * @p new_points. This map maps
- * the point indices to the
- * boundary points in the
- * transformed domain.
- *
- * Note, that the
- * @p Triangulation is changed
- * in-place, therefore you don't
- * need to keep two
- * triangulations, but the given
- * triangulation is changed
- * (overwritten).
- *
- * In 1d, this function is not
- * currently implemented.
- */
- template <int dim>
- static void laplace_transformation (Triangulation<dim> &tria,
- const std::map<unsigned int,Point<dim> > &new_points);
+ /**
+ * This function transforms the
+ * @p Triangulation @p tria
+ * smoothly to a domain that is
+ * described by the boundary
+ * points in the map
+ * @p new_points. This map maps
+ * the point indices to the
+ * boundary points in the
+ * transformed domain.
+ *
+ * Note that the
+ * @p Triangulation is changed
+ * in-place; you therefore do not
+ * need to keep a second
+ * triangulation, since the given
+ * one is simply overwritten.
+ *
+ * In 1d, this function is not
+ * currently implemented.
+ */
+ template <int dim>
+ static void laplace_transformation (Triangulation<dim> &tria,
+ const std::map<unsigned int,Point<dim> > &new_points);
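A sketch that prescribes new positions for all boundary vertices of a refined square and lets the function move the interior vertices accordingly; the cell/face loop follows the usual deal.II iterator idiom:

#include <deal.II/base/point.h>
#include <deal.II/base/geometry_info.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <map>

using namespace dealii;

// Stretch the boundary of the unit square by 50% in x; the interior
// vertices are then relocated smoothly by laplace_transformation().
void warp_square (Triangulation<2> &tria)
{
  GridGenerator::hyper_rectangle (tria, Point<2> (0., 0.), Point<2> (1., 1.));
  tria.refine_global (3);

  std::map<unsigned int, Point<2> > new_points;
  Triangulation<2>::active_cell_iterator
    cell = tria.begin_active(),
    endc = tria.end();
  for (; cell != endc; ++cell)
    for (unsigned int f = 0; f < GeometryInfo<2>::faces_per_cell; ++f)
      if (cell->face(f)->at_boundary())
        for (unsigned int v = 0; v < GeometryInfo<2>::vertices_per_face; ++v)
          {
            Point<2> p = cell->face(f)->vertex(v);
            p[0] *= 1.5;
            new_points[cell->face(f)->vertex_index(v)] = p;
          }

  GridGenerator::laplace_transformation (tria, new_points);
}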
- /**
- * Exception
- */
- DeclException0 (ExcInvalidRadii);
- /**
- * Exception
- */
- DeclException1 (ExcInvalidRepetitions,
- int,
- << "The number of repetitions " << arg1
- << " must be >=1.");
- /**
- * Exception
- */
- DeclException1 (ExcInvalidRepetitionsDimension,
- int,
- << "The vector of repetitions must have "
- << arg1 <<" elements.");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidRadii);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidRepetitions,
+ int,
+ << "The number of repetitions " << arg1
+ << " must be >=1.");
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidRepetitionsDimension,
+ int,
+ << "The vector of repetitions must have "
+ << arg1 <<" elements.");
- private:
- /**
- * Perform the action specified
- * by the @p colorize flag of
- * the hyper_rectangle()
- * function of this class.
- */
- template <int dim, int spacedim>
- static
- void
- colorize_hyper_rectangle (Triangulation<dim,spacedim> &tria);
+ private:
+ /**
+ * Perform the action specified
+ * by the @p colorize flag of
+ * the hyper_rectangle()
+ * function of this class.
+ */
+ template <int dim, int spacedim>
+ static
+ void
+ colorize_hyper_rectangle (Triangulation<dim,spacedim> &tria);
- /**
- * Perform the action specified
- * by the @p colorize flag of
- * the
- * subdivided_hyper_rectangle()
- * function of this class. This
- * function is singled out
- * because it is dimension
- * specific.
- */
- template <int dim>
- static
- void
- colorize_subdivided_hyper_rectangle (Triangulation<dim> &tria,
- const Point<dim> &p1,
- const Point<dim> &p2,
- const double epsilon);
+ /**
+ * Perform the action specified
+ * by the @p colorize flag of
+ * the
+ * subdivided_hyper_rectangle()
+ * function of this class. This
+ * function is singled out
+ * because it is dimension
+ * specific.
+ */
+ template <int dim>
+ static
+ void
+ colorize_subdivided_hyper_rectangle (Triangulation<dim> &tria,
+ const Point<dim> &p1,
+ const Point<dim> &p2,
+ const double epsilon);
- /**
- * Assign boundary number zero to
- * the inner shell boundary and 1
- * to the outer.
- */
- template<int dim>
- static
- void
- colorize_hyper_shell (Triangulation<dim>& tria,
- const Point<dim>& center,
- const double inner_radius,
- const double outer_radius);
+ /**
+ * Assign boundary number zero to
+ * the inner shell boundary and 1
+ * to the outer.
+ */
+ template<int dim>
+ static
+ void
+ colorize_hyper_shell (Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius);
- /**
- * Assign boundary number zero the inner
- * shell boundary, one to the outer shell
- * boundary, two to the face with x=0,
- * three to the face with y=0, four to
- * the face with z=0.
- */
- template<int dim>
- static
- void
- colorize_quarter_hyper_shell(Triangulation<dim> & tria,
- const Point<dim>& center,
- const double inner_radius,
- const double outer_radius);
+ /**
+ * Assign boundary number zero to the inner
+ * shell boundary, one to the outer shell
+ * boundary, two to the face with x=0,
+ * three to the face with y=0, four to
+ * the face with z=0.
+ */
+ template<int dim>
+ static
+ void
+ colorize_quarter_hyper_shell(Triangulation<dim> &tria,
+ const Point<dim> ¢er,
+ const double inner_radius,
+ const double outer_radius);
- /**
- * Solve the Laplace equation for
- * @p laplace_transformation
- * function for one of the
- * @p dim space
- * dimensions. Externalized into
- * a function of its own in order
- * to allow parallel execution.
- */
- static
- void
- laplace_solve (const SparseMatrix<double> &S,
- const std::map<unsigned int,double> &m,
- Vector<double> &u);
+ /**
+ * Solve the Laplace equation for
+ * @p laplace_transformation
+ * function for one of the
+ * @p dim space
+ * dimensions. Externalized into
+ * a function of its own in order
+ * to allow parallel execution.
+ */
+ static
+ void
+ laplace_solve (const SparseMatrix<double> &S,
+ const std::map<unsigned int,double> &m,
+ Vector<double> &u);
};
template <int dim, int spacedim=dim>
class GridIn
{
- public:
- /**
- * List of possible mesh input
- * formats. These values are used
- * when calling the function
- * read() in order to determine
- * the actual reader to be
- * called.
- */
- enum Format
- {
- /// Use GridIn::default_format stored in this object
- Default,
- /// Use read_unv()
- unv,
- /// Use read_ucd()
- ucd,
- /// Use read_dbmesh()
- dbmesh,
- /// Use read_xda()
- xda,
- /// Use read_msh()
- msh,
- /// Use read_netcdf()
- netcdf,
- /// Use read_tecplot()
- tecplot
- };
-
- /**
- * Constructor.
- */
- GridIn ();
-
- /**
- * Attach this triangulation
- * to be fed with the grid data.
- */
- void attach_triangulation (Triangulation<dim,spacedim> &tria);
-
- /**
- * Read from the given stream. If
- * no format is given,
- * GridIn::Format::Default is
- * used.
- */
- void read (std::istream &in, Format format=Default);
-
- /**
- * Open the file given by the
- * string and call the previous
- * function read(). This function
- * uses the PathSearch mechanism
- * to find files. The file class
- * used is <code>MESH</code>.
- */
- void read (const std::string &in, Format format=Default);
-
- /**
- * Read grid data from an unv
- * file as generated by the
- * Salome mesh generator.
- * Numerical data is ignored.
- *
- * Note the comments on
- * generating this file format in
- * the general documentation of
- * this class.
- */
- void read_unv(std::istream &in);
-
- /**
- * Read grid data from an ucd file.
- * Numerical data is ignored.
- */
- void read_ucd (std::istream &in);
-
- /**
- * Read grid data from a file
- * containing data in the DB mesh
- * format.
- */
- void read_dbmesh (std::istream &in);
-
- /**
- * Read grid data from a file
- * containing data in the XDA
- * format.
- */
- void read_xda (std::istream &in);
-
- /**
- * Read grid data from an msh
- * file, either version 1 or
- * version 2 of that file
- * format. The GMSH formats are
- * documented at
- * http://www.geuz.org/gmsh/ .
- *
- * @note The input function of
- * deal.II does not distinguish
- * between newline and other
- * whitespace. Therefore, deal.II
- * will be able to read files in
- * a slightly more general format
- * than Gmsh.
- */
- void read_msh (std::istream &in);
-
- /**
- * Read grid data from a NetCDF
- * file. The only data format
- * currently supported is the
- * <tt>TAU grid format</tt>.
- *
- * This function requires the
- * library to be linked with the
- * NetCDF library.
- */
- void read_netcdf (const std::string &filename);
-
- /**
- * Read grid data from a file containing
- * tecplot ASCII data. This also works in
- * the absence of any tecplot
- * installation.
- */
- void read_tecplot (std::istream &in);
-
- /**
- * Returns the standard suffix
- * for a file in this format.
- */
- static std::string default_suffix (const Format format);
-
- /**
- * Return the enum Format for the
- * format name.
- */
- static Format parse_format (const std::string &format_name);
-
- /**
- * Return a list of implemented input
- * formats. The different names are
- * separated by vertical bar signs (<tt>`|'</tt>)
- * as used by the ParameterHandler
- * classes.
- */
- static std::string get_format_names ();
-
- /**
- * Exception
- */
- DeclException1(ExcUnknownSectionType,
- int,
- << "The section type <" << arg1 << "> in an UNV "
- << "input file is not implemented.");
-
- /**
- * Exception
- */
- DeclException1(ExcUnknownElementType,
- int,
- << "The element type <" << arg1 << "> in an UNV "
- << "input file is not implemented.");
-
- /**
- * Exception
- */
- DeclException1 (ExcUnknownIdentifier,
- std::string,
- << "The identifier <" << arg1 << "> as name of a "
- << "part in an UCD input file is unknown or the "
- << "respective input routine is not implemented."
- << "(Maybe the space dimension of triangulation and "
- << "input file do not match?");
- /**
- * Exception
- */
- DeclException0 (ExcNoTriangulationSelected);
- /**
- * Exception
- */
- DeclException2 (ExcInvalidVertexIndex,
- int, int,
- << "Trying to access invalid vertex index " << arg2
- << " while creating cell " << arg1);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidDBMeshFormat);
- /**
- * Exception
- */
- DeclException1 (ExcInvalidDBMESHInput,
- std::string,
- << "The string <" << arg1 << "> is not recognized at the present"
- << " position of a DB Mesh file.");
-
- /**
- * Exception
- */
- DeclException1 (ExcDBMESHWrongDimension,
- int,
- << "The specified dimension " << arg1
- << " is not the same as that of the triangulation to be created.");
-
- DeclException1 (ExcInvalidGMSHInput,
- std::string,
- << "The string <" << arg1 << "> is not recognized at the present"
- << " position of a Gmsh Mesh file.");
-
- DeclException1 (ExcGmshUnsupportedGeometry,
- int,
- << "The Element Identifier <" << arg1 << "> is not "
- << "supported in the Deal.II Library.\n"
- << "Supported elements are: \n"
- << "ELM-TYPE\n"
- << "1 Line (2 nodes, 1 edge).\n"
- << "3 Quadrilateral (4 nodes, 4 edges).\n"
- << "5 Hexahedron (8 nodes, 12 edges, 6 faces).\n"
- << "15 Point (1 node, ignored when read)");
-
-
- DeclException0 (ExcGmshNoCellInformation);
- protected:
- /**
- * Store address of the triangulation to
- * be fed with the data read in.
- */
- SmartPointer<Triangulation<dim,spacedim>,GridIn<dim,spacedim> > tria;
-
- /**
- * This function can write the
- * raw cell data objects created
- * by the <tt>read_*</tt> functions in
- * Gnuplot format to a
- * stream. This is sometimes
- * handy if one would like to see
- * what actually was created, if
- * it is known that the data is
- * not correct in some way, but
- * the Triangulation class
- * refuses to generate a
- * triangulation because of these
- * errors. In particular, the
- * output of this class writes
- * out the cell numbers along
- * with the direction of the
- * faces of each cell. In
- * particular the latter
- * information is needed to
- * verify whether the cell data
- * objects follow the
- * requirements of the ordering
- * of cells and their faces,
- * i.e. that all faces need to
- * have unique directions and
- * specified orientations with
- * respect to neighboring cells
- * (see the documentations to
- * this class and the
- * GridReordering class).
- *
- * The output of this function
- * consists of vectors for each
- * line bounding the cells
- * indicating the direction it
- * has with respect to the
- * orientation of this cell, and
- * the cell number. The whole
- * output is in a form such that
- * it can be read in by Gnuplot
- * and generate the full plot
- * without further ado by the
- * user.
- */
- static void debug_output_grid (const std::vector<CellData<dim> > &cells,
- const std::vector<Point<spacedim> > &vertices,
- std::ostream &out);
-
- private:
-
- /**
- * Skip empty lines in the input
- * stream, i.e. lines that
- * contain either nothing or only
- * whitespace.
- */
- static void skip_empty_lines (std::istream &in);
-
- /**
- * Skip lines of comment that
- * start with the indicated
- * character (e.g. <tt>#</tt>)
- * following the point where the
- * given input stream presently
- * is. After the call to this
- * function, the stream is at the
- * start of the first line after
- * the comment lines, or at the
- * same position as before if
- * there were no lines of
- * comments.
- */
- static void skip_comment_lines (std::istream &in,
- const char comment_start);
-
- /**
- * This function does the nasty work (due
- * to very lax conventions and different
- * versions of the tecplot format) of
- * extracting the important parameters from
- * a tecplot header, contained in the
- * string @p header. The other variables
- * are output variables, their value has no
- * influence on the function execution..
- */
- static void parse_tecplot_header(std::string &header,
- std::vector<unsigned int> &tecplot2deal,
- unsigned int &n_vars,
- unsigned int &n_vertices,
- unsigned int &n_cells,
- std::vector<unsigned int> &IJK,
- bool &structured,
- bool &blocked);
-
- /**
- * Input format used by read() if
- * no format is given.
- */
- Format default_format;
+ public:
+ /**
+ * List of possible mesh input
+ * formats. These values are used
+ * when calling the function
+ * read() in order to determine
+ * the actual reader to be
+ * called.
+ */
+ enum Format
+ {
+ /// Use GridIn::default_format stored in this object
+ Default,
+ /// Use read_unv()
+ unv,
+ /// Use read_ucd()
+ ucd,
+ /// Use read_dbmesh()
+ dbmesh,
+ /// Use read_xda()
+ xda,
+ /// Use read_msh()
+ msh,
+ /// Use read_netcdf()
+ netcdf,
+ /// Use read_tecplot()
+ tecplot
+ };
+
+ /**
+ * Constructor.
+ */
+ GridIn ();
+
+ /**
+ * Attach this triangulation
+ * to be fed with the grid data.
+ */
+ void attach_triangulation (Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Read from the given stream. If
+ * no format is given,
+ * GridIn::Format::Default is
+ * used.
+ */
+ void read (std::istream &in, Format format=Default);
+
+ /**
+ * Open the file given by the
+ * string and call the previous
+ * function read(). This function
+ * uses the PathSearch mechanism
+ * to find files. The file class
+ * used is <code>MESH</code>.
+ */
+ void read (const std::string &in, Format format=Default);
+
+ /**
+ * Read grid data from an unv
+ * file as generated by the
+ * Salome mesh generator.
+ * Numerical data is ignored.
+ *
+ * Note the comments on
+ * generating this file format in
+ * the general documentation of
+ * this class.
+ */
+ void read_unv(std::istream &in);
+
+ /**
+ * Read grid data from a ucd file.
+ * Numerical data is ignored.
+ */
+ void read_ucd (std::istream &in);
+
+ /**
+ * Read grid data from a file
+ * containing data in the DB mesh
+ * format.
+ */
+ void read_dbmesh (std::istream &in);
+
+ /**
+ * Read grid data from a file
+ * containing data in the XDA
+ * format.
+ */
+ void read_xda (std::istream &in);
+
+ /**
+ * Read grid data from an msh
+ * file, either version 1 or
+ * version 2 of that file
+ * format. The GMSH formats are
+ * documented at
+ * http://www.geuz.org/gmsh/ .
+ *
+ * @note The input function of
+ * deal.II does not distinguish
+ * between newline and other
+ * whitespace. Therefore, deal.II
+ * will be able to read files in
+ * a slightly more general format
+ * than Gmsh.
+ */
+ void read_msh (std::istream &in);
+
+ /**
+ * Read grid data from a NetCDF
+ * file. The only data format
+ * currently supported is the
+ * <tt>TAU grid format</tt>.
+ *
+ * This function requires the
+ * library to be linked with the
+ * NetCDF library.
+ */
+ void read_netcdf (const std::string &filename);
+
+ /**
+ * Read grid data from a file containing
+ * tecplot ASCII data. This also works in
+ * the absence of any tecplot
+ * installation.
+ */
+ void read_tecplot (std::istream &in);
+
+ /**
+ * Returns the standard suffix
+ * for a file in this format.
+ */
+ static std::string default_suffix (const Format format);
+
+ /**
+ * Return the enum Format for the
+ * format name.
+ */
+ static Format parse_format (const std::string &format_name);
+
+ /**
+ * Return a list of implemented input
+ * formats. The different names are
+ * separated by vertical bar signs (<tt>`|'</tt>)
+ * as used by the ParameterHandler
+ * classes.
+ */
+ static std::string get_format_names ();
+
+ /**
+ * Exception
+ */
+ DeclException1(ExcUnknownSectionType,
+ int,
+ << "The section type <" << arg1 << "> in an UNV "
+ << "input file is not implemented.");
+
+ /**
+ * Exception
+ */
+ DeclException1(ExcUnknownElementType,
+ int,
+ << "The element type <" << arg1 << "> in an UNV "
+ << "input file is not implemented.");
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcUnknownIdentifier,
+ std::string,
+ << "The identifier <" << arg1 << "> as name of a "
+ << "part in an UCD input file is unknown or the "
+ << "respective input routine is not implemented. "
+ << "(Maybe the space dimension of triangulation and "
+ << "input file do not match?)");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNoTriangulationSelected);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidVertexIndex,
+ int, int,
+ << "Trying to access invalid vertex index " << arg2
+ << " while creating cell " << arg1);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidDBMeshFormat);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidDBMESHInput,
+ std::string,
+ << "The string <" << arg1 << "> is not recognized at the present"
+ << " position of a DB Mesh file.");
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcDBMESHWrongDimension,
+ int,
+ << "The specified dimension " << arg1
+ << " is not the same as that of the triangulation to be created.");
+
+ DeclException1 (ExcInvalidGMSHInput,
+ std::string,
+ << "The string <" << arg1 << "> is not recognized at the present"
+ << " position of a Gmsh Mesh file.");
+
+ DeclException1 (ExcGmshUnsupportedGeometry,
+ int,
+ << "The Element Identifier <" << arg1 << "> is not "
+ << "supported in the Deal.II Library.\n"
+ << "Supported elements are: \n"
+ << "ELM-TYPE\n"
+ << "1 Line (2 nodes, 1 edge).\n"
+ << "3 Quadrilateral (4 nodes, 4 edges).\n"
+ << "5 Hexahedron (8 nodes, 12 edges, 6 faces).\n"
+ << "15 Point (1 node, ignored when read)");
+
+
+ DeclException0 (ExcGmshNoCellInformation);
+ protected:
+ /**
+ * Store address of the triangulation to
+ * be fed with the data read in.
+ */
+ SmartPointer<Triangulation<dim,spacedim>,GridIn<dim,spacedim> > tria;
+
+ /**
+ * This function can write the
+ * raw cell data objects created
+ * by the <tt>read_*</tt> functions in
+ * Gnuplot format to a
+ * stream. This is sometimes
+ * handy if one would like to see
+ * what actually was created, if
+ * it is known that the data is
+ * not correct in some way, but
+ * the Triangulation class
+ * refuses to generate a
+ * triangulation because of these
+ * errors. In particular, the
+ * output of this function writes
+ * out the cell numbers along
+ * with the direction of the
+ * faces of each cell. Especially
+ * the latter
+ * information is needed to
+ * verify whether the cell data
+ * objects follow the
+ * requirements of the ordering
+ * of cells and their faces,
+ * i.e. that all faces need to
+ * have unique directions and
+ * specified orientations with
+ * respect to neighboring cells
+ * (see the documentation of
+ * this class and the
+ * GridReordering class).
+ *
+ * The output of this function
+ * consists of vectors for each
+ * line bounding the cells
+ * indicating the direction it
+ * has with respect to the
+ * orientation of this cell, and
+ * the cell number. The whole
+ * output is in a form such that
+ * it can be read in by Gnuplot
+ * and generate the full plot
+ * without further ado by the
+ * user.
+ */
+ static void debug_output_grid (const std::vector<CellData<dim> > &cells,
+ const std::vector<Point<spacedim> > &vertices,
+ std::ostream &out);
+
+ private:
+
+ /**
+ * Skip empty lines in the input
+ * stream, i.e. lines that
+ * contain either nothing or only
+ * whitespace.
+ */
+ static void skip_empty_lines (std::istream &in);
+
+ /**
+ * Skip lines of comment that
+ * start with the indicated
+ * character (e.g. <tt>#</tt>)
+ * following the point where the
+ * given input stream presently
+ * is. After the call to this
+ * function, the stream is at the
+ * start of the first line after
+ * the comment lines, or at the
+ * same position as before if
+ * there were no lines of
+ * comments.
+ */
+ static void skip_comment_lines (std::istream &in,
+ const char comment_start);
+
+ /**
+ * This function does the nasty work (due
+ * to very lax conventions and different
+ * versions of the tecplot format) of
+ * extracting the important parameters from
+ * a tecplot header, contained in the
+ * string @p header. The other variables
+ * are output variables; their values have no
+ * influence on the execution of the function.
+ */
+ static void parse_tecplot_header(std::string &header,
+ std::vector<unsigned int> &tecplot2deal,
- unsigned int &n_vars,
- unsigned int &n_vertices,
- unsigned int &n_cells,
++ unsigned int &n_vars,
++ unsigned int &n_vertices,
++ unsigned int &n_cells,
+ std::vector<unsigned int> &IJK,
+ bool &structured,
+ bool &blocked);
+
+ /**
+ * Input format used by read() if
+ * no format is given.
+ */
+ Format default_format;
};
};
- /**
- * An enriched quad with information about how the mesh fits together
- * so that we can move around the mesh efficiently.
- *
- * @author Michael Anderson, 2003
- */
+ /**
+ * An enriched quad with information about how the mesh fits together
+ * so that we can move around the mesh efficiently.
+ *
+ * @author Michael Anderson, 2003
+ */
class MQuad
{
- public:
- /**
- * v0 - v3 are indexes of the
- * vertices of the quad, s0 -
- * s3 are indexes for the
- * sides of the quad
- */
- MQuad (const unsigned int v0,
- const unsigned int v1,
- const unsigned int v2,
- const unsigned int v3,
- const unsigned int s0,
- const unsigned int s1,
- const unsigned int s2,
- const unsigned int s3,
- const CellData<2> &cd);
-
- /**
- * Stores the vertex numbers
- */
- unsigned int v[4];
- /**
- * Stores the side numbers
- */
- unsigned int side[4];
-
- /**
- * Copy of the @p CellData object
- * from which we construct the
- * data of this object.
- */
- CellData<2> original_cell_data;
+ public:
+ /**
+ * v0 - v3 are indexes of the
+ * vertices of the quad, s0 -
+ * s3 are indexes for the
+ * sides of the quad
+ */
+ MQuad (const unsigned int v0,
+ const unsigned int v1,
+ const unsigned int v2,
+ const unsigned int v3,
+ const unsigned int s0,
+ const unsigned int s1,
+ const unsigned int s2,
+ const unsigned int s3,
- const CellData<2> &cd);
++ const CellData<2> &cd);
+
+ /**
+ * Stores the vertex numbers
+ */
+ unsigned int v[4];
+ /**
+ * Stores the side numbers
+ */
+ unsigned int side[4];
+
+ /**
+ * Copy of the @p CellData object
+ * from which we construct the
+ * data of this object.
+ */
+ CellData<2> original_cell_data;
};
- /**
- * The enriched side class containing connectivity information.
- * Orientation is from v0 to v1; Initially this should have v0<v1.
- * After global orientation could be either way.
- *
- * @author Michael Anderson, 2003
- */
+ /**
+ * The enriched side class containing connectivity information.
+ * Orientation is from v0 to v1; initially this should have v0<v1.
+ * After the global orientation step it could be either way.
+ *
+ * @author Michael Anderson, 2003
+ */
struct MSide
{
- /**
- * Constructor.
- */
- MSide (const unsigned int initv0,
- const unsigned int initv1);
-
- /**
- * Return whether the sides
- * are equal, even if their
- * ends are reversed.
- */
- bool operator==(const MSide& s2) const;
-
- /**
- * Return the opposite.
- */
- bool operator!=(const MSide& s2) const;
-
- unsigned int v0;
- unsigned int v1;
- unsigned int Q0;
- unsigned int Q1;
-
- /**
- * Local side numbers on quads 0 and 1.
- */
- unsigned int lsn0, lsn1;
- bool Oriented;
-
- /**
- * This class makes a MSide have v0<v1
- */
- struct SideRectify;
-
- /**
- * Provides a side ordering,
- * s1<s2, without assuming
- * v0<v1 in either of the
- * sides.
- */
- struct SideSortLess;
+ /**
+ * Constructor.
+ */
+ MSide (const unsigned int initv0,
+ const unsigned int initv1);
+
+ /**
+ * Return whether the sides
+ * are equal, even if their
+ * ends are reversed.
+ */
+ bool operator==(const MSide &s2) const;
+
+ /**
+ * Return the opposite.
+ */
+ bool operator!=(const MSide &s2) const;
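+
+ /* A sketch of the reversed-ends equality test described above
+ * (illustrative only, not the library's actual implementation):
+ *
+ * return ((v0 == s2.v0) && (v1 == s2.v1)) ||
+ * ((v0 == s2.v1) && (v1 == s2.v0));
+ */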
+
+ unsigned int v0;
+ unsigned int v1;
+ unsigned int Q0;
+ unsigned int Q1;
+
+ /**
+ * Local side numbers on quads 0 and 1.
+ */
+ unsigned int lsn0, lsn1;
+ bool Oriented;
+
+ /**
+ * This class makes a MSide have v0<v1
+ */
+ struct SideRectify;
+
+ /**
+ * Provides a side ordering,
+ * s1<s2, without assuming
+ * v0<v1 in either of the
+ * sides.
+ */
+ struct SideSortLess;
};
const unsigned int vertex);
- /**
- * Find and return an iterator to
- * the active cell that surrounds
- * a given point @p ref. The
- * type of the first parameter
- * may be either
- * Triangulation,
- * DoFHandler, or
- * MGDoFHandler, i.e. we
- * can find the cell around a
- * point for iterators into each
- * of these classes.
- *
- * This is solely a wrapper function
- * for the @p interpolate function
- * given below,
- * providing backward compatibility.
- * A Q1 mapping is used for the
- * boundary, and the iterator to
- * the cell in which the point
- * resides is returned.
- *
- * It is recommended to use the
- * other version of this function,
- * as it simultaneously delivers the
- * local coordinate of the given point
- * without additional computational cost.
- */
+ /**
+ * Find and return an iterator to
+ * the active cell that surrounds
+ * a given point @p ref. The
+ * type of the first parameter
+ * may be either
+ * Triangulation,
+ * DoFHandler, or
+ * MGDoFHandler, i.e. we
+ * can find the cell around a
+ * point for iterators into each
+ * of these classes.
+ *
+ * This is solely a wrapper function
+ * for the @p interpolate function
+ * given below,
+ * providing backward compatibility.
+ * A Q1 mapping is used for the
+ * boundary, and the iterator to
+ * the cell in which the point
+ * resides is returned.
+ *
+ * It is recommended to use the
+ * other version of this function,
+ * as it simultaneously delivers the
+ * local coordinate of the given point
+ * without additional computational cost.
+ */
template <int dim, template <int,int> class Container, int spacedim>
typename Container<dim,spacedim>::active_cell_iterator
- find_active_cell_around_point (const Container<dim,spacedim> &container,
+ find_active_cell_around_point (const Container<dim,spacedim> &container,
const Point<spacedim> &p);
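
/* Usage sketch (it is assumed here that the enclosing namespace is
 * GridTools and that a DoFHandler<2> named dof_handler already exists):
 *
 *   Point<2> p (0.3, 0.4);
 *   DoFHandler<2>::active_cell_iterator cell
 *     = GridTools::find_active_cell_around_point (dof_handler, p);
 */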
- /**
- * Find and return an iterator to
- * the active cell that surrounds
- * a given point @p p. The
- * type of the first parameter
- * may be either
- * Triangulation,
- * DoFHandler, hp::DoFHandler, or
- * MGDoFHandler, i.e., we
- * can find the cell around a
- * point for iterators into each
- * of these classes.
- *
- * The algorithm used in this
- * function proceeds by first
- * looking for vertex located
- * closest to the given point, see
- * find_closest_vertex(). Secondly,
- * all adjacent cells to this point
- * are found in the mesh, see
- * find_cells_adjacent_to_vertex().
- * Lastly, for each of these cells,
- * it is tested whether the point is
- * inside. This check is performed
- * using arbitrary boundary mappings.
- * Still, it is possible that due
- * to roundoff errors, the point
- * cannot be located exactly inside
- * the unit cell. In this case,
- * even points at a very small
- * distance outside the unit cell
- * are allowed.
- *
- * If a point lies on the
- * boundary of two or more cells,
- * then the algorithm tries to identify
- * the cell that is of highest
- * refinement level.
- *
- * The function returns an
- * iterator to the cell, as well
- * as the local position of the
- * point inside the unit
- * cell. This local position
- * might be located slightly
- * outside an actual unit cell,
- * due to numerical roundoff.
- * Therefore, the point returned
- * by this function should
- * be projected onto the unit cell,
- * using GeometryInfo::project_to_unit_cell.
- * This is not automatically performed
- * by the algorithm.
- */
+ /**
+ * Find and return an iterator to
+ * the active cell that surrounds
+ * a given point @p p. The
+ * type of the first parameter
+ * may be either
+ * Triangulation,
+ * DoFHandler, hp::DoFHandler, or
+ * MGDoFHandler, i.e., we
+ * can find the cell around a
+ * point for iterators into each
+ * of these classes.
+ *
+ * The algorithm used in this
+ * function proceeds by first
+ * looking for the vertex located
+ * closest to the given point, see
+ * find_closest_vertex(). Secondly,
+ * all cells adjacent to this vertex
+ * are found in the mesh, see
+ * find_cells_adjacent_to_vertex().
+ * Lastly, for each of these cells,
+ * it is tested whether the point is
+ * inside. This check is performed
+ * using arbitrary boundary mappings.
+ * Still, it is possible that due
+ * to roundoff errors, the point
+ * cannot be located exactly inside
+ * the unit cell. In this case,
+ * even points at a very small
+ * distance outside the unit cell
+ * are allowed.
+ *
+ * If a point lies on the
+ * boundary of two or more cells,
+ * then the algorithm tries to identify
+ * the cell that is of highest
+ * refinement level.
+ *
+ * The function returns an
+ * iterator to the cell, as well
+ * as the local position of the
+ * point inside the unit
+ * cell. This local position
+ * might be located slightly
+ * outside an actual unit cell,
+ * due to numerical roundoff.
+ * Therefore, the point returned
+ * by this function should
+ * be projected onto the unit cell,
+ * using GeometryInfo::project_to_unit_cell.
+ * This is not automatically performed
+ * by the algorithm.
+ */
template <int dim, template<int, int> class Container, int spacedim>
std::pair<typename Container<dim,spacedim>::active_cell_iterator, Point<dim> >
find_active_cell_around_point (const Mapping<dim,spacedim> &mapping,
const SparsityPattern &cell_connection_graph,
Triangulation<dim,spacedim> &triangulation);
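
/* The pair-returning variant documented above can be used as follows
 * (a sketch; mapping is assumed to be e.g. a MappingQ1<2> object and
 * the enclosing namespace GridTools):
 *
 *   std::pair<DoFHandler<2>::active_cell_iterator, Point<2> > cell_and_point
 *     = GridTools::find_active_cell_around_point (mapping, dof_handler, p);
 *   // guard against roundoff as recommended above:
 *   Point<2> p_unit
 *     = GeometryInfo<2>::project_to_unit_cell (cell_and_point.second);
 */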
- /**
- * For each active cell, return in the
- * output array to which subdomain (as
- * given by the <tt>cell->subdomain_id()</tt>
- * function) it belongs. The output array
- * is supposed to have the right size
- * already when calling this function.
- *
- * This function returns the association
- * of each cell with one subdomain. If
- * you are looking for the association of
- * each @em DoF with a subdomain, use the
- * <tt>DoFTools::get_subdomain_association</tt>
- * function.
- */
+ /**
+ * For each active cell, return in the
+ * output array to which subdomain (as
+ * given by the <tt>cell->subdomain_id()</tt>
+ * function) it belongs. The output array
+ * is supposed to have the right size
+ * already when calling this function.
+ *
+ * This function returns the association
+ * of each cell with one subdomain. If
+ * you are looking for the association of
+ * each @em DoF with a subdomain, use the
+ * <tt>DoFTools::get_subdomain_association</tt>
+ * function.
+ */
template <int dim, int spacedim>
void
- get_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
+ get_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
std::vector<types::subdomain_id> &subdomain);
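
/* Sketch (assuming the enclosing namespace is GridTools and that the
 * cells have already been assigned subdomain ids, e.g. by a partitioner);
 * note that the output vector must already have one entry per active cell:
 *
 *   std::vector<types::subdomain_id>
 *     cell_subdomain (triangulation.n_active_cells());
 *   GridTools::get_subdomain_association (triangulation, cell_subdomain);
 */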
- /**
- * Count how many cells are uniquely
- * associated with the given @p subdomain
- * index.
- *
- * This function may return zero
- * if there are no cells with the
- * given @p subdomain index. This
- * can happen, for example, if
- * you try to partition a coarse
- * mesh into more partitions (one
- * for each processor) than there
- * are cells in the mesh.
- *
- * This function returns the number of
- * cells associated with one
- * subdomain. If you are looking for the
- * association of @em DoFs with this
- * subdomain, use the
- * <tt>DoFTools::count_dofs_with_subdomain_association</tt>
- * function.
- */
+ /**
+ * Count how many cells are uniquely
+ * associated with the given @p subdomain
+ * index.
+ *
+ * This function may return zero
+ * if there are no cells with the
+ * given @p subdomain index. This
+ * can happen, for example, if
+ * you try to partition a coarse
+ * mesh into more partitions (one
+ * for each processor) than there
+ * are cells in the mesh.
+ *
+ * This function returns the number of
+ * cells associated with one
+ * subdomain. If you are looking for the
+ * association of @em DoFs with this
+ * subdomain, use the
+ * <tt>DoFTools::count_dofs_with_subdomain_association</tt>
+ * function.
+ */
template <int dim, int spacedim>
unsigned int
count_cells_with_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
template <typename G>
class TriaObjects
{
- public:
- /**
- * Constructor resetting some data.
- */
- TriaObjects();
-
- /**
- * Vector of the objects belonging to
- * this level. The index of the object
- * equals the index in this container.
- */
- std::vector<G> cells;
- /**
- * Index of the even children of an object.
- * Since when objects are refined, all
- * children are created at the same
- * time, they are appended to the list
- * at least in pairs after each other.
- * We therefore only store the index
- * of the even children, the uneven
- * follow immediately afterwards.
- *
- * If an object has no children, -1 is
- * stored in this list. An object is
- * called active if it has no
- * children. The function
- * TriaAccessorBase::has_children()
- * tests for this.
- */
- std::vector<int> children;
-
- /**
- * Store the refinement
- * case each of the
- * cells is refined
- * with. This vector
- * might be replaced by
- * vector<vector<bool> >
- * (dim, vector<bool>
- * (n_cells)) which is
- * more memory efficient.
- */
- std::vector<RefinementCase<G::dimension> > refinement_cases;
-
- /**
- * Vector storing whether an object is
- * used in the @p cells vector.
- *
- * Since it is difficult to delete
- * elements in a @p vector, when an
- * element is not needed any more
- * (e.g. after derefinement), it is
- * not deleted from the list, but
- * rather the according @p used flag
- * is set to @p false.
- */
- std::vector<bool> used;
-
- /**
- * Make available a field for user data,
- * one bit per object. This field is usually
- * used when an operation runs over all
- * cells and needs information whether
- * another cell (e.g. a neighbor) has
- * already been processed.
- *
- * You can clear all used flags using
- * dealii::Triangulation::clear_user_flags().
- */
- std::vector<bool> user_flags;
-
-
- /**
- * We use this union to store
- * boundary and material
- * data. Because only one one
- * out of these two is
- * actually needed here, we
- * use an union.
- */
- struct BoundaryOrMaterialId
+ public:
+ /**
+ * Constructor resetting some data.
+ */
+ TriaObjects();
+
+ /**
+ * Vector of the objects belonging to
+ * this level. The index of the object
+ * equals the index in this container.
+ */
+ std::vector<G> cells;
+ /**
+ * Index of the even children of an object.
+ * Since when objects are refined, all
+ * children are created at the same
+ * time, they are appended to the list
+ * at least in pairs after each other.
+ * We therefore only store the index
+ * of the even children; the odd ones
+ * follow immediately afterwards.
+ *
+ * If an object has no children, -1 is
+ * stored in this list. An object is
+ * called active if it has no
+ * children. The function
+ * TriaAccessorBase::has_children()
+ * tests for this.
+ */
+ std::vector<int> children;
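+
+ /* Illustrative reading of the convention described above (a
+ * simplified sketch, not the library's actual accessor code):
+ *
+ * // children[i] == -1 : object i is active (no children)
+ * // children[i], children[i]+1 : indices of a pair of children of i
+ */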
+
+ /**
+ * Store the refinement
+ * case each of the
+ * cells is refined
+ * with. This vector
+ * might be replaced by
+ * vector<vector<bool> >
+ * (dim, vector<bool>
+ * (n_cells)) which is
+ * more memory efficient.
+ */
+ std::vector<RefinementCase<G::dimension> > refinement_cases;
+
+ /**
+ * Vector storing whether an object is
+ * used in the @p cells vector.
+ *
+ * Since it is difficult to delete
+ * elements in a @p vector, when an
+ * element is not needed any more
+ * (e.g. after derefinement), it is
+ * not deleted from the list, but
+ * rather the corresponding @p used flag
+ * is set to @p false.
+ */
+ std::vector<bool> used;
+
+ /**
+ * Make available a field for user data,
+ * one bit per object. This field is usually
+ * used when an operation runs over all
+ * cells and needs information whether
+ * another cell (e.g. a neighbor) has
+ * already been processed.
+ *
+ * You can clear all used flags using
+ * dealii::Triangulation::clear_user_flags().
+ */
+ std::vector<bool> user_flags;
+
+
+ /**
+ * We use this union to store
+ * boundary and material
+ * data. Because only one of
+ * the two is actually needed
+ * here, we use a union.
+ */
+ struct BoundaryOrMaterialId
+ {
+ union
{
- union
- {
- types::boundary_id boundary_id;
- types::material_id material_id;
- };
-
-
- /**
- * Default constructor.
- */
- BoundaryOrMaterialId ();
-
- /**
- * Return the size of objects
- * of this kind.
- */
- static
- std::size_t memory_consumption ();
-
- /**
- * Read or write the data
- * of this object to or
- * from a stream for the
- * purpose of
- * serialization
- */
- template <class Archive>
- void serialize(Archive & ar,
- const unsigned int version);
+ types::boundary_id boundary_id;
+ types::material_id material_id;
};
- /**
- * Store boundary and material data. For
- * example, in one dimension, this field
- * stores the material id of a line, which
- * is a number between 0 and
- * numbers::invalid_material_id-1. In more
- * than one dimension, lines have no
- * material id, but they may be at the
- * boundary; then, we store the
- * boundary indicator in this field,
- * which denotes to which part of the
- * boundary this line belongs and which
- * boundary conditions hold on this
- * part. The boundary indicator also
- * is a number between zero and
- * numbers::internal_face_boundary_id-1;
- * the id numbers::internal_face_boundary_id
- * is reserved for lines
- * in the interior and may be used
- * to check whether a line is at the
- * boundary or not, which otherwise
- * is not possible if you don't know
- * which cell it belongs to.
- */
- std::vector<BoundaryOrMaterialId> boundary_or_material_id;
-
- /**
- * Assert that enough space
- * is allocated to
- * accommodate
- * <code>new_objs_in_pairs</code>
- * new objects, stored in
- * pairs, plus
- * <code>new_obj_single</code>
- * stored individually.
- * This function does not
- * only call
- * <code>vector::reserve()</code>,
- * but does really append
- * the needed elements.
- *
- * In 2D e.g. refined lines have to be
- * stored in pairs, whereas new lines in the
- * interior of refined cells can be stored as
- * single lines.
- */
- void reserve_space (const unsigned int new_objs_in_pairs,
- const unsigned int new_objs_single = 0);
-
- /**
- * Return an iterator to the
- * next free slot for a
- * single object. This
- * function is only used by
- * dealii::Triangulation::execute_refinement()
- * in 3D.
- *
- * @warning Interestingly,
- * this function is not used
- * for 1D or 2D
- * triangulations, where it
- * seems the authors of the
- * refinement function insist
- * on reimplementing its
- * contents.
- *
- * @todo This function is
- * not instantiated for the
- * codim-one case
- */
- template <int dim, int spacedim>
- dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
- next_free_single_object (const dealii::Triangulation<dim,spacedim> &tria);
-
- /**
- * Return an iterator to the
- * next free slot for a pair
- * of objects. This
- * function is only used by
- * dealii::Triangulation::execute_refinement()
- * in 3D.
- *
- * @warning Interestingly,
- * this function is not used
- * for 1D or 2D
- * triangulations, where it
- * seems the authors of the
- * refinement function insist
- * on reimplementing its
- * contents.
- *
- * @todo This function is
- * not instantiated for the
- * codim-one case
- */
- template <int dim, int spacedim>
- dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
- next_free_pair_object (const dealii::Triangulation<dim,spacedim> &tria);
-
- /**
- * Return an iterator to the
- * next free slot for a pair
- * of hexes. Only implemented
- * for
- * <code>G=Hexahedron</code>.
- */
- template <int dim, int spacedim>
- typename dealii::Triangulation<dim,spacedim>::raw_hex_iterator
- next_free_hex (const dealii::Triangulation<dim,spacedim> &tria,
- const unsigned int level);
-
- /**
- * Clear all the data contained in this object.
- */
- void clear();
-
- /**
- * The orientation of the
- * face number <code>face</code>
- * of the cell with number
- * <code>cell</code>. The return
- * value is <code>true</code>, if
- * the normal vector points
- * the usual way
- * (GeometryInfo::unit_normal_orientation)
- * and <code>false</code> else.
- *
- * The result is always
- * <code>true</code> in this
- * class, but derived classes
- * will reimplement this.
- *
- * @warning There is a bug in
- * the class hierarchy right
- * now. Avoid ever calling
- * this function through a
- * reference, since you might
- * end up with the base class
- * function instead of the
- * derived class. Still, we
- * do not want to make it
- * virtual for efficiency
- * reasons.
- */
- bool face_orientation(const unsigned int cell, const unsigned int face) const;
-
-
- /**
- * Access to user pointers.
- */
- void*& user_pointer(const unsigned int i);
-
- /**
- * Read-only access to user pointers.
- */
- const void* user_pointer(const unsigned int i) const;
-
- /**
- * Access to user indices.
- */
- unsigned int& user_index(const unsigned int i);
-
- /**
- * Read-only access to user pointers.
- */
- unsigned int user_index(const unsigned int i) const;
-
- /**
- * Reset user data to zero.
- */
- void clear_user_data(const unsigned int i);
-
- /**
- * Clear all user pointers or
- * indices and reset their
- * type, such that the next
- * access may be aither or.
- */
- void clear_user_data();
-
- /**
- * Clear all user flags.
- */
- void clear_user_flags();
-
- /**
- * Check the memory consistency of the
- * different containers. Should only be
- * called with the prepro flag @p DEBUG
- * set. The function should be called from
- * the functions of the higher
- * TriaLevel classes.
- */
- void monitor_memory (const unsigned int true_dimension) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Read or write the data of this object to or
- * from a stream for the purpose of serialization
- */
+
+
+ /**
+ * Default constructor.
+ */
+ BoundaryOrMaterialId ();
+
+ /**
+ * Return the size of objects
+ * of this kind.
+ */
+ static
+ std::size_t memory_consumption ();
+
+ /**
+ * Read or write the data
+ * of this object to or
+ * from a stream for the
+ * purpose of
+ * serialization
+ */
template <class Archive>
- void serialize(Archive & ar,
+ void serialize(Archive &ar,
const unsigned int version);
-
- /**
- * Exception
- */
- DeclException3 (ExcMemoryWasted,
- char*, int, int,
- << "The container " << arg1 << " contains "
- << arg2 << " elements, but it`s capacity is "
- << arg3 << ".");
- /**
- * Exception
- * @ingroup Exceptions
- */
- DeclException2 (ExcMemoryInexact,
- int, int,
- << "The containers have sizes " << arg1 << " and "
- << arg2 << ", which is not as expected.");
-
- /**
- * Exception
- */
- DeclException2 (ExcWrongIterator,
- char*, char*,
- << "You asked for the next free " << arg1 << "_iterator, "
- "but you can only ask for " << arg2 <<"_iterators.");
-
- /**
- * dealii::Triangulation objects can
- * either access a user
- * pointer or a user
- * index. What you tried to
- * do is trying to access one
- * of those after using the
- * other.
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcPointerIndexClash);
-
- protected:
- /**
- * Counter for next_free_single_* functions
- */
- unsigned int next_free_single;
-
- /**
- * Counter for next_free_pair_* functions
- */
- unsigned int next_free_pair;
-
- /**
- * Bool flag for next_free_single_* functions
- */
- bool reverse_order_next_free_single;
-
- /**
- * The data type storing user
- * pointers or user indices.
- */
- struct UserData
+ };
+ /**
+ * Store boundary and material data. For
+ * example, in one dimension, this field
+ * stores the material id of a line, which
+ * is a number between 0 and
+ * numbers::invalid_material_id-1. In more
+ * than one dimension, lines have no
+ * material id, but they may be at the
+ * boundary; then, we store the
+ * boundary indicator in this field,
+ * which denotes to which part of the
+ * boundary this line belongs and which
+ * boundary conditions hold on this
+ * part. The boundary indicator also
+ * is a number between zero and
+ * numbers::internal_face_boundary_id-1;
+ * the id numbers::internal_face_boundary_id
+ * is reserved for lines
+ * in the interior and may be used
+ * to check whether a line is at the
+ * boundary or not, which otherwise
+ * is not possible if you don't know
+ * which cell it belongs to.
+ */
+ std::vector<BoundaryOrMaterialId> boundary_or_material_id;
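+
+ /* A sketch of the boundary test described above (for lines in more
+ * than one dimension):
+ *
+ * const bool line_at_boundary
+ * = (boundary_or_material_id[i].boundary_id
+ * != numbers::internal_face_boundary_id);
+ */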
+
+ /**
+ * Assert that enough space
+ * is allocated to
+ * accommodate
+ * <code>new_objs_in_pairs</code>
+ * new objects, stored in
+ * pairs, plus
+ * <code>new_obj_single</code>
+ * stored individually.
+ * This function does not
+ * only call
+ * <code>vector::reserve()</code>,
+ * but really appends
+ * the needed elements.
+ *
+ * In 2D, for example, refined lines have to be
+ * stored in pairs, whereas new lines in the
+ * interior of refined cells can be stored as
+ * single lines.
+ */
+ void reserve_space (const unsigned int new_objs_in_pairs,
+ const unsigned int new_objs_single = 0);
+
+ /**
+ * Return an iterator to the
+ * next free slot for a
+ * single object. This
+ * function is only used by
+ * dealii::Triangulation::execute_refinement()
+ * in 3D.
+ *
+ * @warning Interestingly,
+ * this function is not used
+ * for 1D or 2D
+ * triangulations, where it
+ * seems the authors of the
+ * refinement function insist
+ * on reimplementing its
+ * contents.
+ *
+ * @todo This function is
+ * not instantiated for the
+ * codim-one case
+ */
+ template <int dim, int spacedim>
+ dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
+ next_free_single_object (const dealii::Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Return an iterator to the
+ * next free slot for a pair
+ * of objects. This
+ * function is only used by
+ * dealii::Triangulation::execute_refinement()
+ * in 3D.
+ *
+ * @warning Interestingly,
+ * this function is not used
+ * for 1D or 2D
+ * triangulations, where it
+ * seems the authors of the
+ * refinement function insist
+ * on reimplementing its
+ * contents.
+ *
+ * @todo This function is
+ * not instantiated for the
+ * codim-one case
+ */
+ template <int dim, int spacedim>
+ dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
+ next_free_pair_object (const dealii::Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Return an iterator to the
+ * next free slot for a pair
+ * of hexes. Only implemented
+ * for
+ * <code>G=Hexahedron</code>.
+ */
+ template <int dim, int spacedim>
+ typename dealii::Triangulation<dim,spacedim>::raw_hex_iterator
+ next_free_hex (const dealii::Triangulation<dim,spacedim> &tria,
+ const unsigned int level);
+
+ /**
+ * Clear all the data contained in this object.
+ */
+ void clear();
+
+ /**
+ * The orientation of the
+ * face number <code>face</code>
+ * of the cell with number
+ * <code>cell</code>. The return
+ * value is <code>true</code> if
+ * the normal vector points
+ * the usual way
+ * (GeometryInfo::unit_normal_orientation)
+ * and <code>false</code> otherwise.
+ *
+ * The result is always
+ * <code>true</code> in this
+ * class, but derived classes
+ * will reimplement this.
+ *
+ * @warning There is a bug in
+ * the class hierarchy right
+ * now. Avoid ever calling
+ * this function through a
+ * reference, since you might
+ * end up with the base class
+ * function instead of the
+ * derived class. Still, we
+ * do not want to make it
+ * virtual for efficiency
+ * reasons.
+ */
+ bool face_orientation(const unsigned int cell, const unsigned int face) const;
+
+
+ /**
+ * Access to user pointers.
+ */
- void *&user_pointer(const unsigned int i);
++ void *&user_pointer(const unsigned int i);
+
+ /**
+ * Read-only access to user pointers.
+ */
+ const void *user_pointer(const unsigned int i) const;
+
+ /**
+ * Access to user indices.
+ */
+ unsigned int &user_index(const unsigned int i);
+
+ /**
+ * Read-only access to user indices.
+ */
+ unsigned int user_index(const unsigned int i) const;
+
+ /**
+ * Reset user data to zero.
+ */
+ void clear_user_data(const unsigned int i);
+
+ /**
+ * Clear all user pointers or
+ * indices and reset their
+ * type, such that the next
+ * access may be either one.
+ */
+ void clear_user_data();
+
+ /**
+ * Clear all user flags.
+ */
+ void clear_user_flags();
+
+ /**
+ * Check the memory consistency of the
+ * different containers. Should only be
+ * called with the preprocessor flag @p DEBUG
+ * set. The function should be called from
+ * the functions of the higher
+ * TriaLevel classes.
+ */
+ void monitor_memory (const unsigned int true_dimension) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Read or write the data of this object to or
+ * from a stream for the purpose of serialization
+ */
+ template <class Archive>
+ void serialize(Archive &ar,
+ const unsigned int version);
+
+ /**
+ * Exception
+ */
+ DeclException3 (ExcMemoryWasted,
+ char *, int, int,
+ << "The container " << arg1 << " contains "
+ << arg2 << " elements, but it`s capacity is "
+ << arg3 << ".");
+ /**
+ * Exception
+ * @ingroup Exceptions
+ */
+ DeclException2 (ExcMemoryInexact,
+ int, int,
+ << "The containers have sizes " << arg1 << " and "
+ << arg2 << ", which is not as expected.");
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcWrongIterator,
+ char *, char *,
+ << "You asked for the next free " << arg1 << "_iterator, "
+ "but you can only ask for " << arg2 <<"_iterators.");
+
+ /**
+ * dealii::Triangulation objects can
+ * either access a user
+ * pointer or a user
+ * index. What you tried to
+ * do is access one of them
+ * after using the other.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcPointerIndexClash);
+
+ protected:
+ /**
+ * Counter for next_free_single_* functions
+ */
+ unsigned int next_free_single;
+
+ /**
+ * Counter for next_free_pair_* functions
+ */
+ unsigned int next_free_pair;
+
+ /**
+ * Bool flag for next_free_single_* functions
+ */
+ bool reverse_order_next_free_single;
+
+ /**
+ * The data type storing user
+ * pointers or user indices.
+ */
+ struct UserData
+ {
+ union
{
- union
- {
- /// The entry used as user
- /// pointer.
- void* p;
- /// The entry used as user
- /// index.
- unsigned int i;
- };
-
- /**
- * Default constructor.
- */
- UserData()
- {
- p = 0;
- }
-
- /**
- * Write the data of this object
- * to a stream for the purpose of
- * serialization.
- */
- template <class Archive>
- void serialize (Archive & ar, const unsigned int version);
+ /// The entry used as user
+ /// pointer.
+ void *p;
+ /// The entry used as user
+ /// index.
+ unsigned int i;
};
- /**
- * Enum descibing the
- * possible types of
- * userdata.
- */
- enum UserDataType
+ /**
+ * Default constructor.
+ */
+ UserData()
{
- /// No userdata used yet.
- data_unknown,
- /// UserData contains pointers.
- data_pointer,
- /// UserData contains indices.
- data_index
- };
+ p = 0;
+ }
-
- /**
- * Pointer which is not used by the
- * library but may be accessed and set
- * by the user to handle data local to
- * a line/quad/etc.
- */
- std::vector<UserData> user_data;
- /**
- * In order to avoid
- * confusion between user
- * pointers and indices, this
- * enum is set by the first
- * function accessing either
- * and subsequent access will
- * not be allowed to change
- * the type of data accessed.
- */
- mutable UserDataType user_data_type;
+ /**
+ * Write the data of this object
+ * to a stream for the purpose of
+ * serialization.
+ */
+ template <class Archive>
+ void serialize (Archive &ar, const unsigned int version);
+ };
+
+ /**
+ * Enum describing the
+ * possible types of
+ * user data.
+ */
+ enum UserDataType
+ {
+ /// No user data used yet.
+ data_unknown,
+ /// UserData contains pointers.
+ data_pointer,
+ /// UserData contains indices.
+ data_index
+ };
+
+
+ /**
+ * Per-object user data (a pointer or
+ * an index) which is not used by the
+ * library but may be accessed and set
+ * by the user to handle data local to
+ * a line/quad/etc.
+ */
+ std::vector<UserData> user_data;
+ /**
+ * In order to avoid
+ * confusion between user
+ * pointers and indices, this
+ * enum is set by the first
+ * function accessing either
+ * and subsequent access will
+ * not be allowed to change
+ * the type of data accessed.
+ */
+ mutable UserDataType user_data_type;
};
- /**
- * For hexahedrons the data of TriaObjects needs to be extended, as we can obtain faces
- * (quads) in non-standard-orientation, therefore we declare a class TriaObjectsHex, which
- * additionally contains a bool-vector of the face-orientations.
- */
+ /**
+ * For hexahedra, the data of TriaObjects needs to be extended, as we can obtain faces
+ * (quads) in non-standard orientation. We therefore declare a class TriaObjectsHex, which
+ * additionally contains a bool vector of the face orientations.
+ */
class TriaObjectsHex : public TriaObjects<TriaObject<3> >
{
namespace hp
{
- /**
- * Manage the distribution and numbering of the degrees of freedom for
- * hp-FEM algorithms.
- *
- * This class has not yet been implemented for the use in the codimension
- * one case (<tt>spacedim != dim </tt>).
- *
- * @ingroup dofs
- * @ingroup hp
- */
+ /**
+ * Manage the distribution and numbering of the degrees of freedom for
+ * hp-FEM algorithms.
+ *
+ * This class has not yet been implemented for use in the codimension
+ * one case (<tt>spacedim != dim</tt>).
+ *
+ * @ingroup dofs
+ * @ingroup hp
+ */
template <int dim, int spacedim=dim>
- class DoFHandler : public Subscriptor
+ class DoFHandler : public Subscriptor,
- protected Triangulation<dim,spacedim>::RefinementListener
++ protected Triangulation<dim,spacedim>::RefinementListener
{
- typedef dealii::internal::DoFHandler::Iterators<DoFHandler<dim,spacedim> > IteratorSelector;
+ typedef dealii::internal::DoFHandler::Iterators<DoFHandler<dim,spacedim> > IteratorSelector;
+ public:
+ typedef typename IteratorSelector::CellAccessor cell_accessor;
+ typedef typename IteratorSelector::FaceAccessor face_accessor;
+
+ typedef typename IteratorSelector::line_iterator line_iterator;
+ typedef typename IteratorSelector::active_line_iterator active_line_iterator;
+
+ typedef typename IteratorSelector::quad_iterator quad_iterator;
+ typedef typename IteratorSelector::active_quad_iterator active_quad_iterator;
+
+ typedef typename IteratorSelector::hex_iterator hex_iterator;
+ typedef typename IteratorSelector::active_hex_iterator active_hex_iterator;
+
+ typedef typename IteratorSelector::cell_iterator cell_iterator;
+ typedef typename IteratorSelector::active_cell_iterator active_cell_iterator;
+
+ typedef typename IteratorSelector::face_iterator face_iterator;
+ typedef typename IteratorSelector::active_face_iterator active_face_iterator;
+
+ /**
+ * Alias the @p FunctionMap type
+ * declared elsewhere.
+ */
+ typedef typename FunctionMap<spacedim>::type FunctionMap;
+
+ /**
+ * Make the dimension available
+ * in function templates.
+ */
+ static const unsigned int dimension = dim;
+
+ /**
+ * Make the space dimension available
+ * in function templates.
+ */
+ static const unsigned int space_dimension = spacedim;
+
+ /**
+ * When the arrays holding the
+ * DoF indices are set up, but
+ * before they are filled with
+ * actual values, they are set to
+ * an invalid value, in order to
+ * monitor possible
+ * problems. This invalid value
+ * is the constant defined here.
+ *
+ * Please note that you should
+ * not rely on it having a
+ * certain value, but rather take
+ * its symbolic name.
+ */
+ static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int;
+
+ /**
+ * The default index of the
+ * finite element to be used on
+ * a given cell. For the usual,
+ * non-hp dealii::DoFHandler class
+ * that only supports the same
+ * finite element to be used on
+ * all cells, the index of the
+ * finite element needs to be
+ * the same on all cells
+ * anyway, and by convention we
+ * pick zero for this
+ * value. The situation here is
+ * different, since the hp
+ * classes support the case
+ * where different finite
+ * element indices may be used
+ * on different cells. The
+ * default index consequently
+ * corresponds to an invalid
+ * value.
+ */
+ static const unsigned int default_fe_index = numbers::invalid_unsigned_int;
+
+
+ /**
+ * Constructor. Take @p tria as the
+ * triangulation to work on.
+ */
+ DoFHandler (const Triangulation<dim,spacedim> &tria);
+
+ /**
+ * Destructor.
+ */
+ virtual ~DoFHandler ();
+
+ /**
+ * Go through the triangulation and
+ * distribute the degrees of freedom
+ * needed for the given finite element
+ * according to the current distribution
+ * of active fe indices.
+ *
+ * A pointer to the transferred
+ * finite element is
+ * stored. Therefore, the
+ * lifetime of the finite element
+ * object shall be longer than
+ * that of this object. If you
+ * don't want this behaviour, you
+ * may want to call the @p clear
+ * member function which also
+ * releases the lock of this
+ * object to the finite element.
+ */
+ virtual void distribute_dofs (const hp::FECollection<dim,spacedim> &fe);
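+
+ /* Usage sketch (FE_Q and hp::FECollection::push_back() are assumed
+ * to be available as usual; tria is an existing Triangulation<2>):
+ *
+ * hp::FECollection<2> fe_collection;
+ * fe_collection.push_back (FE_Q<2>(1));
+ * fe_collection.push_back (FE_Q<2>(2));
+ *
+ * hp::DoFHandler<2> dof_handler (tria);
+ * // optionally set an active FE index on each cell first
+ * dof_handler.distribute_dofs (fe_collection);
+ */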
+
+ /**
+ * Go through the triangulation and set
+ * the active FE indices of all active
+ * cells to the values given in @p
+ * active_fe_indices.
+ */
+ void set_active_fe_indices (const std::vector<unsigned int> &active_fe_indices);
+
+ /**
+ * Go through the triangulation and
+ * store the active FE indices of all
+ * active cells to the vector @p
+ * active_fe_indices. This vector is
+ * resized, if necessary.
+ */
+ void get_active_fe_indices (std::vector<unsigned int> &active_fe_indices) const;
+
+ /**
+ * Clear all data of this object and
+ * especially delete the lock this object
+ * has to the finite element used the last
+ * time when @p distribute_dofs was called.
+ */
+ virtual void clear ();
+
+ /**
+ * Renumber degrees of freedom based on
+ * a list of new dof numbers for all the
+ * dofs.
+ *
+ * @p new_numbers is an array of integers
+ * with size equal to the number of dofs
+ * on the present grid. It stores the new
+ * indices after renumbering in the
+ * order of the old indices.
+ *
+ * This function is called by
+ * the functions in the
+ * DoFRenumbering namespace
+ * after computing the ordering
+ * of the degrees of freedom.
+ * However, you can call this
+ * function yourself, which is
+ * necessary if a user wants to
+ * implement an ordering scheme
+ * herself, for example
+ * downwind numbering.
+ *
+ * The @p new_numbers array must
+ * have a size equal to the
+ * number of degrees of
+ * freedom. Each entry must
+ * state the new global DoF
+ * number of the degree of
+ * freedom referenced.
+ */
- void renumber_dofs (const std::vector<unsigned int> &new_numbers);
++ void renumber_dofs (const std::vector<types::global_dof_index> &new_numbers);
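+
+ /* A sketch with a hand-made permutation that maps old index i to
+ * new_numbers[i]; in practice one would usually call one of the
+ * DoFRenumbering functions, which end up calling this method:
+ *
+ * std::vector<types::global_dof_index> new_numbers (dof_handler.n_dofs());
+ * for (types::global_dof_index i = 0; i < new_numbers.size(); ++i)
+ * new_numbers[i] = new_numbers.size() - 1 - i; // reverse the numbering
+ * dof_handler.renumber_dofs (new_numbers);
+ */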
+
+ /**
+ * Return the maximum number of
+ * degrees of freedom a degree of freedom
+ * in the given triangulation with the
+ * given finite element may couple with.
+ * This is the maximum number of entries
+ * per line in the system matrix; this
+ * information can therefore be used upon
+ * construction of the SparsityPattern
+ * object.
+ *
+ * The returned number is not really the
+ * maximum number but an estimate based
+ * on the finite element and the maximum
+ * number of cells meeting at a vertex.
+ * The number holds for the constrained
+ * matrix also.
+ *
+ * As for
+ * DoFHandler::max_couplings_between_dofs(),
+ * the result of this function is often
+ * not very accurate for 3d and/or high
+ * polynomial degrees. The consequences
+ * are discussed in the documentation
+ * of the module on @ref Sparsity.
+ */
+ unsigned int max_couplings_between_dofs () const;
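+
+ /* Typical use when building a sparsity pattern (a sketch;
+ * DoFTools::make_sparsity_pattern is assumed to be available as usual):
+ *
+ * SparsityPattern sparsity (dof_handler.n_dofs(),
+ * dof_handler.n_dofs(),
+ * dof_handler.max_couplings_between_dofs());
+ * DoFTools::make_sparsity_pattern (dof_handler, sparsity);
+ * sparsity.compress ();
+ */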
+
+ /**
+ * Return the number of degrees of freedom
+ * located on the boundary that another dof
+ * on the boundary can couple with.
+ *
+ * The number is the same as for
+ * @p max_coupling_between_dofs in one
+ * dimension less.
+ */
+ unsigned int max_couplings_between_boundary_dofs () const;
+
+ /**
+ * @name Cell iterator functions
+ */
+ /*@{*/
+ /**
+ * Iterator to the first used
+ * cell on level @p level.
+ */
+ cell_iterator begin (const unsigned int level = 0) const;
+
+ /**
+ * Iterator to the first active
+ * cell on level @p level.
+ */
+ active_cell_iterator begin_active(const unsigned int level = 0) const;
+
+ /**
+ * Iterator past the end; this
+ * iterator serves for
+ * comparisons of iterators with
+ * past-the-end or
+ * before-the-beginning states.
+ */
+ cell_iterator end () const;
+
+ /**
+ * Return an iterator which is
+ * the first iterator not on
+ * level. If @p level is the
+ * last level, then this returns
+ * <tt>end()</tt>.
+ */
+ cell_iterator end (const unsigned int level) const;
+
+ /**
+ * Return an active iterator
+ * which is the first iterator
+ * not on level. If @p level is
+ * the last level, then this
+ * returns <tt>end()</tt>.
+ */
+ active_cell_iterator end_active (const unsigned int level) const;
+
+ //@}
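+
+ /* For example, the usual loop over all active cells then reads:
+ *
+ * for (hp::DoFHandler<2>::active_cell_iterator
+ * cell = dof_handler.begin_active();
+ * cell != dof_handler.end(); ++cell)
+ * {
+ * // ... work on the current cell ...
+ * }
+ */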
+
+ /*---------------------------------------*/
+
+
+ /**
+ * Return the global number of
+ * degrees of freedom. If the
+ * current object handles all
+ * degrees of freedom itself
+ * (even if you may intend to
+ * solve your linear system in
+ * parallel, such as in step-17
+ * or step-18), then this number
+ * equals the number of locally
+ * owned degrees of freedom since
+ * this object doesn't know
+ * anything about what you want
+ * to do with it and believes
+ * that it owns every degree of
+ * freedom it knows about.
+ *
+ * On the other hand, if this
+ * object operates on a
+ * parallel::distributed::Triangulation
+ * object, then this function
+ * returns the global number of
+ * degrees of freedom,
+ * accumulated over all
+ * processors.
+ *
+ * In either case, included in
+ * the returned number are those
+ * DoFs which are constrained by
+ * hanging nodes, see @ref constraints.
+ */
- unsigned int n_dofs () const;
++ types::global_dof_index n_dofs () const;
+
+ /**
+ * Return the number of degrees of freedom
+ * located on the boundary.
+ */
- unsigned int n_boundary_dofs () const;
++ types::global_dof_index n_boundary_dofs () const;
+
+ /**
+ * Return the number of degrees
+ * of freedom located on those
+ * parts of the boundary which
+ * have a boundary indicator
+ * listed in the given set. The
+ * reason that a @p map rather
+ * than a @p set is used is the
+ * same as described in the
+ * section on the
+ * @p make_boundary_sparsity_pattern
+ * function.
+ */
- unsigned int
++ types::global_dof_index
+ n_boundary_dofs (const FunctionMap &boundary_indicators) const;
+
+ /**
+ * Same function, but with
+ * different data type of the
+ * argument, which is here simply
+ * a list of the boundary
+ * indicators under
+ * consideration.
+ */
- unsigned int
++ types::global_dof_index
+ n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const;
+
+ /**
+ * Return the number of
+ * degrees of freedom that
+ * belong to this
+ * process.
+ *
+ * If this is a sequential job,
+ * then the result equals that
+ * produced by n_dofs(). On the
+ * other hand, if we are
+ * operating on a
+ * parallel::distributed::Triangulation,
+ * then it includes only the
+ * degrees of freedom that the
+ * current processor owns. Note
+ * that in this case this does
+ * not include all degrees of
+ * freedom that have been
+ * distributed on the current
+ * processor's image of the mesh:
+ * in particular, some of the
+ * degrees of freedom on the
+ * interface between the cells
+ * owned by this processor and
+ * cells owned by other
+ * processors may be theirs, and
+ * degrees of freedom on ghost
+ * cells are also not necessarily
+ * included.
+ */
+ unsigned int n_locally_owned_dofs() const;
+
+ /**
+ * Return an IndexSet describing
+ * the set of locally owned DoFs
+ * as a subset of
+ * 0..n_dofs(). The number of
+ * elements of this set equals
+ * n_locally_owned_dofs().
+ */
+ const IndexSet &locally_owned_dofs() const;
+
+
+ /**
+ * Returns a vector that
+ * stores the locally owned
+ * DoFs of each processor. If
+ * you are only interested in
+ * the number of elements
+ * each processor owns then
+ * n_locally_owned_dofs_per_processor() is
+ * a better choice.
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element that equals the
+ * IndexSet representing the
+ * entire range [0,n_dofs()].
+ */
+ const std::vector<IndexSet> &
+ locally_owned_dofs_per_processor () const;
+
+ /**
+ * Return a vector that
+ * stores the number of
+ * degrees of freedom each
+ * processor that
+ * participates in this
+ * triangulation owns
+ * locally. The sum of all
+ * these numbers equals the
+ * number of degrees of
+ * freedom that exist
+ * globally, i.e. what
+ * n_dofs() returns.
+ *
+ * Each element of the vector
+ * returned by this function
+ * equals the number of
+ * elements of the
+ * corresponding sets
+ * returned by
+ * global_dof_indices().
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element equal to n_dofs().
+ */
+ const std::vector<unsigned int> &
+ n_locally_owned_dofs_per_processor () const;
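+
+ /* A sketch contrasting the global and the locally owned view (for a
+ * sequential Triangulation the two counts coincide):
+ *
+ * const types::global_dof_index n_global = dof_handler.n_dofs();
+ * const unsigned int n_owned = dof_handler.n_locally_owned_dofs();
+ * const IndexSet &owned = dof_handler.locally_owned_dofs();
+ * // owned.n_elements() == n_owned; summed over all processors,
+ * // the locally owned counts add up to n_global
+ */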
+
+ /**
+ * Return a constant reference to
+ * the set of finite element
+ * objects that are used by this
+ * @p DoFHandler.
+ */
+ const hp::FECollection<dim,spacedim> &get_fe () const;
+
+ /**
+ * Return a constant reference to the
+ * triangulation underlying this object.
+ */
+ const Triangulation<dim,spacedim> &get_tria () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ *
+ * This function is made virtual,
+ * since a dof handler object
+ * might be accessed through a
+ * pointers to thisr base class,
+ * although the actual object
+ * might be a derived class.
+ */
+ virtual std::size_t memory_consumption () const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidTriangulation);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNoFESelected);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcRenumberingIncomplete);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcGridsDoNotMatch);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidBoundaryIndicator);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcMatrixHasWrongSize,
+ int,
+ << "The matrix has the wrong dimension " << arg1);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcFunctionNotUseful);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcNewNumbersNotConsecutive,
+ int,
+ << "The given list of new dof indices is not consecutive: "
+ << "the index " << arg1 << " does not exist.");
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidFEIndex,
+ int, int,
+ << "The mesh contains a cell with an active_fe_index of "
+ << arg1 << ", but the finite element collection only has "
+ << arg2 << " elements");
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidLevel,
+ int,
+ << "The given level " << arg1
+ << " is not in the valid range!");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcFacesHaveNoLevel);
+ /**
+ * The triangulation level you
+ * accessed is empty.
+ */
+ DeclException1 (ExcEmptyLevel,
+ int,
+ << "You tried to do something on level " << arg1
+ << ", but this level is empty.");
+
+ protected:
+
+ /**
+ * Address of the triangulation to
+ * work on.
+ */
+ SmartPointer<const Triangulation<dim,spacedim>,DoFHandler<dim,spacedim> > tria;
+
+ /**
+ * Store a pointer to the finite
+ * element set given latest for
+ * the distribution of dofs. In
+ * order to avoid destruction of
+ * the object before the lifetime
+ * of the DoF handler, we
+ * subscribe to the finite
+ * element object. To unlock the
+ * FE before the end of the
+ * lifetime of this DoF handler,
+ * use the <tt>clear()</tt> function
+ * (this clears all data of this
+ * object as well, though).
+ */
+ SmartPointer<const hp::FECollection<dim,spacedim>,hp::DoFHandler<dim,spacedim> > finite_elements;
+
+ private:
+
+ /**
+ * Copy constructor. I can see no reason
+ * why someone might want to use it, so
+ * I don't provide it. Since this class
+ * has pointer members, making it private
+ * prevents the compiler from providing its
+ * own, incorrect one if anyone chose to
+ * copy such an object.
+ */
+ DoFHandler (const DoFHandler &);
+
+ /**
+ * Copy operator. I can see no reason
+ * why someone might want to use it, so
+ * I don't provide it. Since this class
+ * has pointer members, making it private
+ * prevents the compiler from providing its
+ * own, incorrect one if anyone chose to
+ * copy such an object.
+ */
+ DoFHandler &operator = (const DoFHandler &);
+
++ class MGVertexDoFs
++ {
+ public:
- typedef typename IteratorSelector::CellAccessor cell_accessor;
- typedef typename IteratorSelector::FaceAccessor face_accessor;
-
- typedef typename IteratorSelector::line_iterator line_iterator;
- typedef typename IteratorSelector::active_line_iterator active_line_iterator;
-
- typedef typename IteratorSelector::quad_iterator quad_iterator;
- typedef typename IteratorSelector::active_quad_iterator active_quad_iterator;
-
- typedef typename IteratorSelector::hex_iterator hex_iterator;
- typedef typename IteratorSelector::active_hex_iterator active_hex_iterator;
-
- typedef typename IteratorSelector::cell_iterator cell_iterator;
- typedef typename IteratorSelector::active_cell_iterator active_cell_iterator;
-
- typedef typename IteratorSelector::face_iterator face_iterator;
- typedef typename IteratorSelector::active_face_iterator active_face_iterator;
-
- /**
- * Alias the @p FunctionMap type
- * declared elsewhere.
- */
- typedef typename FunctionMap<spacedim>::type FunctionMap;
-
- /**
- * Make the dimension available
- * in function templates.
- */
- static const unsigned int dimension = dim;
-
- /**
- * Make the space dimension available
- * in function templates.
- */
- static const unsigned int space_dimension = spacedim;
-
- /**
- * When the arrays holding the
- * DoF indices are set up, but
- * before they are filled with
- * actual values, they are set to
- * an invalid value, in order to
- * monitor possible
- * problems. This invalid value
- * is the constant defined here.
- *
- * Please note that you should
- * not rely on it having a
- * certain value, but rather take
- * its symbolic name.
- */
- static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int;
-
- /**
- * The default index of the
- * finite element to be used on
- * a given cell. For the usual,
- * non-hp dealii::DoFHandler class
- * that only supports the same
- * finite element to be used on
- * all cells, the index of the
- * finite element needs to be
- * the same on all cells
- * anyway, and by convention we
- * pick zero for this
- * value. The situation here is
- * different, since the hp
- * classes support the case
- * where different finite
- * element indices may be used
- * on different cells. The
- * default index consequently
- * corresponds to an invalid
- * value.
- */
- static const unsigned int default_fe_index = numbers::invalid_unsigned_int;
-
-
- /**
- * Constructor. Take @p tria as the
- * triangulation to work on.
- */
- DoFHandler (const Triangulation<dim,spacedim> &tria);
-
- /**
- * Destructor.
- */
- virtual ~DoFHandler ();
-
- /**
- * Go through the triangulation and
- * distribute the degrees of freedoms
- * needed for the given finite element
- * according to the current distribution
- * of active fe indices.
- *
- * A pointer of the transferred
- * finite element is
- * stored. Therefore, the
- * lifetime of the finite element
- * object shall be longer than
- * that of this object. If you
- * don't want this behaviour, you
- * may want to call the @p clear
- * member function which also
- * releases the lock of this
- * object to the finite element.
- */
- virtual void distribute_dofs (const hp::FECollection<dim,spacedim> &fe);
-
- /**
- * Go through the triangulation and set
- * the active FE indices of all active
- * cells to the values given in @p
- * active_fe_indices.
- */
- void set_active_fe_indices (const std::vector<unsigned int>& active_fe_indices);
-
- /**
- * Go through the triangulation and
- * store the active FE indices of all
- * active cells to the vector @p
- * active_fe_indices. This vector is
- * resized, if necessary.
- */
- void get_active_fe_indices (std::vector<unsigned int>& active_fe_indices) const;
-
- /**
- * Clear all data of this object and
- * especially delete the lock this object
- * has to the finite element used the last
- * time when @p distribute_dofs was called.
- */
- virtual void clear ();
-
- /**
- * Renumber degrees of freedom based on
- * a list of new dof numbers for all the
- * dofs.
- *
- * @p new_numbers is an array of integers
- * with size equal to the number of dofs
- * on the present grid. It stores the new
- * indices after renumbering in the
- * order of the old indices.
- *
- * This function is called by
- * the functions in
- * DoFRenumbering function
- * after computing the ordering
- * of the degrees of freedom.
- * However, you can call this
- * function yourself, which is
- * necessary if a user wants to
- * implement an ordering scheme
- * herself, for example
- * downwind numbering.
- *
- * The @p new_number array must
- * have a size equal to the
- * number of degrees of
- * freedom. Each entry must
- * state the new global DoF
- * number of the degree of
- * freedom referenced.
- */
- void renumber_dofs (const std::vector<types::global_dof_index> &new_numbers);
-
- /**
- * Return the maximum number of
- * degrees of freedom a degree of freedom
- * in the given triangulation with the
- * given finite element may couple with.
- * This is the maximum number of entries
- * per line in the system matrix; this
- * information can therefore be used upon
- * construction of the SparsityPattern
- * object.
- *
- * The returned number is not really the
- * maximum number but an estimate based
- * on the finite element and the maximum
- * number of cells meeting at a vertex.
- * The number holds for the constrained
- * matrix also.
- *
- * As for
- * DoFHandler::max_couplings_between_dofs(),
- * the result of this function is often
- * not very accurate for 3d and/or high
- * polynomial degrees. The consequences
- * are discussed in the documentation
- * of the module on @ref Sparsity.
- */
- unsigned int max_couplings_between_dofs () const;
-
- /**
- * Return the number of degrees of freedom
- * located on the boundary another dof on
- * the boundary can couple with.
- *
- * The number is the same as for
- * @p max_couplings_between_dofs in one
- * dimension less.
- */
- unsigned int max_couplings_between_boundary_dofs () const;
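// A small sketch of the intended use of max_couplings_between_dofs() when
// sizing a sparsity pattern; a dof_handler with distributed DoFs is assumed
// and hanging-node constraints are ignored here for brevity.
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/lac/sparsity_pattern.h>

template <int dim>
void build_sparsity (const dealii::hp::DoFHandler<dim> &dof_handler,
                     dealii::SparsityPattern &sparsity_pattern)
{
  // Reserve an estimated number of entries per row, then fill and compress.
  sparsity_pattern.reinit (dof_handler.n_dofs(),
                           dof_handler.n_dofs(),
                           dof_handler.max_couplings_between_dofs());
  dealii::DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
  sparsity_pattern.compress ();
}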
-
- /**
- * @name Cell iterator functions
- */
- /*@{*/
- /**
- * Iterator to the first used
- * cell on level @p level.
- */
- cell_iterator begin (const unsigned int level = 0) const;
-
- /**
- * Iterator to the first active
- * cell on level @p level.
- */
- active_cell_iterator begin_active(const unsigned int level = 0) const;
-
- /**
- * Iterator past the end; this
- * iterator serves for
- * comparisons of iterators with
- * past-the-end or
- * before-the-beginning states.
- */
- cell_iterator end () const;
-
- /**
- * Return an iterator which is
- * the first iterator not on
- * level @p level. If @p level is the
- * last level, then this returns
- * <tt>end()</tt>.
- */
- cell_iterator end (const unsigned int level) const;
-
- /**
- * Return an active iterator
- * which is the first iterator
- * not on level @p level. If @p level is
- * the last level, then this
- * returns <tt>end()</tt>.
- */
- active_cell_iterator end_active (const unsigned int level) const;
-
- //@}
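// A minimal sketch of the iterator functions above: count how many active
// cells use each element of the collection. The dof_handler is assumed to
// have been set up as in the earlier sketch; count_cells_per_element is an
// illustrative name.
#include <deal.II/hp/dof_handler.h>
#include <vector>

template <int dim>
std::vector<unsigned int>
count_cells_per_element (const dealii::hp::DoFHandler<dim> &dof_handler)
{
  std::vector<unsigned int> counts (dof_handler.get_fe().size(), 0);
  typename dealii::hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell != endc; ++cell)
    ++counts[cell->active_fe_index()];
  return counts;
}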
-
- /*---------------------------------------*/
-
-
- /**
- * Return the global number of
- * degrees of freedom. If the
- * current object handles all
- * degrees of freedom itself
- * (even if you may intend to
- * solve your linear system in
- * parallel, such as in step-17
- * or step-18), then this number
- * equals the number of locally
- * owned degrees of freedom since
- * this object doesn't know
- * anything about what you want
- * to do with it and believes
- * that it owns every degree of
- * freedom it knows about.
- *
- * On the other hand, if this
- * object operates on a
- * parallel::distributed::Triangulation
- * object, then this function
- * returns the global number of
- * degrees of freedom,
- * accumulated over all
- * processors.
- *
- * In either case, included in
- * the returned number are those
- * DoFs which are constrained by
- * hanging nodes, see @ref constraints.
- */
- types::global_dof_index n_dofs () const;
-
- /**
- * Return the number of degrees of freedom
- * located on the boundary.
- */
- types::global_dof_index n_boundary_dofs () const;
-
- /**
- * Return the number of degrees
- * of freedom located on those
- * parts of the boundary which
- * have a boundary indicator
- * listed in the given set. The
- * reason that a @p map rather
- * than a @p set is used is the
- * same as described in the
- * section on the
- * @p make_boundary_sparsity_pattern
- * function.
- */
- types::global_dof_index
- n_boundary_dofs (const FunctionMap &boundary_indicators) const;
-
- /**
- * Same function, but with
- * different data type of the
- * argument, which is here simply
- * a list of the boundary
- * indicators under
- * consideration.
- */
- types::global_dof_index
- n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const;
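// A brief sketch of the set-based overload; the boundary indicator value 1
// and the function name are only examples.
#include <deal.II/hp/dof_handler.h>
#include <set>

template <int dim>
dealii::types::global_dof_index
count_boundary_dofs_on_id_one (const dealii::hp::DoFHandler<dim> &dof_handler)
{
  std::set<dealii::types::boundary_id> boundary_ids;
  boundary_ids.insert (1);
  return dof_handler.n_boundary_dofs (boundary_ids);
}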
-
- /**
- * Return the number of
- * degrees of freedom that
- * belong to this
- * process.
- *
- * If this is a sequential job,
- * then the result equals that
- * produced by n_dofs(). On the
- * other hand, if we are
- * operating on a
- * parallel::distributed::Triangulation,
- * then it includes only the
- * degrees of freedom that the
- * current processor owns. Note
- * that in this case this does
- * not include all degrees of
- * freedom that have been
- * distributed on the current
- * processor's image of the mesh:
- * in particular, some of the
- * degrees of freedom on the
- * interface between the cells
- * owned by this processor and
- * cells owned by other
- * processors may be theirs, and
- * degrees of freedom on ghost
- * cells are also not necessarily
- * included.
- */
- unsigned int n_locally_owned_dofs() const;
-
- /**
- * Return an IndexSet describing
- * the set of locally owned DoFs
- * as a subset of
- * 0..n_dofs(). The number of
- * elements of this set equals
- * n_locally_owned_dofs().
- */
- const IndexSet & locally_owned_dofs() const;
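// A consistency sketch for the two accessors above, for the sequential case
// in which every DoF is locally owned; check_owned_dofs is an illustrative
// name and the Assert macros are the usual deal.II ones.
#include <deal.II/base/exceptions.h>
#include <deal.II/base/index_set.h>
#include <deal.II/hp/dof_handler.h>

template <int dim>
void check_owned_dofs (const dealii::hp::DoFHandler<dim> &dof_handler)
{
  const dealii::IndexSet &owned = dof_handler.locally_owned_dofs ();
  AssertDimension (owned.n_elements(), dof_handler.n_locally_owned_dofs());
  // In a sequential job the locally owned set covers all DoFs.
  Assert (owned.size() == dof_handler.n_dofs(), dealii::ExcInternalError());
}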
-
-
- /**
- * Returns a vector that
- * stores the locally owned
- * DoFs of each processor. If
- * you are only interested in
- * the number of elements
- * each processor owns then
- * n_locally_owned_dofs_per_processor() is
- * a better choice.
- *
- * If this is a sequential job,
- * then the vector has a single
- * element that equals the
- * IndexSet representing the
- * entire range [0,n_dofs()).
- */
- const std::vector<IndexSet> &
- locally_owned_dofs_per_processor () const;
-
- /**
- * Return a vector that
- * stores the number of
- * degrees of freedom each
- * processor that
- * participates in this
- * triangulation owns
- * locally. The sum of all
- * these numbers equals the
- * number of degrees of
- * freedom that exist
- * globally, i.e. what
- * n_dofs() returns.
- *
- * Each element of the vector
- * returned by this function
- * equals the number of
- * elements of the
- * corresponding sets
- * returned by
- * locally_owned_dofs_per_processor().
- *
- * If this is a sequential job,
- * then the vector has a single
- * element equal to n_dofs().
- */
- const std::vector<unsigned int> &
- n_locally_owned_dofs_per_processor () const;
-
- /**
- * Return a constant reference to
- * the set of finite element
- * objects that are used by this
- * @p DoFHandler.
- */
- const hp::FECollection<dim,spacedim> & get_fe () const;
-
- /**
- * Return a constant reference to the
- * triangulation underlying this object.
- */
- const Triangulation<dim,spacedim> & get_tria () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- *
- * This function is made virtual,
- * since a dof handler object
- * might be accessed through a
- * pointer to this base class,
- * although the actual object
- * might be a derived class.
- */
- virtual std::size_t memory_consumption () const;
-
- /**
- * Exception
- */
- DeclException0 (ExcInvalidTriangulation);
- /**
- * Exception
- */
- DeclException0 (ExcNoFESelected);
- /**
- * Exception
- */
- DeclException0 (ExcRenumberingIncomplete);
- /**
- * Exception
- */
- DeclException0 (ExcGridsDoNotMatch);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidBoundaryIndicator);
- /**
- * Exception
- */
- DeclException1 (ExcMatrixHasWrongSize,
- int,
- << "The matrix has the wrong dimension " << arg1);
- /**
- * Exception
- */
- DeclException0 (ExcFunctionNotUseful);
- /**
- * Exception
- */
- DeclException1 (ExcNewNumbersNotConsecutive,
- int,
- << "The given list of new dof indices is not consecutive: "
- << "the index " << arg1 << " does not exist.");
- /**
- * Exception
- */
- DeclException2 (ExcInvalidFEIndex,
- int, int,
- << "The mesh contains a cell with an active_fe_index of "
- << arg1 << ", but the finite element collection only has "
- << arg2 << " elements");
- /**
- * Exception
- */
- DeclException1 (ExcInvalidLevel,
- int,
- << "The given level " << arg1
- << " is not in the valid range!");
- /**
- * Exception
- */
- DeclException0 (ExcFacesHaveNoLevel);
- /**
- * The triangulation level you
- * accessed is empty.
- */
- DeclException1 (ExcEmptyLevel,
- int,
- << "You tried to do something on level " << arg1
- << ", but this level is empty.");
-
- protected:
-
- /**
- * Address of the triangulation to
- * work on.
- */
- SmartPointer<const Triangulation<dim,spacedim>,DoFHandler<dim,spacedim> > tria;
-
- /**
- * Store a pointer to the finite
- * element set most recently given for
- * the distribution of dofs. In
- * order to avoid destruction of
- * the object before the lifetime
- * of the DoF handler, we
- * subscribe to the finite
- * element object. To unlock the
- * FE before the end of the
- * lifetime of this DoF handler,
- * use the <tt>clear()</tt> function
- * (this clears all data of this
- * object as well, though).
- */
- SmartPointer<const hp::FECollection<dim,spacedim>,hp::DoFHandler<dim,spacedim> > finite_elements;
-
- private:
-
- /**
- * Copy constructor. I can see no reason
- * why someone might want to use it, so
- * I don't provide it. Since this class
- * has pointer members, making it private
- * prevents the compiler from providing its
- * own, incorrect one if anyone chose to
- * copy such an object.
- */
- DoFHandler (const DoFHandler &);
-
- /**
- * Copy operator. I can see no reason
- * why someone might want to use it, so
- * I don't provide it. Since this class
- * has pointer members, making it private
- * prevents the compiler from providing its
- * own, incorrect one if anyone chose to
- * copy such an object.
- */
- DoFHandler & operator = (const DoFHandler &);
-
- class MGVertexDoFs {
- public:
- MGVertexDoFs ();
- ~MGVertexDoFs ();
- types::global_dof_index get_index (const unsigned int level, const unsigned int dof_number) const;
- void set_index (const unsigned int level, const unsigned int dof_number, const types::global_dof_index index);
- };
-
- /**
- * Free all used memory.
- */
- void clear_space ();
-
- template<int structdim>
- types::global_dof_index get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const;
-
- template<int structdim>
- void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index) const;
-
- /**
- * Create default tables for
- * the active_fe_indices in
- * the
- * dealii::internal::hp::DoFLevel. They
- * are initialized with a
- * zero indicator, meaning
- * that fe[0] is going to be
- * used by default. This
- * method is called before
- * refinement and before
- * distribute_dofs is
- * called. It ensures each
- * cell has a valid
- * active_fe_index.
- */
-
- void create_active_fe_table ();
-
- /**
- * Functions that will be triggered
- * through signals whenever the
- * triangulation is modified.
- *
- * Here they are used to
- * administer the
- * active_fe_fields during the
- * spatial refinement.
- */
- virtual void pre_refinement_action ();
- virtual void post_refinement_action ();
-
- /**
- * Compute identities between
- * DoFs located on
- * vertices. Called from
- * distribute_dofs().
- */
- void
- compute_vertex_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const;
-
- /**
- * Compute identities between
- * DoFs located on
- * lines. Called from
- * distribute_dofs().
- */
- void
- compute_line_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const;
-
- /**
- * Compute identities between
- * DoFs located on
- * quads. Called from
- * distribute_dofs().
- */
- void
- compute_quad_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const;
-
- /**
- * Renumber the objects with
- * the given and all lower
- * structural dimensions,
- * i.e. renumber vertices by
- * giving a template argument
- * of zero to the int2type
- * argument, lines and vertices
- * with one, etc.
- *
- * Note that in contrast to the
- * public renumber_dofs()
- * function, these internal
- * functions do not ensure that
- * the new DoFs are
- * contiguously numbered. The
- * function may therefore also
- * be used to assign different
- * DoFs the same number, for
- * example to unify hp DoFs
- * corresponding to different
- * finite elements but
- * co-located on the same
- * entity.
- */
- void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- dealii::internal::int2type<0>);
-
- void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- dealii::internal::int2type<1>);
-
- void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- dealii::internal::int2type<2>);
-
- void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- dealii::internal::int2type<3>);
-
- /**
- * Space to store the DoF
- * numbers for the different
- * levels. Analogous to the
- * <tt>levels[]</tt> tree of
- * the Triangulation objects.
- */
- std::vector<dealii::internal::hp::DoFLevel<dim>*> levels;
- /**
- * Space to store the DoF
- * numbers for the faces.
- * Analogous to the
- * <tt>faces</tt> pointer of
- * the Triangulation objects.
- */
- dealii::internal::hp::DoFFaces<dim> * faces;
-
- /**
- * A structure that contains all
- * sorts of numbers that
- * characterize the degrees of
- * freedom this object works on.
- *
- * For most members of this
- * structure, there is an
- * accessor function in this
- * class that returns its value.
- */
- dealii::internal::DoFHandler::NumberCache number_cache;
-
- /**
- * Array to store the indices
- * for degrees of freedom
- * located at vertices.
- *
- * The format used here, in the
- * form of a linked list, is
- * the same as used for the
- * arrays used in the
- * internal::hp::DoFLevel
- * hierarchy. Starting indices
- * into this array are provided
- * by the vertex_dofs_offsets
- * field.
- *
- * Access to this field is
- * generally through the
- * DoFAccessor::get_vertex_dof_index() and
- * DoFAccessor::set_vertex_dof_index()
- * functions, encapsulating the
- * actual data format used to
- * the present class.
- */
- std::vector<types::global_dof_index> vertex_dofs;
-
- /**
- * For each vertex in the
- * triangulation, store the
- * offset within the
- * vertex_dofs array where the
- * dofs for this vertex start.
- *
- * As for that array, the
- * format is the same as
- * described in the
- * documentation of
- * hp::DoFLevel.
- *
- * Access to this field is
- * generally through the
- * Accessor::get_vertex_dof_index() and
- * Accessor::set_vertex_dof_index()
- * functions, encapsulating the
- * actual data format used to
- * the present class.
- */
- std::vector<unsigned int> vertex_dofs_offsets;
-
- std::vector<MGVertexDoFs> mg_vertex_dofs;
-
- /**
- * Array to store the
- * information whether a cell on
- * some level has children or
- * not. It is used by the
- * refinement listeners as a
- * persistent buffer during the
- * refinement, i.e. between the
- * time pre_refinement_action is
- * called and the time post_refinement_action
- * runs.
- */
- std::vector<std::vector<bool> *> has_children;
-
- /**
- * A list of connections with which this object connects
- * to the triangulation to get information about when the
- * triangulation changes.
- */
- std::vector<boost::signals2::connection> tria_listeners;
-
- /**
- * Make accessor objects friends.
- */
- template <int, class> friend class dealii::DoFAccessor;
- template <class> friend class dealii::DoFCellAccessor;
- friend struct dealii::internal::DoFAccessor::Implementation;
- friend struct dealii::internal::DoFCellAccessor::Implementation;
-
- /**
- * Likewise for DoFLevel
- * objects since they need to
- * access the vertex dofs in
- * the functions that set and
- * retrieve vertex dof indices.
- */
- template <int> friend class dealii::internal::hp::DoFLevel;
- template <int> friend class dealii::internal::hp::DoFObjects;
- friend struct dealii::internal::hp::DoFHandler::Implementation;
++ MGVertexDoFs ();
++ ~MGVertexDoFs ();
++ types::global_dof_index get_index (const unsigned int level, const unsigned int dof_number) const;
++ void set_index (const unsigned int level, const unsigned int dof_number, const types::global_dof_index index);
++ };
++
+ /**
+ * Free all used memory.
+ */
+ void clear_space ();
+
++ template<int structdim>
++ types::global_dof_index get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const;
++
++ template<int structdim>
++ void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index) const;
++
+ /**
+ * Create default tables for
+ * the active_fe_indices in
+ * the
+ * dealii::internal::hp::DoFLevel. They
+ * are initialized with a
+ * zero indicator, meaning
+ * that fe[0] is going to be
+ * used by default. This
+ * method is called before
+ * refinement and before
+ * distribute_dofs is
+ * called. It ensures each
+ * cell has a valid
+ * active_fe_index.
+ */
+
+ void create_active_fe_table ();
+
+ /**
+ * Functions that will be triggered
+ * through signals whenever the
+ * triangulation is modified.
+ *
+ * Here they are used to
+ * administer the
+ * active_fe_fields during the
+ * spatial refinement.
+ */
- void pre_refinement_action ();
- void post_refinement_action ();
-
++ virtual void pre_refinement_action ();
++ virtual void post_refinement_action ();
+
+ /**
+ * Compute identities between
+ * DoFs located on
+ * vertices. Called from
+ * distribute_dofs().
+ */
+ void
- compute_vertex_dof_identities (std::vector<unsigned int> &new_dof_indices) const;
++ compute_vertex_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const;
+
+ /**
+ * Compute identities between
+ * DoFs located on
+ * lines. Called from
+ * distribute_dofs().
+ */
+ void
- compute_line_dof_identities (std::vector<unsigned int> &new_dof_indices) const;
++ compute_line_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const;
+
+ /**
+ * Compute identities between
+ * DoFs located on
+ * quads. Called from
+ * distribute_dofs().
+ */
+ void
- compute_quad_dof_identities (std::vector<unsigned int> &new_dof_indices) const;
++ compute_quad_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const;
+
+ /**
+ * Renumber the objects with
+ * the given and all lower
+ * structural dimensions,
+ * i.e. renumber vertices by
+ * giving a template argument
+ * of zero to the int2type
+ * argument, lines and vertices
+ * with one, etc.
+ *
+ * Note that in contrast to the
+ * public renumber_dofs()
+ * function, these internal
+ * functions do not ensure that
+ * the new DoFs are
+ * contiguously numbered. The
+ * function may therefore also
+ * be used to assign different
+ * DoFs the same number, for
+ * example to unify hp DoFs
+ * corresponding to different
+ * finite elements but
+ * co-located on the same
+ * entity.
+ */
- void renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
++ void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::internal::int2type<0>);
+
- void renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
++ void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::internal::int2type<1>);
+
- void renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
++ void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::internal::int2type<2>);
+
- void renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
++ void renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::internal::int2type<3>);
+
+ /**
+ * Space to store the DoF
+ * numbers for the different
+ * levels. Analogous to the
+ * <tt>levels[]</tt> tree of
+ * the Triangulation objects.
+ */
+ std::vector<dealii::internal::hp::DoFLevel<dim>*> levels;
+ /**
+ * Space to store the DoF
+ * numbers for the faces.
+ * Analogous to the
+ * <tt>faces</tt> pointer of
+ * the Triangulation objects.
+ */
+ dealii::internal::hp::DoFFaces<dim> *faces;
+
+ /**
+ * A structure that contains all
+ * sorts of numbers that
+ * characterize the degrees of
+ * freedom this object works on.
+ *
+ * For most members of this
+ * structure, there is an
+ * accessor function in this
+ * class that returns its value.
+ */
+ dealii::internal::DoFHandler::NumberCache number_cache;
+
+ /**
+ * Array to store the indices
+ * for degrees of freedom
+ * located at vertices.
+ *
+ * The format used here, in the
+ * form of a linked list, is
+ * the same as used for the
+ * arrays used in the
+ * internal::hp::DoFLevel
+ * hierarchy. Starting indices
+ * into this array are provided
+ * by the vertex_dofs_offsets
+ * field.
+ *
+ * Access to this field is
+ * generally through the
+ * DoFAccessor::get_vertex_dof_index() and
+ * DoFAccessor::set_vertex_dof_index()
+ * functions, encapsulating the
+ * actual data format used to
+ * the present class.
+ */
- std::vector<unsigned int> vertex_dofs;
++ std::vector<types::global_dof_index> vertex_dofs;
+
+ /**
+ * For each vertex in the
+ * triangulation, store the
+ * offset within the
+ * vertex_dofs array where the
+ * dofs for this vertex start.
+ *
+ * As for that array, the
+ * format is the same as
+ * described in the
+ * documentation of
+ * hp::DoFLevel.
+ *
+ * Access to this field is
+ * generally through the
+ * Accessor::get_vertex_dof_index() and
+ * Accessor::set_vertex_dof_index()
+ * functions, encapsulating the
+ * actual data format used to
+ * the present class.
+ */
+ std::vector<unsigned int> vertex_dofs_offsets;
+
++ std::vector<MGVertexDoFs> mg_vertex_dofs;
++
+ /**
+ * Array to store the
+ * information whether a cell on
+ * some level has children or
+ * not. It is used by the
+ * refinement listeners as a
+ * persistent buffer during the
+ * refinement, i.e. between the
+ * time pre_refinement_action is
+ * called and the time post_refinement_action
+ * runs.
+ */
+ std::vector<std::vector<bool> *> has_children;
+
+ /**
+ * A list of connections with which this object connects
+ * to the triangulation to get information about when the
+ * triangulation changes.
+ */
+ std::vector<boost::signals2::connection> tria_listeners;
+
+ /**
+ * Make accessor objects friends.
+ */
+ template <int, class> friend class dealii::DoFAccessor;
+ template <class> friend class dealii::DoFCellAccessor;
+ friend struct dealii::internal::DoFAccessor::Implementation;
+ friend struct dealii::internal::DoFCellAccessor::Implementation;
+
+ /**
+ * Likewise for DoFLevel
+ * objects since they need to
+ * access the vertex dofs in
+ * the functions that set and
+ * retrieve vertex dof indices.
+ */
+ template <int> friend class dealii::internal::hp::DoFLevel;
+ template <int> friend class dealii::internal::hp::DoFObjects;
+ friend struct dealii::internal::hp::DoFHandler::Implementation;
};
return *tria;
}
+ template<int dim, int spacedim>
+ inline
+ DoFHandler<dim, spacedim>::MGVertexDoFs::MGVertexDoFs()
+ {
+ Assert (false, ExcNotImplemented ());
+ }
+ template<int dim, int spacedim>
+ inline
+ DoFHandler<dim, spacedim>::MGVertexDoFs::~MGVertexDoFs()
+ {
+ Assert (false, ExcNotImplemented ());
+ }
+
+ template<int dim, int spacedim>
+ inline
+ types::global_dof_index DoFHandler<dim, spacedim>::MGVertexDoFs::get_index (const unsigned int,
- const unsigned int) const
++ const unsigned int) const
+ {
+ Assert (false, ExcNotImplemented ());
+ return invalid_dof_index;
+ }
+
+ template<int dim, int spacedim>
+ inline
+ void DoFHandler<dim, spacedim>::MGVertexDoFs::set_index (const unsigned int,
- const unsigned int,
- types::global_dof_index)
++ const unsigned int,
++ types::global_dof_index)
+ {
+ Assert (false, ExcNotImplemented ());
+ }
#endif
template <int dim>
class DoFObjects
{
- public:
- /**
- * Store the start index for
- * the degrees of freedom of each
- * object in the @p dofs array.
- */
- std::vector<types::global_dof_index> dof_offsets;
-
- /**
- * Store the global indices of
- * the degrees of freedom. See
- * DoFLevel() for detailed
- * information.
- */
- std::vector<types::global_dof_index > dofs;
-
- /**
- * Set the global index of
- * the @p local_index-th
- * degree of freedom located
- * on the object with number @p
- * obj_index to the value
- * given by the last
- * argument. The @p
- * dof_handler argument is
- * used to access the finite
- * element that is to be used
- * to compute the location
- * where this data is stored.
- *
- * The third argument, @p
- * fe_index, denotes which of
- * the finite elements
- * associated with this
- * object we shall
- * access. Refer to the
- * general documentation of
- * the internal::hp::DoFLevel
- * class template for more
- * information.
- */
- template <int dimm, int spacedim>
- void
- set_dof_index (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const types::global_dof_index global_index,
- const unsigned int obj_level);
-
- /**
- * Return the global index of
- * the @p local_index-th
- * degree of freedom located
- * on the object with number @p
- * obj_index. The @p
- * dof_handler argument is
- * used to access the finite
- * element that is to be used
- * to compute the location
- * where this data is stored.
- *
- * The third argument, @p
- * fe_index, denotes which of
- * the finite elements
- * associated with this
- * object we shall
- * access. Refer to the
- * general documentation of
- * the internal::hp::DoFLevel
- * class template for more
- * information.
- */
- template <int dimm, int spacedim>
- types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int local_index,
- const unsigned int obj_level) const;
-
- /**
- * Return the number of
- * finite elements that are
- * active on a given
- * object. If this is a cell,
- * the answer is of course
- * one. If it is a face, the
- * answer may be one or two,
- * depending on whether the
- * two adjacent cells use the
- * same finite element or
- * not. If it is an edge in
- * 3d, the possible return
- * value may be one or any
- * other value larger than
- * that.
- *
- * If the object is not part
- * of an active cell, then no
- * degrees of freedom have
- * been distributed and zero
- * is returned.
- */
- template <int dimm, int spacedim>
- unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
- const unsigned int obj_index) const;
-
- /**
- * Return the fe_index of the
- * n-th active finite element
- * on this object.
- */
- template <int dimm, int spacedim>
- unsigned int
- nth_active_fe_index (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
- const unsigned int obj_level,
- const unsigned int obj_index,
- const unsigned int n) const;
-
- /**
- * Check whether a given
- * finite element index is
- * used on the present
- * object or not.
- */
- template <int dimm, int spacedim>
- bool
- fe_index_is_active (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
- const unsigned int obj_index,
- const unsigned int fe_index,
- const unsigned int obj_level) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
+ public:
+ /**
+ * Store the start index for
+ * the degrees of freedom of each
+ * object in the @p dofs array.
+ */
- std::vector<unsigned int> dof_offsets;
++ std::vector<types::global_dof_index> dof_offsets;
+
+ /**
+ * Store the global indices of
+ * the degrees of freedom. See
+ * DoFLevel() for detailed
+ * information.
+ */
- std::vector<unsigned int> dofs;
++ std::vector<types::global_dof_index > dofs;
+
+ /**
+ * Set the global index of
+ * the @p local_index-th
+ * degree of freedom located
+ * on the object with number @p
+ * obj_index to the value
+ * given by the last
+ * argument. The @p
+ * dof_handler argument is
+ * used to access the finite
+ * element that is to be used
+ * to compute the location
+ * where this data is stored.
+ *
+ * The third argument, @p
+ * fe_index, denotes which of
+ * the finite elements
+ * associated with this
+ * object we shall
+ * access. Refer to the
+ * general documentation of
+ * the internal::hp::DoFLevel
+ * class template for more
+ * information.
+ */
+ template <int dimm, int spacedim>
+ void
+ set_dof_index (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
- const unsigned int global_index,
++ const types::global_dof_index global_index,
+ const unsigned int obj_level);
+
+ /**
+ * Return the global index of
+ * the @p local_index-th
+ * degree of freedom located
+ * on the object with number @p
+ * obj_index. The @p
+ * dof_handler argument is
+ * used to access the finite
+ * element that is to be used
+ * to compute the location
+ * where this data is stored.
+ *
+ * The third argument, @p
+ * fe_index, denotes which of
+ * the finite elements
+ * associated with this
+ * object we shall
+ * access. Refer to the
+ * general documentation of
+ * the internal::hp::DoFLevel
+ * class template for more
+ * information.
+ */
+ template <int dimm, int spacedim>
- unsigned int
++ types::global_dof_index
+ get_dof_index (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const unsigned int obj_level) const;
+
+ /**
+ * Return the number of
+ * finite elements that are
+ * active on a given
+ * object. If this is a cell,
+ * the answer is of course
+ * one. If it is a face, the
+ * answer may be one or two,
+ * depending on whether the
+ * two adjacent cells use the
+ * same finite element or
+ * not. If it is an edge in
+ * 3d, the possible return
+ * value may be one or any
+ * other value larger than
+ * that.
+ *
+ * If the object is not part
+ * of an active cell, then no
+ * degrees of freedom have
+ * been distributed and zero
+ * is returned.
+ */
+ template <int dimm, int spacedim>
+ unsigned int
+ n_active_fe_indices (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
+ const unsigned int obj_index) const;
+
+ /**
+ * Return the fe_index of the
+ * n-th active finite element
+ * on this object.
+ */
+ template <int dimm, int spacedim>
+ unsigned int
+ nth_active_fe_index (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
+ const unsigned int obj_level,
+ const unsigned int obj_index,
+ const unsigned int n) const;
+
+ /**
+ * Check whether a given
+ * finite element index is
+ * used on the present
+ * object or not.
+ */
+ template <int dimm, int spacedim>
+ bool
+ fe_index_is_active (const dealii::hp::DoFHandler<dimm,spacedim> &dof_handler,
+ const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int obj_level) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
};
}
else
{
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
- const types::global_dof_index starting_offset = dof_offsets[obj_index];
+ // we are in higher space
+ // dimensions, so there may
+ // be multiple finite
+ // elements associated with
+ // this object. hop along
+ // the list of index sets
+ // until we find the one
+ // with the correct
+ // fe_index, and then poke
+ // into that part. trigger
+ // an exception if we can't
+ // find a set for this
+ // particular fe_index
- const unsigned int starting_offset = dof_offsets[obj_index];
++ const types::global_dof_index starting_offset = dof_offsets[obj_index];
const unsigned int *pointer = &dofs[starting_offset];
while (true)
{
}
else
{
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
- const types::global_dof_index starting_offset = dof_offsets[obj_index];
+ // we are in higher space
+ // dimensions, so there may
+ // be multiple finite
+ // elements associated with
+ // this object. hop along
+ // the list of index sets
+ // until we find the one
+ // with the correct
+ // fe_index, and then poke
+ // into that part. trigger
+ // an exception if we can't
+ // find a set for this
+ // particular fe_index
- const unsigned int starting_offset = dof_offsets[obj_index];
++ const types::global_dof_index starting_offset = dof_offsets[obj_index];
unsigned int *pointer = &dofs[starting_offset];
while (true)
{
return 1;
else
{
- // otherwise, there may be
- // multiple finite elements
- // associated with this
- // object. hop along the
- // list of index sets until
- // we find the one with the
- // correct fe_index, and
- // then poke into that
- // part. trigger an
- // exception if we can't
- // find a set for this
- // particular fe_index
+ // otherwise, there may be
+ // multiple finite elements
+ // associated with this
+ // object. hop along the
+ // list of index sets until
+ // we find the one with the
+ // correct fe_index, and
+ // then poke into that
+ // part. trigger an
+ // exception if we can't
+ // find a set for this
+ // particular fe_index
const unsigned int starting_offset = dof_offsets[obj_index];
- const unsigned int *pointer = &dofs[starting_offset];
+ const types::global_dof_index *pointer = &dofs[starting_offset];
unsigned int counter = 0;
while (true)
{
ExcIndexRange (n, 0,
n_active_fe_indices(dof_handler, obj_index)));
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
+ // we are in higher space
+ // dimensions, so there may
+ // be multiple finite
+ // elements associated with
+ // this object. hop along
+ // the list of index sets
+ // until we find the one
+ // with the correct
+ // fe_index, and then poke
+ // into that part. trigger
+ // an exception if we can't
+ // find a set for this
+ // particular fe_index
const unsigned int starting_offset = dof_offsets[obj_index];
- const unsigned int *pointer = &dofs[starting_offset];
+ const types::global_dof_index *pointer = &dofs[starting_offset];
unsigned int counter = 0;
while (true)
{
}
else
{
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
+ // we are in higher space
+ // dimensions, so there may
+ // be multiple finite
+ // elements associated with
+ // this object. hop along
+ // the list of index sets
+ // until we find the one
+ // with the correct
+ // fe_index, and then poke
+ // into that part. trigger
+ // an exception if we can't
+ // find a set for this
+ // particular fe_index
const unsigned int starting_offset = dof_offsets[obj_index];
- const unsigned int *pointer = &dofs[starting_offset];
+ const types::global_dof_index *pointer = &dofs[starting_offset];
while (true)
{
if (*pointer == numbers::invalid_unsigned_int)
template <int dim, int spacedim=dim>
class FEValues : public dealii::internal::hp::FEValuesBase<dim,dim,dealii::FEValues<dim,spacedim> >
{
- public:
+ public:
- static const unsigned int dimension = dim;
+ static const unsigned int dimension = dim;
- static const unsigned int space_dimension = spacedim;
+ static const unsigned int space_dimension = spacedim;
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEValues (const dealii::hp::MappingCollection<dim,spacedim> &mapping_collection,
- const dealii::hp::FECollection<dim,spacedim> &fe_collection,
- const dealii::hp::QCollection<dim> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FEValues (const dealii::hp::MappingCollection<dim,spacedim> &mapping_collection,
- const dealii::hp::FECollection<dim,spacedim> &fe_collection,
++ const dealii::hp::FECollection<dim,spacedim> &fe_collection,
+ const dealii::hp::QCollection<dim> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters, and choose a
- * @p MappingQ1 object for the
- * mapping object.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters, and choose a
+ * @p MappingQ1 object for the
+ * mapping object.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FEValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Reinitialize the object for
- * the given cell.
- *
- * After the call, you can get
- * an FEValues object using the
- * get_present_fe_values()
- * function that corresponds to
- * the present cell. For this
- * FEValues object, we use the
- * additional arguments
- * described below to determine
- * which finite element,
- * mapping, and quadrature
- * formula to use. They are
- * ordered in such a way that the
- * arguments one may want to
- * change most frequently come
- * first. The rules for these
- * arguments are as follows:
- *
- * If the @p fe_index argument
- * to this function is left at
- * its default value, then we
- * use that finite element
- * within the hp::FECollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>. Consequently,
- * the hp::FECollection
- * argument given to this
- * object should really be the
- * same as that used in the
- * construction of the
- * hp::DoFHandler associated
- * with the present cell. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>.
- *
- * If the @p q_index argument
- * is left at its default
- * value, then we use that
- * quadrature formula within
- * the hp::QCollection passed
- * to the constructor of this
- * class with index given by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite element. In
- * this case, there should be a
- * corresponding quadrature
- * formula for each finite
- * element in the
- * hp::FECollection. As a
- * special case, if the
- * quadrature collection
- * contains only a single
- * element (a frequent case if
- * one wants to use the same
- * quadrature object for all
- * finite elements in an hp
- * discretization, even if that
- * may not be the most
- * efficient), then this single
- * quadrature is used unless a
- * different value for this
- * argument is specified. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>
- * or the choice for the single
- * quadrature.
- *
- * If the @p mapping_index
- * argument is left at its
- * default value, then we use
- * that mapping object within
- * the hp::MappingCollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite
- * element. As above, if the
- * mapping collection contains
- * only a single element (a
- * frequent case if one wants
- * to use a MappingQ1 object
- * for all finite elements in
- * an hp discretization), then
- * this single mapping is used
- * unless a different value for
- * this argument is specified.
- */
- void
- reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Reinitialize the object for
+ * the given cell.
+ *
+ * After the call, you can get
+ * an FEValues object using the
+ * get_present_fe_values()
+ * function that corresponds to
+ * the present cell. For this
+ * FEValues object, we use the
+ * additional arguments
+ * described below to determine
+ * which finite element,
+ * mapping, and quadrature
+ * formula to use. They are
+ * ordered in such a way that the
+ * arguments one may want to
+ * change most frequently come
+ * first. The rules for these
+ * arguments are as follows:
+ *
+ * If the @p fe_index argument
+ * to this function is left at
+ * its default value, then we
+ * use that finite element
+ * within the hp::FECollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>. Consequently,
+ * the hp::FECollection
+ * argument given to this
+ * object should really be the
+ * same as that used in the
+ * construction of the
+ * hp::DoFHandler associated
+ * with the present cell. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>.
+ *
+ * If the @p q_index argument
+ * is left at its default
+ * value, then we use that
+ * quadrature formula within
+ * the hp::QCollection passed
+ * to the constructor of this
+ * class with index given by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite element. In
+ * this case, there should be a
+ * corresponding quadrature
+ * formula for each finite
+ * element in the
+ * hp::FECollection. As a
+ * special case, if the
+ * quadrature collection
+ * contains only a single
+ * element (a frequent case if
+ * one wants to use the same
+ * quadrature object for all
+ * finite elements in an hp
+ * discretization, even if that
+ * may not be the most
+ * efficient), then this single
+ * quadrature is used unless a
+ * different value for this
+ * argument is specified. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>
+ * or the choice for the single
+ * quadrature.
+ *
+ * If the @p mapping_index
+ * argument is left at its
+ * default value, then we use
+ * that mapping object within
+ * the hp::MappingCollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite
+ * element. As above, if the
+ * mapping collection contains
+ * only a single element (a
+ * frequent case if one wants
+ * to use a MappingQ1 object
+ * for all finite elements in
+ * an hp discretization), then
+ * this single mapping is used
+ * unless a different value for
+ * this argument is specified.
+ */
+ void
+ reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * doesn't make sense for
- * triangulation iterators,
- * this function chooses the
- * zero-th finite element,
- * mapping, and quadrature
- * object from the relevant
- * constructions passed to the
- * constructor of this
- * object. The only exception
- * is if you specify a value
- * different from the default
- * value for any of these last
- * three arguments.
- */
- void
- reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFhandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * doesn't make sense for
+ * triangulation iterators,
+ * this function chooses the
+ * zero-th finite element,
+ * mapping, and quadrature
+ * object from the relevant
+ * constructions passed to the
+ * constructor of this
+ * object. The only exception
+ * is if you specify a value
+ * different from the default
+ * value for any of these last
+ * three arguments.
+ */
+ void
+ reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
};
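// A minimal sketch of how hp::FEValues is typically driven from a cell loop;
// dof_handler, fe_collection and q_collection are assumed to be consistent
// with one another, and mesh_volume is only an illustrative name. It computes
// the total measure of the mesh by summing JxW values on every cell.
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_collection.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/hp/q_collection.h>

template <int dim>
double mesh_volume (const dealii::hp::DoFHandler<dim> &dof_handler,
                    const dealii::hp::FECollection<dim> &fe_collection,
                    const dealii::hp::QCollection<dim> &q_collection)
{
  dealii::hp::FEValues<dim> hp_fe_values (fe_collection, q_collection,
                                          dealii::update_JxW_values);
  double volume = 0;
  typename dealii::hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell != endc; ++cell)
    {
      // The default arguments pick element, mapping and quadrature
      // according to cell->active_fe_index().
      hp_fe_values.reinit (cell);
      const dealii::FEValues<dim> &fe_values =
        hp_fe_values.get_present_fe_values ();
      for (unsigned int q = 0; q < fe_values.n_quadrature_points; ++q)
        volume += fe_values.JxW (q);
    }
  return volume;
}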
template <int dim, int spacedim=dim>
class FEFaceValues : public dealii::internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> >
{
- public:
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEFaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ public:
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FEFaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
++ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters, and choose a
- * @p MappingQ1 object for the
- * mapping object.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters, and choose a
+ * @p MappingQ1 object for the
+ * mapping object.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
- FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
++ FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Reinitialize the object for
- * the given cell and face.
- *
- * After the call, you can get
- * an FEFaceValues object using the
- * get_present_fe_values()
- * function that corresponds to
- * the present cell. For this
- * FEFaceValues object, we use the
- * additional arguments
- * described below to determine
- * which finite element,
- * mapping, and quadrature
- * formula to use. They are
- * ordered in such a way that the
- * arguments one may want to
- * change most frequently come
- * first. The rules for these
- * arguments are as follows:
- *
- * If the @p fe_index argument
- * to this function is left at
- * its default value, then we
- * use that finite element
- * within the hp::FECollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>. Consequently,
- * the hp::FECollection
- * argument given to this
- * object should really be the
- * same as that used in the
- * construction of the
- * hp::DoFHandler associated
- * with the present cell. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>.
- *
- * If the @p q_index argument
- * is left at its default
- * value, then we use that
- * quadrature formula within
- * the hp::QCollection passed
- * to the constructor of this
- * class with index given by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite element. In
- * this case, there should be a
- * corresponding quadrature
- * formula for each finite
- * element in the
- * hp::FECollection. As a
- * special case, if the
- * quadrature collection
- * contains only a single
- * element (a frequent case if
- * one wants to use the same
- * quadrature object for all
- * finite elements in an hp
- * discretization, even if that
- * may not be the most
- * efficient), then this single
- * quadrature is used unless a
- * different value for this
- * argument is specified. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>
- * or the choice for the single
- * quadrature.
- *
- * If the @p mapping_index
- * argument is left at its
- * default value, then we use
- * that mapping object within
- * the hp::MappingCollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite
- * element. As above, if the
- * mapping collection contains
- * only a single element (a
- * frequent case if one wants
- * to use a MappingQ1 object
- * for all finite elements in
- * an hp discretization), then
- * this single mapping is used
- * unless a different value for
- * this argument is specified.
- */
- void
- reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Reinitialize the object for
+ * the given cell and face.
+ *
+ * After the call, you can get
+ * an FEFaceValues object using the
+ * get_present_fe_values()
+ * function that corresponds to
+ * the present cell. For this
+ * FEFaceValues object, we use the
+ * additional arguments
+ * described below to determine
+ * which finite element,
+ * mapping, and quadrature
+ * formula to use. They are
+ * ordered in such a way that the
+ * arguments one may want to
+ * change most frequently come
+ * first. The rules for these
+ * arguments are as follows:
+ *
+ * If the @p fe_index argument
+ * to this function is left at
+ * its default value, then we
+ * use that finite element
+ * within the hp::FECollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>. Consequently,
+ * the hp::FECollection
+ * argument given to this
+ * object should really be the
+ * same as that used in the
+ * construction of the
+ * hp::DoFHandler associated
+ * with the present cell. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>.
+ *
+ * If the @p q_index argument
+ * is left at its default
+ * value, then we use that
+ * quadrature formula within
+ * the hp::QCollection passed
+ * to the constructor of this
+ * class with index given by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite element. In
+ * this case, there should be a
+ * corresponding quadrature
+ * formula for each finite
+ * element in the
+ * hp::FECollection. As a
+ * special case, if the
+ * quadrature collection
+ * contains only a single
+ * element (a frequent case if
+ * one wants to use the same
+ * quadrature object for all
+ * finite elements in an hp
+ * discretization, even if that
+ * may not be the most
+ * efficient), then this single
+ * quadrature is used unless a
+ * different value for this
+ * argument is specified. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>
+ * or the choice for the single
+ * quadrature.
+ *
+ * If the @p mapping_index
+ * argument is left at its
+ * default value, then we use
+ * that mapping object within
+ * the hp::MappingCollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite
+ * element. As above, if the
+ * mapping collection contains
+ * only a single element (a
+ * frequent case if one wants
+ * to use a MappingQ1 object
+ * for all finite elements in
+ * an hp discretization), then
+ * this single mapping is used
+ * unless a different value for
+ * this argument is specified.
+ */
+ void
+ reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFHandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * doesn't make sense for
- * triangulation iterators,
- * this function chooses the
- * zero-th finite element,
- * mapping, and quadrature
- * object from the relevant
- * constructions passed to the
- * constructor of this
- * object. The only exception
- * is if you specify a value
- * different from the default
- * value for any of these last
- * three arguments.
- */
- void
- reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFhandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * doesn't make sense for
+ * triangulation iterators,
+ * this function chooses the
+ * zero-th finite element,
+ * mapping, and quadrature
+ * object from the relevant
+ * constructions passed to the
+ * constructor of this
+ * object. The only exception
+ * is if you specify a value
+ * different from the default
+ * value for any of these last
+ * three arguments.
+ */
+ void
+ reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
};
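// Editor's illustration (not part of the patch above): a minimal sketch of how
// the hp::FEFaceValues class declared above is typically used. The names
// dof_handler, fe_collection and face_quadrature are assumptions; any
// hp::DoFHandler with matching hp::FECollection / hp::QCollection<dim-1>
// objects would do, and the include paths assume the usual <deal.II/...>
// layout of this library version.
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/base/geometry_info.h>

using namespace dealii;

template <int dim>
void integrate_boundary_faces (const hp::DoFHandler<dim>    &dof_handler,
                               const hp::FECollection<dim>  &fe_collection,
                               const hp::QCollection<dim-1> &face_quadrature)
{
  // One hp::FEFaceValues object for the whole loop; it creates the
  // underlying FEFaceValues objects on demand.
  hp::FEFaceValues<dim> hp_fe_face_values (fe_collection,
                                           face_quadrature,
                                           update_values | update_JxW_values);

  typename hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
      if (cell->face(face)->at_boundary())
        {
          // With the index arguments left at their defaults, the finite
          // element, mapping and quadrature are selected via
          // cell->active_fe_index(), as described in reinit() above.
          hp_fe_face_values.reinit (cell, face);
          const FEFaceValues<dim> &fe_face_values
            = hp_fe_face_values.get_present_fe_values ();

          double boundary_area = 0;
          for (unsigned int q=0; q<fe_face_values.n_quadrature_points; ++q)
            boundary_area += fe_face_values.JxW (q);
          (void)boundary_area;   // a real program would use this value
        }
}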
template <int dim, int spacedim=dim>
class FESubfaceValues : public dealii::internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> >
{
- public:
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FESubfaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ public:
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FESubfaceValues (const hp::MappingCollection<dim,spacedim> &mapping_collection,
+ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Constructor. Initialize this
- * object with the given
- * parameters, and choose a
- * @p MappingQ1 object for the
- * mapping object.
- *
- * The finite element
- * collection parameter is
- * actually ignored, but is in
- * the signature of this
- * function to make it
- * compatible with the
- * signature of the respective
- * constructor of the usual
- * FEValues object, with
- * the respective parameter in
- * that function also being the
- * return value of the
- * <tt>DoFHandler::get_fe()</tt>
- * function.
- */
- FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags);
+ /**
+ * Constructor. Initialize this
+ * object with the given
+ * parameters, and choose a
+ * @p MappingQ1 object for the
+ * mapping object.
+ *
+ * The finite element
+ * collection parameter is
+ * actually ignored, but is in
+ * the signature of this
+ * function to make it
+ * compatible with the
+ * signature of the respective
+ * constructor of the usual
+ * FEValues object, with
+ * the respective parameter in
+ * that function also being the
+ * return value of the
+ * <tt>DoFHandler::get_fe()</tt>
+ * function.
+ */
+ FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags);
- /**
- * Reinitialize the object for
- * the given cell, face, and subface.
- *
- * After the call, you can get
- * an FESubfaceValues object using the
- * get_present_fe_values()
- * function that corresponds to
- * the present cell. For this
- * FESubfaceValues object, we use the
- * additional arguments
- * described below to determine
- * which finite element,
- * mapping, and quadrature
- * formula to use. They are
- * ordered in such a way that the
- * arguments one may want to
- * change most frequently come
- * first. The rules for these
- * arguments are as follows:
- *
- * If the @p q_index argument
- * is left at its default
- * value, then we use that
- * quadrature formula within
- * the hp::QCollection passed
- * to the constructor of this
- * class with index given by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite element. In
- * this case, there should be a
- * corresponding quadrature
- * formula for each finite
- * element in the
- * hp::FECollection. As a
- * special case, if the
- * quadrature collection
- * contains only a single
- * element (a frequent case if
- * one wants to use the same
- * quadrature object for all
- * finite elements in an hp
- * discretization, even if that
- * may not be the most
- * efficient), then this single
- * quadrature is used unless a
- * different value for this
- * argument is specified. On
- * the other hand, if a value
- * is given for this argument,
- * it overrides the choice of
- * <code>cell-@>active_fe_index()</code>
- * or the choice for the single
- * quadrature.
- *
- * If the @p mapping_index
- * argument is left at its
- * default value, then we use
- * that mapping object within
- * the hp::MappingCollection
- * passed to the constructor of
- * this class with index given
- * by
- * <code>cell-@>active_fe_index()</code>,
- * i.e. the same index as that
- * of the finite
- * element. As above, if the
- * mapping collection contains
- * only a single element (a
- * frequent case if one wants
- * to use a MappingQ1 object
- * for all finite elements in
- * an hp discretization), then
- * this single mapping is used
- * unless a different value for
- * this argument is specified.
- */
- void
- reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Reinitialize the object for
+ * the given cell, face, and subface.
+ *
+ * After the call, you can get
+ * an FESubfaceValues object using the
+ * get_present_fe_values()
+ * function that corresponds to
+ * the present cell. For this
+ * FESubfaceValues object, we use the
+ * additional arguments
+ * described below to determine
+ * which finite element,
+ * mapping, and quadrature
+ * formula to use. They are
+ * ordered in such a way that the
+ * arguments one may want to
+ * change most frequently come
+ * first. The rules for these
+ * arguments are as follows:
+ *
+ * If the @p q_index argument
+ * is left at its default
+ * value, then we use that
+ * quadrature formula within
+ * the hp::QCollection passed
+ * to the constructor of this
+ * class with index given by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite element. In
+ * this case, there should be a
+ * corresponding quadrature
+ * formula for each finite
+ * element in the
+ * hp::FECollection. As a
+ * special case, if the
+ * quadrature collection
+ * contains only a single
+ * element (a frequent case if
+ * one wants to use the same
+ * quadrature object for all
+ * finite elements in an hp
+ * discretization, even if that
+ * may not be the most
+ * efficient), then this single
+ * quadrature is used unless a
+ * different value for this
+ * argument is specified. On
+ * the other hand, if a value
+ * is given for this argument,
+ * it overrides the choice of
+ * <code>cell-@>active_fe_index()</code>
+ * or the choice for the single
+ * quadrature.
+ *
+ * If the @p mapping_index
+ * argument is left at its
+ * default value, then we use
+ * that mapping object within
+ * the hp::MappingCollection
+ * passed to the constructor of
+ * this class with index given
+ * by
+ * <code>cell-@>active_fe_index()</code>,
+ * i.e. the same index as that
+ * of the finite
+ * element. As above, if the
+ * mapping collection contains
+ * only a single element (a
+ * frequent case if one wants
+ * to use a MappingQ1 object
+ * for all finite elements in
+ * an hp discretization), then
+ * this single mapping is used
+ * unless a different value for
+ * this argument is specified.
+ */
+ void
+ reinit (const typename hp::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFhandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename dealii::DoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * always returns zero for
- * non-hp iterators, this
- * function chooses the zero-th
- * finite element, mapping, and
- * quadrature object from the
- * relevant constructions
- * passed to the constructor of
- * this object. The only
- * exception is if you specify
- * a value different from the
- * default value for any of
- * these last three arguments.
- */
- void
- reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFhandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * always returns zero for
+ * non-hp iterators, this
+ * function chooses the zero-th
+ * finite element, mapping, and
+ * quadrature object from the
+ * relevant constructions
+ * passed to the constructor of
+ * this object. The only
+ * exception is if you specify
+ * a value different from the
+ * default value for any of
+ * these last three arguments.
+ */
+ void
+ reinit (const typename MGDoFHandler<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
- /**
- * Like the previous function,
- * but for non-hp
- * iterators. The reason this
- * (and the other non-hp
- * iterator) function exists is
- * so that one can use
- * hp::FEValues not only for
- * hp::DoFHandler objects, but
- * for all sorts of DoFHandler
- * objects, and triangulations
- * not associated with
- * DoFHandlers in general.
- *
- * Since
- * <code>cell-@>active_fe_index()</code>
- * doesn't make sense for
- * triangulation iterators,
- * this function chooses the
- * zero-th finite element,
- * mapping, and quadrature
- * object from the relevant
- * constructions passed to the
- * constructor of this
- * object. The only exception
- * is if you specify a value
- * different from the default
- * value for any of these last
- * three arguments.
- */
- void
- reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int subface_no,
- const unsigned int q_index = numbers::invalid_unsigned_int,
- const unsigned int mapping_index = numbers::invalid_unsigned_int,
- const unsigned int fe_index = numbers::invalid_unsigned_int);
+ /**
+ * Like the previous function,
+ * but for non-hp
+ * iterators. The reason this
+ * (and the other non-hp
+ * iterator) function exists is
+ * so that one can use
+ * hp::FEValues not only for
+ * hp::DoFhandler objects, but
+ * for all sorts of DoFHandler
+ * objects, and triangulations
+ * not associated with
+ * DoFHandlers in general.
+ *
+ * Since
+ * <code>cell-@>active_fe_index()</code>
+ * doesn't make sense for
+ * triangulation iterators,
+ * this function chooses the
+ * zero-th finite element,
+ * mapping, and quadrature
+ * object from the relevant
+ * constructions passed to the
+ * constructor of this
+ * object. The only exception
+ * is if you specify a value
+ * different from the default
+ * value for any of these last
+ * three arguments.
+ */
+ void
+ reinit (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const unsigned int face_no,
+ const unsigned int subface_no,
+ const unsigned int q_index = numbers::invalid_unsigned_int,
+ const unsigned int mapping_index = numbers::invalid_unsigned_int,
+ const unsigned int fe_index = numbers::invalid_unsigned_int);
};
}
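// Editor's illustration (not part of the patch above): a sketch, under the same
// assumptions as the previous example, of where hp::FESubfaceValues::reinit()
// is typically called -- namely when the neighbor of a cell is refined and a
// face therefore has to be visited subface by subface from the coarse side.
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/base/geometry_info.h>

using namespace dealii;

template <int dim>
void visit_refined_faces (const hp::DoFHandler<dim>    &dof_handler,
                          const hp::FECollection<dim>  &fe_collection,
                          const hp::QCollection<dim-1> &face_quadrature)
{
  hp::FESubfaceValues<dim> hp_fe_subface_values (fe_collection,
                                                 face_quadrature,
                                                 update_values | update_JxW_values);

  typename hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
      if (!cell->face(face)->at_boundary() &&
          cell->face(face)->has_children())
        for (unsigned int subface=0;
             subface<cell->face(face)->n_children(); ++subface)
          {
            // As for hp::FEFaceValues, the default index arguments pick the
            // finite element, mapping and quadrature via
            // cell->active_fe_index() of the present (coarse) cell.
            hp_fe_subface_values.reinit (cell, face, subface);
            const FESubfaceValues<dim> &fe_subface_values
              = hp_fe_subface_values.get_present_fe_values ();

            double subface_area = 0;
            for (unsigned int q=0; q<fe_subface_values.n_quadrature_points; ++q)
              subface_area += fe_subface_values.JxW (q);
            (void)subface_area;  // a real program would use this value
          }
}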
template <typename MatrixType>
class BlockMatrixBase : public Subscriptor
{
- public:
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef MatrixType BlockType;
-
- /**
- * Type of matrix entries. In analogy to
- * the STL container classes.
- */
- typedef typename BlockType::value_type value_type;
- typedef value_type *pointer;
- typedef const value_type *const_pointer;
- typedef value_type &reference;
- typedef const value_type &const_reference;
- typedef std::size_t size_type;
-
- typedef
- MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, false> >
- iterator;
-
- typedef
- MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, true> >
- const_iterator;
-
-
- /**
- * Default constructor.
- */
- BlockMatrixBase ();
-
- /**
- * Copy the given matrix to this
- * one. The operation throws an
- * error if the sparsity patterns
- * of the two involved matrices
- * do not point to the same
- * object, since in this case the
- * copy operation is
- * cheaper. Since this operation
- * is nonetheless not for free, we
- * do not make it available
- * through operator=(), since
- * this may lead to unwanted
- * usage, e.g. in copy arguments
- * to functions, which should
- * really be arguments by
- * reference.
- *
- * The source matrix may be a
- * matrix of arbitrary type, as
- * long as its data type is
- * convertible to the data type
- * of this matrix.
- *
- * The function returns a
- * reference to <tt>this</tt>.
- */
- template <class BlockMatrixType>
- BlockMatrixBase &
- copy_from (const BlockMatrixType &source);
-
- /**
- * Access the block with the
- * given coordinates.
- */
- BlockType &
- block (const unsigned int row,
- const unsigned int column);
-
-
- /**
- * Access the block with the
- * given coordinates. Version for
- * constant objects.
- */
- const BlockType &
- block (const unsigned int row,
- const unsigned int column) const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int n () const;
-
-
- /**
- * Return the number of blocks in
- * a column. Returns zero if no
- * sparsity pattern is presently
- * associated to this matrix.
- */
- unsigned int n_block_rows () const;
-
- /**
- * Return the number of blocks in
- * a row. Returns zero if no
- * sparsity pattern is presently
- * associated to this matrix.
- */
- unsigned int n_block_cols () const;
-
- /**
- * Set the element <tt>(i,j)</tt>
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const value_type value);
-
- /**
- * Set all elements given in a
- * FullMatrix into the sparse matrix
- * locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number>
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number>
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number>
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- template <typename number>
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values = false);
-
- /**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const value_type value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number>
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number>
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number>
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Return the value of the entry
- * (i,j). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the wanted element does not
- * exist in the matrix.
- */
- value_type operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry <tt>(i,j)</tt>. The only
- * difference is that if this
- * entry does not exist in the
- * sparsity pattern, then instead
- * of raising an exception, zero
- * is returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- */
- value_type el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal element in
- * the <i>i</i>th row. This function
- * throws an error if the matrix is not
- * quadratic and also if the diagonal
- * blocks of the matrix are not
- * quadratic.
- *
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
- */
- value_type diag_element (const unsigned int i) const;
-
- /**
- * Call the compress() function on all
- * the subblocks of the matrix.
- *
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Multiply the entire matrix by a
- * fixed factor.
- */
- BlockMatrixBase & operator *= (const value_type factor);
-
- /**
- * Divide the entire matrix by a
- * fixed factor.
- */
- BlockMatrixBase & operator /= (const value_type factor);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>. This
- * function throws an error if the
- * sparsity patterns of the two involved
- * matrices do not point to the same
- * object, since in this case the
- * operation is cheaper.
- *
- * The source matrix may be a sparse
- * matrix over an arbitrary underlying
- * scalar type, as long as its data type
- * is convertible to the data type of
- * this matrix.
- */
- template <class BlockMatrixType>
- void add (const value_type factor,
- const BlockMatrixType &matrix);
-
-
- /**
- * Adding Matrix-vector
- * multiplication. Add $M*src$ to
- * $dst$ with $M$ being this
- * matrix.
- */
- template <class BlockVectorType>
- void vmult_add (BlockVectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- */
- template <class BlockVectorType>
- void Tvmult_add (BlockVectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Return the norm of the vector
- * <i>v</i> with respect to the
- * norm induced by this matrix,
- * i.e. <i>v<sup>T</sup>Mv</i>. This
- * is useful, e.g. in the finite
- * element context, where the
- * <i>L<sup>2</sup></i>-norm of a
- * function equals the matrix
- * norm with respect to the mass
- * matrix of the vector
- * representing the nodal values
- * of the finite element
- * function. Note that even
- * though the function's name
- * might suggest something
- * different, for historic
- * reasons not the norm but its
- * square is returned, as defined
- * above by the scalar product.
- *
- * Obviously, the matrix needs to
- * be square for this operation.
- */
- template <class BlockVectorType>
- value_type
- matrix_norm_square (const BlockVectorType &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- */
- template <class BlockVectorType>
- value_type
- matrix_scalar_product (const BlockVectorType &u,
- const BlockVectorType &v) const;
-
- /**
- * Compute the residual
- * <i>r=b-Ax</i>. Write the
- * residual into <tt>dst</tt>.
- */
- template <class BlockVectorType>
- value_type residual (BlockVectorType &dst,
- const BlockVectorType &x,
- const BlockVectorType &b) const;
-
- /**
- * STL-like iterator with the
- * first entry.
- */
- iterator begin ();
-
- /**
- * Final iterator.
- */
- iterator end ();
-
- /**
- * STL-like iterator with the
- * first entry of row <tt>r</tt>.
- */
- iterator begin (const unsigned int r);
-
- /**
- * Final iterator of row <tt>r</tt>.
- */
- iterator end (const unsigned int r);
- /**
- * STL-like iterator with the
- * first entry.
- */
- const_iterator begin () const;
-
- /**
- * Final iterator.
- */
- const_iterator end () const;
-
- /**
- * STL-like iterator with the
- * first entry of row <tt>r</tt>.
- */
- const_iterator begin (const unsigned int r) const;
-
- /**
- * Final iterator of row <tt>r</tt>.
- */
- const_iterator end (const unsigned int r) const;
-
- /**
- * Return a reference to the underlying
- * BlockIndices data of the rows.
- */
- const BlockIndices & get_row_indices () const;
-
- /**
- * Return a reference to the underlying
- * BlockIndices data of the columns.
- */
- const BlockIndices & get_column_indices () const;
-
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object. Note that only the memory
- * reserved on the current processor is
- * returned in case this is called in
- * an MPI-based program.
- */
- std::size_t memory_consumption () const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- //@}
- protected:
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- *
- * This calls clear for all
- * sub-matrices and then resets this
- * object to have no blocks at all.
- *
- * This function is protected
- * since it may be necessary to
- * release additional structures.
- * A derived class can make it
- * public again, if it is
- * sufficient.
- */
- void clear ();
-
- /**
- * Index arrays for rows and columns.
- */
- BlockIndices row_block_indices;
- BlockIndices column_block_indices;
-
- /**
- * Array of sub-matrices.
- */
- Table<2,SmartPointer<BlockType, BlockMatrixBase<MatrixType> > > sub_objects;
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- *
- * Derived classes should call this
- * function whenever the size of the
- * sub-objects has changed and the @p
- * X_block_indices arrays need to be
- * updated.
- *
- * Note that this function is not public
- * since not all derived classes need to
- * export its interface. For example, for
- * the usual deal.II SparseMatrix class,
- * the sizes are implicitly determined
- * whenever reinit() is called, and
- * individual blocks cannot be
- * resized. For that class, this function
- * therefore does not have to be
- * public. On the other hand, for the
- * PETSc classes, there is no associated
- * sparsity pattern object that
- * determines the block sizes, and for
- * these the function needs to be
- * publicly available. These classes
- * therefore export this function.
- */
- void collect_sizes ();
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType>
- void vmult_block_block (BlockVectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void vmult_block_nonblock (BlockVectorType &dst,
- const VectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void vmult_nonblock_block (VectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class VectorType>
- void vmult_nonblock_nonblock (VectorType &dst,
- const VectorType &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType>
- void Tvmult_block_block (BlockVectorType &dst,
+ public:
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef MatrixType BlockType;
+
+ /**
+ * Type of matrix entries. In analogy to
+ * the STL container classes.
+ */
+ typedef typename BlockType::value_type value_type;
+ typedef value_type *pointer;
+ typedef const value_type *const_pointer;
+ typedef value_type &reference;
+ typedef const value_type &const_reference;
+ typedef std::size_t size_type;
+
+ typedef
+ MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, false> >
+ iterator;
+
+ typedef
+ MatrixIterator<BlockMatrixIterators::Accessor<BlockMatrixBase, true> >
+ const_iterator;
+
+
+ /**
+ * Default constructor.
+ */
+ BlockMatrixBase ();
+
+ /**
+ * Copy the given matrix to this
+ * one. The operation throws an
+ * error if the sparsity patterns
+ * of the two involved matrices
+ * do not point to the same
+ * object, since in this case the
+ * copy operation is
+ * cheaper. Since this operation
+ * is nonetheless not for free, we
+ * do not make it available
+ * through operator=(), since
+ * this may lead to unwanted
+ * usage, e.g. in copy arguments
+ * to functions, which should
+ * really be arguments by
+ * reference.
+ *
+ * The source matrix may be a
+ * matrix of arbitrary type, as
+ * long as its data type is
+ * convertible to the data type
+ * of this matrix.
+ *
+ * The function returns a
+ * reference to <tt>this</tt>.
+ */
+ template <class BlockMatrixType>
+ BlockMatrixBase &
+ copy_from (const BlockMatrixType &source);
+
+ /**
+ * Access the block with the
+ * given coordinates.
+ */
+ BlockType &
+ block (const unsigned int row,
+ const unsigned int column);
+
+
+ /**
+ * Access the block with the
+ * given coordinates. Version for
+ * constant objects.
+ */
+ const BlockType &
+ block (const unsigned int row,
+ const unsigned int column) const;
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int n () const;
+
+
+ /**
+ * Return the number of blocks in
+ * a column. Returns zero if no
+ * sparsity pattern is presently
+ * associated to this matrix.
+ */
+ unsigned int n_block_rows () const;
+
+ /**
+ * Return the number of blocks in
+ * a row. Returns zero if no
+ * sparsity pattern is presently
+ * associated to this matrix.
+ */
+ unsigned int n_block_cols () const;
+
+ /**
+ * Set the element <tt>(i,j)</tt>
+ * to <tt>value</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const value_type value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix into the sparse matrix
+ * locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number>
+ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number>
+ void set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number>
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements to values
+ * given by <tt>values</tt> in a
+ * given row in columns given by
+ * col_indices into the sparse
+ * matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ template <typename number>
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add <tt>value</tt> to the
+ * element (<i>i,j</i>). Throws
+ * an error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const value_type value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number>
+ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number>
+ void add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number>
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number>
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Return the value of the entry
+ * (i,j). This may be an
+ * expensive operation and you
+ * should always take care where
+ * to call this function. In
+ * order to avoid abuse, this
+ * function throws an exception
+ * if the wanted element does not
+ * exist in the matrix.
+ */
+ value_type operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This function is mostly like
+ * operator()() in that it
+ * returns the value of the
+ * matrix entry <tt>(i,j)</tt>. The only
+ * difference is that if this
+ * entry does not exist in the
+ * sparsity pattern, then instead
+ * of raising an exception, zero
+ * is returned. While this may be
+ * convenient in some cases, note
+ * that it is simple to write
+ * algorithms that are slow
+ * compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ */
+ value_type el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal element in
+ * the <i>i</i>th row. This function
+ * throws an error if the matrix is not
+ * quadratic and also if the diagonal
+ * blocks of the matrix are not
+ * quadratic.
+ *
+ * This function is considerably
+ * faster than the operator()(),
+ * since for quadratic matrices, the
+ * diagonal entry may be the
+ * first to be stored in each row
+ * and access therefore does not
+ * involve searching for the
+ * right column number.
+ */
+ value_type diag_element (const unsigned int i) const;
+
+ /**
+ * Call the compress() function on all
+ * the subblocks of the matrix.
+ *
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+ /**
+ * Multiply the entire matrix by a
+ * fixed factor.
+ */
+ BlockMatrixBase &operator *= (const value_type factor);
+
+ /**
+ * Divide the entire matrix by a
+ * fixed factor.
+ */
+ BlockMatrixBase &operator /= (const value_type factor);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix <tt>factor*matrix</tt>
+ * is added to <tt>this</tt>. This
+ * function throws an error if the
+ * sparsity patterns of the two involved
+ * matrices do not point to the same
+ * object, since in this case the
+ * operation is cheaper.
+ *
+ * The source matrix may be a sparse
+ * matrix over an arbitrary underlying
+ * scalar type, as long as its data type
+ * is convertible to the data type of
+ * this matrix.
+ */
+ template <class BlockMatrixType>
+ void add (const value_type factor,
+ const BlockMatrixType &matrix);
+
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add $M*src$ to
+ * $dst$ with $M$ being this
+ * matrix.
+ */
+ template <class BlockVectorType>
+ void vmult_add (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ */
+ template <class BlockVectorType>
+ void Tvmult_add (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Return the norm of the vector
+ * <i>v</i> with respect to the
+ * norm induced by this matrix,
+ * i.e. <i>v<sup>T</sup>Mv</i>. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * <i>L<sup>2</sup></i>-norm of a
+ * function equals the matrix
+ * norm with respect to the mass
+ * matrix of the vector
+ * representing the nodal values
+ * of the finite element
+ * function. Note that even
+ * though the function's name
+ * might suggest something
+ * different, for historic
+ * reasons not the norm but its
+ * square is returned, as defined
+ * above by the scalar product.
+ *
+ * Obviously, the matrix needs to
+ * be square for this operation.
+ */
+ template <class BlockVectorType>
+ value_type
+ matrix_norm_square (const BlockVectorType &v) const;
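Since the function returns the square of the induced norm, taking the actual norm requires one extra step; a sketch assuming a mass matrix M and a block vector u of nodal values:

  const double norm_sqr = M.matrix_norm_square (u);   // u^T M u
  const double l2_norm  = std::sqrt (norm_sqr);       // the L^2 norm itself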
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ */
+ template <class BlockVectorType>
+ value_type
+ matrix_scalar_product (const BlockVectorType &u,
+ const BlockVectorType &v) const;
+
+ /**
+ * Compute the residual
+ * <i>r=b-Ax</i>. Write the
+ * residual into <tt>dst</tt> and
+ * return its <i>l</i><sub>2</sub> norm.
+ */
+ template <class BlockVectorType>
+ value_type residual (BlockVectorType &dst,
+ const BlockVectorType &x,
+ const BlockVectorType &b) const;
+
+ /**
+ * STL-like iterator pointing to
+ * the first entry.
+ */
+ iterator begin ();
+
+ /**
+ * Final iterator.
+ */
+ iterator end ();
+
+ /**
+ * STL-like iterator pointing to
+ * the first entry of row <tt>r</tt>.
+ */
+ iterator begin (const unsigned int r);
+
+ /**
+ * Final iterator of row <tt>r</tt>.
+ */
+ iterator end (const unsigned int r);
+ /**
+ * STL-like iterator pointing to
+ * the first entry.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Final iterator.
+ */
+ const_iterator end () const;
+
+ /**
+ * STL-like iterator pointing to
+ * the first entry of row <tt>r</tt>.
+ */
+ const_iterator begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>.
+ */
+ const_iterator end (const unsigned int r) const;
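A typical read-only loop over all stored entries using these iterators (sketch; A stands for any matrix derived from this class, e.g. a BlockSparseMatrix<double>):

  for (BlockSparseMatrix<double>::const_iterator it = A.begin();
       it != A.end(); ++it)
    std::cout << "A(" << it->row() << ',' << it->column()
              << ") = " << it->value() << std::endl;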
+
+ /**
+ * Return a reference to the underlying
+ * BlockIndices data of the rows.
+ */
+ const BlockIndices &get_row_indices () const;
+
+ /**
+ * Return a reference to the underlying
+ * BlockIndices data of the columns.
+ */
+ const BlockIndices &get_column_indices () const;
+
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object. Note that only the memory
+ * reserved on the current processor is
+ * returned in case this is called in
+ * an MPI-based program.
+ */
+ std::size_t memory_consumption () const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ //@}
+ protected:
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ *
+ * This calls clear for all
+ * sub-matrices and then resets this
+ * object to have no blocks at all.
+ *
+ * This function is protected
+ * since it may be necessary to
+ * release additional structures.
+ * A derived class can make it
+ * public again, if it is
+ * sufficient.
+ */
+ void clear ();
+
+ /**
+ * Index arrays for rows and columns.
+ */
+ BlockIndices row_block_indices;
+ BlockIndices column_block_indices;
+
+ /**
+ * Array of sub-matrices.
+ */
+ Table<2,SmartPointer<BlockType, BlockMatrixBase<MatrixType> > > sub_objects;
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ *
+ * Derived classes should call this
+ * function whenever the size of the
+ * sub-objects has changed and the @p
+ * X_block_indices arrays need to be
+ * updated.
+ *
+ * Note that this function is not public
+ * since not all derived classes need to
+ * export its interface. For example, for
+ * the usual deal.II SparseMatrix class,
+ * the sizes are implicitly determined
+ * whenever reinit() is called, and
+ * individual blocks cannot be
+ * resized. For that class, this function
+ * therefore does not have to be
+ * public. On the other hand, for the
+ * PETSc classes, there is no associated
+ * sparsity pattern object that
+ * determines the block sizes, and for
+ * these the function needs to be
+ * publicly available. These classes
+ * therefore export this function.
+ */
+ void collect_sizes ();
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType>
+ void vmult_block_block (BlockVectorType &dst,
+ const BlockVectorType &src) const;
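The forwarding scheme described in the comment looks roughly as follows in a derived class (sketch modeled on BlockSparseMatrix, not copied from the sources):

  template <typename number>
  template <typename block_number>
  void
  BlockSparseMatrix<number>::vmult (BlockVector<block_number>       &dst,
                                    const BlockVector<block_number> &src) const
  {
    // the public vmult(), whose template arguments the compiler can
    // deduce, simply forwards to the protected helper of the base class
    BaseClass::vmult_block_block (dst, src);
  }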
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
+ void vmult_block_nonblock (BlockVectorType &dst,
+ const VectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
+ void vmult_nonblock_block (VectorType &dst,
const BlockVectorType &src) const;
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void Tvmult_block_nonblock (BlockVectorType &dst,
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class VectorType>
+ void vmult_nonblock_nonblock (VectorType &dst,
const VectorType &src) const;
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class BlockVectorType,
- class VectorType>
- void Tvmult_nonblock_block (VectorType &dst,
- const BlockVectorType &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- *
- * Due to problems with deriving template
- * arguments between the block and
- * non-block versions of the vmult/Tvmult
- * functions, the actual functions are
- * implemented in derived classes, with
- * implementations forwarding the calls
- * to the implementations provided here
- * under a unique name for which template
- * arguments can be derived by the
- * compiler.
- */
- template <class VectorType>
- void Tvmult_nonblock_nonblock (VectorType &dst,
- const VectorType &src) const;
-
-
- protected:
-
- /**
- * Some matrix types, in particular PETSc,
- * need to synchronize set and add
- * operations. This has to be done for all
- * matrices in the BlockMatrix.
- * This routine prepares adding of elements
- * by notifying all blocks. Called by all
- * internal routines before adding
- * elements.
- */
- void prepare_add_operation();
-
- /**
- * Notifies all blocks to let them prepare
- * for setting elements, see
- * prepare_add_operation().
- */
- void prepare_set_operation();
-
-
- private:
- /**
- * Temporary vector for counting the
- * elements written into the
- * individual blocks when doing a
- * collective add or set.
- */
- std::vector<unsigned int> counter_within_block;
-
- /**
- * Temporary vector for column
- * indices on each block when writing
- * local to global data on each
- * sparse matrix.
- */
- std::vector<std::vector<unsigned int> > column_indices;
-
- /**
- * Temporary vector for storing the
- * local values (they need to be
- * reordered when writing local to
- * global).
- */
- std::vector<std::vector<double> > column_values;
-
-
- /**
- * Make the iterator class a
- * friend. We have to work around
- * a compiler bug here again.
- */
- template <typename, bool>
- friend class BlockMatrixIterators::Accessor;
-
- template <typename>
- friend class MatrixIterator;
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType>
+ void Tvmult_block_block (BlockVectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
- void Tvmult_block_nonblock (BlockVectorType &dst,
++ void Tvmult_block_nonblock (BlockVectorType &dst,
+ const VectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class BlockVectorType,
+ class VectorType>
+ void Tvmult_nonblock_block (VectorType &dst,
+ const BlockVectorType &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ *
+ * Due to problems with deriving template
+ * arguments between the block and
+ * non-block versions of the vmult/Tvmult
+ * functions, the actual functions are
+ * implemented in derived classes, with
+ * implementations forwarding the calls
+ * to the implementations provided here
+ * under a unique name for which template
+ * arguments can be derived by the
+ * compiler.
+ */
+ template <class VectorType>
+ void Tvmult_nonblock_nonblock (VectorType &dst,
+ const VectorType &src) const;
+
+
+ protected:
+
+ /**
+ * Some matrix types, in particular PETSc,
+ * need to synchronize set and add
+ * operations. This has to be done for all
+ * matrices in the BlockMatrix.
+ * This routine prepares adding of elements
+ * by notifying all blocks. Called by all
+ * internal routines before adding
+ * elements.
+ */
+ void prepare_add_operation();
+
+ /**
+ * Notifies all blocks to let them prepare
+ * for setting elements, see
+ * prepare_add_operation().
+ */
+ void prepare_set_operation();
+
+
+ private:
+ /**
+ * Temporary vector for counting the
+ * elements written into the
+ * individual blocks when doing a
+ * collective add or set.
+ */
+ std::vector<unsigned int> counter_within_block;
+
+ /**
+ * Temporary vector for column
+ * indices on each block when writing
+ * local to global data on each
+ * sparse matrix.
+ */
+ std::vector<std::vector<unsigned int> > column_indices;
+
+ /**
+ * Temporary vector for storing the
+ * local values (they need to be
+ * reordered when writing local to
+ * global).
+ */
+ std::vector<std::vector<double> > column_values;
+
+
+ /**
+ * Make the iterator class a
+ * friend. We have to work around
+ * a compiler bug here again.
+ */
+ template <typename, bool>
+ friend class BlockMatrixIterators::Accessor;
+
+ template <typename>
+ friend class MatrixIterator;
};
template <class BlockMatrix>
inline
Accessor<BlockMatrix, true>::Accessor (
- const BlockMatrix *matrix,
+ const BlockMatrix *matrix,
const unsigned int row,
const unsigned int col)
- :
- matrix(matrix),
- base_iterator(matrix->block(0,0).begin())
+ :
+ matrix(matrix),
+ base_iterator(matrix->block(0,0).begin())
{
Assert(col==0, ExcNotImplemented());
template <class BlockMatrix>
inline
Accessor<BlockMatrix, false>::Accessor (
- BlockMatrix *matrix,
+ BlockMatrix *matrix,
const unsigned int row,
const unsigned int col)
- :
- matrix(matrix),
- base_iterator(matrix->block(0,0).begin())
+ :
+ matrix(matrix),
+ base_iterator(matrix->block(0,0).begin())
{
Assert(col==0, ExcNotImplemented());
- // check if this is a regular row or
- // the end of the matrix
+ // check if this is a regular row or
+ // the end of the matrix
if (row < matrix->m())
{
const std::pair<unsigned int,unsigned int> indices
template <class MatrixType>
template <class BlockVectorType,
- class VectorType>
+ class VectorType>
void
BlockMatrixBase<MatrixType>::
-vmult_block_nonblock (BlockVectorType &dst,
+vmult_block_nonblock (BlockVectorType &dst,
const VectorType &src) const
{
Assert (dst.n_blocks() == n_block_rows(),
template <class MatrixType>
template <class BlockVectorType,
- class VectorType>
+ class VectorType>
void
BlockMatrixBase<MatrixType>::
-Tvmult_block_nonblock (BlockVectorType &dst,
+Tvmult_block_nonblock (BlockVectorType &dst,
const VectorType &src) const
{
Assert (dst.n_blocks() == n_block_cols(),
template <typename number>
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix<number> >
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix<number> > BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef typename BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef typename BaseClass::value_type value_type;
- typedef typename BaseClass::pointer pointer;
- typedef typename BaseClass::const_pointer const_pointer;
- typedef typename BaseClass::reference reference;
- typedef typename BaseClass::const_reference const_reference;
- typedef typename BaseClass::size_type size_type;
- typedef typename BaseClass::iterator iterator;
- typedef typename BaseClass::const_iterator const_iterator;
-
- /**
- * @name Constructors and initalization
- */
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix<number> > BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef typename BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef typename BaseClass::value_type value_type;
+ typedef typename BaseClass::pointer pointer;
+ typedef typename BaseClass::const_pointer const_pointer;
+ typedef typename BaseClass::reference reference;
+ typedef typename BaseClass::const_reference const_reference;
+ typedef typename BaseClass::size_type size_type;
+ typedef typename BaseClass::iterator iterator;
+ typedef typename BaseClass::const_iterator const_iterator;
+
+ /**
+ * @name Constructors and initialization
+ */
//@{
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Constructor. Takes the given
- * matrix sparsity structure to
- * represent the sparsity pattern
- * of this matrix. You can change
- * the sparsity pattern later on
- * by calling the reinit()
- * function.
- *
- * This constructor initializes
- * all sub-matrices with the
- * sub-sparsity pattern within
- * the argument.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit() is not called
- * with a new sparsity structure.
- */
- BlockSparseMatrix (const BlockSparsityPattern &sparsity);
-
- /**
- * Destructor.
- */
- virtual ~BlockSparseMatrix ();
-
-
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the block
- * matrices need to be the same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- *
- * This calls SparseMatrix::clear on all
- * sub-matrices and then resets this
- * object to have no blocks at all.
- */
- void clear ();
-
- /**
- * Reinitialize the sparse matrix
- * with the given sparsity
- * pattern. The latter tells the
- * matrix how many nonzero
- * elements there need to be
- * reserved.
- *
- * Basically, this function only
- * calls SparseMatrix::reinit() of the
- * sub-matrices with the block
- * sparsity patterns of the
- * parameter.
- *
- * The elements of the matrix are
- * set to zero by this function.
- */
- virtual void reinit (const BlockSparsityPattern &sparsity);
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Constructor. Takes the given
+ * matrix sparsity structure to
+ * represent the sparsity pattern
+ * of this matrix. You can change
+ * the sparsity pattern later on
+ * by calling the reinit()
+ * function.
+ *
+ * This constructor initializes
+ * all sub-matrices with the
+ * sub-sparsity pattern within
+ * the argument.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit() is not called
+ * with a new sparsity structure.
+ */
+ BlockSparseMatrix (const BlockSparsityPattern &sparsity);
+
+ /**
+ * Destructor.
+ */
+ virtual ~BlockSparseMatrix ();
+
+
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the block
+ * matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this usually does not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keep the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
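The only intended use of this operator is therefore the idiom below (sketch; system_matrix is a placeholder name):

  system_matrix = 0;      // zero all stored entries, keep the sparsity pattern
  // system_matrix = 3.14; would abort: only the value zero is allowed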
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ *
+ * This calls SparseMatrix::clear on all
+ * sub-matrices and then resets this
+ * object to have no blocks at all.
+ */
+ void clear ();
+
+ /**
+ * Reinitialize the sparse matrix
+ * with the given sparsity
+ * pattern. The latter tells the
+ * matrix how many nonzero
+ * elements there need to be
+ * reserved.
+ *
+ * Basically, this function only
+ * calls SparseMatrix::reinit() of the
+ * sub-matrices with the block
+ * sparsity patterns of the
+ * parameter.
+ *
+ * The elements of the matrix are
+ * set to zero by this function.
+ */
+ virtual void reinit (const BlockSparsityPattern &sparsity);
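A typical setup sequence for a 2x2 block system could look like this (editorial sketch with made-up block sizes; real programs usually fill the pattern via DoFTools):

  BlockSparsityPattern      pattern;
  BlockSparseMatrix<double> matrix;

  pattern.reinit (2, 2);                    // 2x2 block structure
  pattern.block(0,0).reinit (10, 10, 5);    // rows, columns, max entries/row
  pattern.block(0,1).reinit (10,  4, 5);
  pattern.block(1,0).reinit ( 4, 10, 5);
  pattern.block(1,1).reinit ( 4,  4, 5);
  pattern.collect_sizes ();
  // ... add the required entries to the blocks here ...
  pattern.compress ();

  matrix.reinit (pattern);   // the pattern must outlive the matrix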
//@}
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return whether the object is
- * empty. It is empty if either
- * both dimensions are zero or no
- * BlockSparsityPattern is
- * associated.
- */
- bool empty () const;
-
- /**
- * Return the number of entries
- * in a specific row.
- */
- unsigned int get_row_length (const types::global_dof_index row) const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the number of actually
- * nonzero elements. Just counts the
- * number of actually nonzero elements
- * (with absolute value larger than
- * threshold) of all the blocks.
- */
- unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const;
-
- /**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
- *
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
- */
- const BlockSparsityPattern &
- get_sparsity_pattern () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return whether the object is
+ * empty. It is empty if either
+ * both dimensions are zero or no
+ * BlockSparsityPattern is
+ * associated.
+ */
+ bool empty () const;
+
+ /**
+ * Return the number of entries
+ * in a specific row.
+ */
- unsigned int get_row_length (const unsigned int row) const;
++ unsigned int get_row_length (const types::global_dof_index row) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the number of actually
+ * nonzero elements. Just counts the
+ * number of actually nonzero elements
+ * (with absolute value larger than
+ * threshold) of all the blocks.
+ */
+ unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const;
+
+ /**
+ * Return a (constant) reference
+ * to the underlying sparsity
+ * pattern of this matrix.
+ *
+ * Though the return value is
+ * declared <tt>const</tt>, you
+ * should be aware that it may
+ * change if you call any
+ * nonconstant function of
+ * objects which operate on it.
+ */
+ const BlockSparsityPattern &
+ get_sparsity_pattern () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
//@}
- /**
- * @name Multiplications
- */
+ /**
+ * @name Multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- template <typename block_number>
- void vmult (BlockVector<block_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- template <typename block_number,
- typename nonblock_number>
- void vmult (BlockVector<block_number> &dst,
- const Vector<nonblock_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- template <typename block_number,
- typename nonblock_number>
- void vmult (Vector<nonblock_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- template <typename nonblock_number>
- void vmult (Vector<nonblock_number> &dst,
- const Vector<nonblock_number> &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- template <typename block_number>
- void Tvmult (BlockVector<block_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- template <typename block_number,
- typename nonblock_number>
- void Tvmult (BlockVector<block_number> &dst,
- const Vector<nonblock_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- template <typename block_number,
- typename nonblock_number>
- void Tvmult (Vector<nonblock_number> &dst,
- const BlockVector<block_number> &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- template <typename nonblock_number>
- void Tvmult (Vector<nonblock_number> &dst,
- const Vector<nonblock_number> &src) const;
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ template <typename block_number>
+ void vmult (BlockVector<block_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ template <typename block_number,
+ typename nonblock_number>
+ void vmult (BlockVector<block_number> &dst,
+ const Vector<nonblock_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ template <typename block_number,
+ typename nonblock_number>
+ void vmult (Vector<nonblock_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ template <typename nonblock_number>
+ void vmult (Vector<nonblock_number> &dst,
+ const Vector<nonblock_number> &src) const;
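Applying the block variant of vmult(), continuing the hypothetical 2x2 setup from above:

  BlockVector<double> u, f;
  u.reinit (2);              // two blocks ...
  u.block(0).reinit (10);    // ... sized to match the matrix columns
  u.block(1).reinit (4);
  u.collect_sizes ();
  f.reinit (u);              // result vector with the same block structure

  matrix.vmult (f, u);       // f = A u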
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ template <typename block_number>
+ void Tvmult (BlockVector<block_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ template <typename block_number,
+ typename nonblock_number>
- void Tvmult (BlockVector<block_number> &dst,
++ void Tvmult (BlockVector<block_number> &dst,
+ const Vector<nonblock_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ template <typename block_number,
+ typename nonblock_number>
+ void Tvmult (Vector<nonblock_number> &dst,
+ const BlockVector<block_number> &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ template <typename nonblock_number>
+ void Tvmult (Vector<nonblock_number> &dst,
+ const Vector<nonblock_number> &src) const;
//@}
- /**
- * @name Preconditioning methods
- */
+ /**
+ * @name Preconditioning methods
+ */
//@{
- /**
- * Apply the Jacobi
- * preconditioner, which
- * multiplies every element of
- * the <tt>src</tt> vector by the
- * inverse of the respective
- * diagonal element and
- * multiplies the result with the
- * relaxation parameter
- * <tt>omega</tt>.
- *
- * All diagonal blocks must be
- * square matrices for this
- * operation.
- */
- template <class BlockVectorType>
- void precondition_Jacobi (BlockVectorType &dst,
- const BlockVectorType &src,
- const number omega = 1.) const;
-
- /**
- * Apply the Jacobi
- * preconditioner to a simple vector.
- *
- * The matrix must be a single
- * square block for this.
- */
- template <typename number2>
- void precondition_Jacobi (Vector<number2> &dst,
- const Vector<number2> &src,
- const number omega = 1.) const;
+ /**
+ * Apply the Jacobi
+ * preconditioner, which
+ * multiplies every element of
+ * the <tt>src</tt> vector by the
+ * inverse of the respective
+ * diagonal element and
+ * multiplies the result with the
+ * relaxation parameter
+ * <tt>omega</tt>.
+ *
+ * All diagonal blocks must be
+ * square matrices for this
+ * operation.
+ */
+ template <class BlockVectorType>
+ void precondition_Jacobi (BlockVectorType &dst,
+ const BlockVectorType &src,
+ const number omega = 1.) const;
+
+ /**
+ * Apply the Jacobi
+ * preconditioner to a simple vector.
+ *
+ * The matrix must be a single
+ * square block for this.
+ */
+ template <typename number2>
+ void precondition_Jacobi (Vector<number2> &dst,
+ const Vector<number2> &src,
+ const number omega = 1.) const;
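One damped Jacobi sweep with this function might look as follows (sketch, reusing the hypothetical matrix and vectors from the examples above):

  BlockVector<double> r (u), z (u);         // residual and correction
  matrix.residual (r, u, f);                // r = f - A u
  matrix.precondition_Jacobi (z, r, 0.7);   // z = 0.7 * D^{-1} r
  u += z;                                   // apply the update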
//@}
- /**
- * @name Input/Output
- */
+ /**
+ * @name Input/Output
+ */
//@{
- /**
- * Print the matrix in the usual
- * format, i.e. as a matrix and
- * not as a list of nonzero
- * elements. For better
- * readability, elements not in
- * the matrix are displayed as
- * empty space, while matrix
- * elements which are explicitly
- * set to zero are displayed as
- * such.
- *
- * The parameters allow for a
- * flexible setting of the output
- * format: <tt>precision</tt> and
- * <tt>scientific</tt> are used
- * to determine the number
- * format, where <tt>scientific =
- * false</tt> means fixed point
- * notation. A zero entry for
- * <tt>width</tt> makes the
- * function compute a width, but
- * it may be changed to a
- * positive value, if output is
- * crude.
- *
- * Additionally, a character for
- * an empty value may be
- * specified.
- *
- * Finally, the whole matrix can
- * be multiplied with a common
- * denominator to produce more
- * readable output, even
- * integers.
- *
- * @attention This function may
- * produce <b>large</b> amounts
- * of output if applied to a
- * large matrix!
- */
- void print_formatted (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const unsigned int width = 0,
- const char *zero_string = " ",
- const double denominator = 1.) const;
+ /**
+ * Print the matrix in the usual
+ * format, i.e. as a matrix and
+ * not as a list of nonzero
+ * elements. For better
+ * readability, elements not in
+ * the matrix are displayed as
+ * empty space, while matrix
+ * elements which are explicitly
+ * set to zero are displayed as
+ * such.
+ *
+ * The parameters allow for a
+ * flexible setting of the output
+ * format: <tt>precision</tt> and
+ * <tt>scientific</tt> are used
+ * to determine the number
+ * format, where <tt>scientific =
+ * false</tt> means fixed point
+ * notation. A zero value for
+ * <tt>width</tt> makes the
+ * function compute a suitable
+ * width itself; it may be set to
+ * a positive value if the
+ * automatic choice is too crude.
+ *
+ * Additionally, a character for
+ * an empty value may be
+ * specified.
+ *
+ * Finally, the whole matrix can
+ * be multiplied with a common
+ * denominator to produce more
+ * readable output, even
+ * integers.
+ *
+ * @attention This function may
+ * produce <b>large</b> amounts
+ * of output if applied to a
+ * large matrix!
+ */
+ void print_formatted (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const unsigned int width = 0,
+ const char *zero_string = " ",
+ const double denominator = 1.) const;
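For example, to dump a small matrix with three digits in fixed-point notation (sketch; the stream and matrix names are placeholders):

  std::ofstream out ("matrix.txt");
  matrix.print_formatted (out,
                          3,       // precision
                          false,   // fixed-point rather than scientific
                          0,       // let the function choose the width
                          "0");    // entries outside the pattern print as "0"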
//@}
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException0 (ExcBlockDimensionMismatch);
- //@}
-
- private:
- /**
- * Pointer to the block sparsity
- * pattern used for this
- * matrix. In order to guarantee
- * that it is not deleted while
- * still in use, we subscribe to
- * it using the SmartPointer
- * class.
- */
- SmartPointer<const BlockSparsityPattern,BlockSparseMatrix<number> > sparsity_pattern;
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcBlockDimensionMismatch);
+ //@}
+
+ private:
+ /**
+ * Pointer to the block sparsity
+ * pattern used for this
+ * matrix. In order to guarantee
+ * that it is not deleted while
+ * still in use, we subscribe to
+ * it using the SmartPointer
+ * class.
+ */
+ SmartPointer<const BlockSparsityPattern,BlockSparseMatrix<number> > sparsity_pattern;
};
template<typename Number>
class BlockSparseMatrixEZ : public Subscriptor
{
- public:
- /**
- * Default constructor. The
- * result is an empty object with
- * zero dimensions.
- */
- BlockSparseMatrixEZ ();
-
- /**
- * Constructor setting up an
- * object with given unmber of
- * block rows and columns. The
- * blocks themselves still have
- * zero dimension.
- */
- BlockSparseMatrixEZ (const unsigned int block_rows,
- const unsigned int block_cols);
-
- /**
- * Copy constructor. This is
- * needed for some container
- * classes. It creates an object
- * of the same number of block
- * rows and columns. Since it
- * calls the copy constructor of
- * SparseMatrixEZ, the
- * block s must be empty.
- */
- BlockSparseMatrixEZ (const BlockSparseMatrixEZ<Number>&);
-
- /**
- * Copy operator. Like the copy
- * constructor, this may be
- * called for objects with empty
- * blocks only.
- */
- BlockSparseMatrixEZ & operator = (const BlockSparseMatrixEZ<Number>&);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrixEZ & operator = (const double d);
-
-
- /**
- * Set matrix to zero dimensions
- * and release memory.
- */
- void clear ();
-
- /**
- * Initialize to given block
- * numbers. After this
- * operation, the matrix will
- * have the block dimensions
- * provided. Each block will have
- * zero dimensions and must be
- * initialized
- * subsequently. After setting
- * the sizes of the blocks,
- * collect_sizes() must be
- * called to update internal data
- * structures.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_cols);
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- */
- void collect_sizes ();
-
- /**
- * Access the block with the
- * given coordinates.
- */
- SparseMatrixEZ<Number>&
- block (const unsigned int row,
- const unsigned int column);
-
-
- /**
- * Access the block with the
- * given coordinates. Version for
- * constant objects.
- */
- const SparseMatrixEZ<Number>&
- block (const unsigned int row,
- const unsigned int column) const;
-
- /**
- * Return the number of blocks in a
- * column.
- */
- unsigned int n_block_rows () const;
-
- /**
- * Return the number of blocks in a
- * row.
- */
- unsigned int n_block_cols () const;
-
- /**
- * Return whether the object is
- * empty. It is empty if no
- * memory is allocated, which is
- * the same as that both
- * dimensions are zero. This
- * function is just the
- * concatenation of the
- * respective call to all
- * sub-matrices.
- */
- bool empty () const;
-
- /**
- * Return number of rows of this
- * matrix, which equals the
- * dimension of the image
- * space. It is the sum of rows
- * of the rows of sub-matrices.
- */
- types::global_dof_index n_rows () const;
-
- /**
- * Return number of columns of
- * this matrix, which equals the
- * dimension of the range
- * space. It is the sum of
- * columns of the columns of
- * sub-matrices.
- */
- types::global_dof_index n_cols () const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- types::global_dof_index m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- types::global_dof_index n () const;
-
- /**
- * Set the element <tt>(i,j)</tt>
- * to @p value. Throws an error
- * if the entry does not exist or
- * if <tt>value</tt> is not a
- * finite number. Still, it is
- * allowed to store zero values
- * in non-existent fields.
- */
- void set (const types::global_dof_index i,
- const types::global_dof_index j,
- const Number value);
-
- /**
- * Add @p value to the element
- * <tt>(i,j)</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const types::global_dof_index i, const types::global_dof_index j,
- const Number value);
-
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- template <typename somenumber>
- void vmult (BlockVector<somenumber> &dst,
- const BlockVector<somenumber> &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- template <typename somenumber>
- void Tvmult (BlockVector<somenumber> &dst,
- const BlockVector<somenumber> &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add $M*src$ on
- * $dst$ with $M$ being this
- * matrix.
- */
- template <typename somenumber>
- void vmult_add (BlockVector<somenumber> &dst,
- const BlockVector<somenumber> &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add $M^T*src$
- * to $dst$ with $M$ being this
- * matrix. This function does the
- * same as vmult_add() but takes
- * the transposed matrix.
- */
- template <typename somenumber>
- void Tvmult_add (BlockVector<somenumber> &dst,
- const BlockVector<somenumber> &src) const;
-
-
- /**
- * Print statistics. If @p full
- * is @p true, prints a
- * histogram of all existing row
- * lengths and allocated row
- * lengths. Otherwise, just the
- * relation of allocated and used
- * entries is shown.
- */
- template <class STREAM>
- void print_statistics (STREAM& s, bool full = false);
-
- private:
- /**
- * Object storing and managing
- * the transformation of row
- * indices to indices of the
- * sub-objects.
- */
- BlockIndices row_indices;
-
- /**
- * Object storing and managing
- * the transformation of column
- * indices to indices of the
- * sub-objects.
- */
- BlockIndices column_indices;
-
- /**
- * The actual matrices
- */
- Table<2, SparseMatrixEZ<Number> > blocks;
+ public:
+ /**
+ * Default constructor. The
+ * result is an empty object with
+ * zero dimensions.
+ */
+ BlockSparseMatrixEZ ();
+
+ /**
+ * Constructor setting up an
+ * object with given number of
+ * block rows and columns. The
+ * blocks themselves still have
+ * zero dimension.
+ */
+ BlockSparseMatrixEZ (const unsigned int block_rows,
+ const unsigned int block_cols);
+
+ /**
+ * Copy constructor. This is
+ * needed for some container
+ * classes. It creates an object
+ * of the same number of block
+ * rows and columns. Since it
+ * calls the copy constructor of
+ * SparseMatrixEZ, the
+ * blocks must be empty.
+ */
+ BlockSparseMatrixEZ (const BlockSparseMatrixEZ<Number> &);
+
+ /**
+ * Copy operator. Like the copy
+ * constructor, this may be
+ * called for objects with empty
+ * blocks only.
+ */
+ BlockSparseMatrixEZ &operator = (const BlockSparseMatrixEZ<Number> &);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrixEZ &operator = (const double d);
+
+
+ /**
+ * Set matrix to zero dimensions
+ * and release memory.
+ */
+ void clear ();
+
+ /**
+ * Initialize to given block
+ * numbers. After this
+ * operation, the matrix will
+ * have the block dimensions
+ * provided. Each block will have
+ * zero dimensions and must be
+ * initialized
+ * subsequently. After setting
+ * the sizes of the blocks,
+ * collect_sizes() must be
+ * called to update internal data
+ * structures.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_cols);
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ */
+ void collect_sizes ();
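The reinit()/collect_sizes() workflow for this class, as a sketch with made-up block sizes:

  BlockSparseMatrixEZ<double> A;
  A.reinit (2, 2);                 // 2x2 block layout, blocks still empty
  A.block(0,0).reinit (10, 10);    // give every block its dimensions
  A.block(0,1).reinit (10,  4);
  A.block(1,0).reinit ( 4, 10);
  A.block(1,1).reinit ( 4,  4);
  A.collect_sizes ();              // update the internal BlockIndices

  A.set (0,  0, 1.0);              // global indices; no sparsity pattern needed
  A.add (11, 12, 0.5);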
+
+ /**
+ * Access the block with the
+ * given coordinates.
+ */
+ SparseMatrixEZ<Number> &
+ block (const unsigned int row,
+ const unsigned int column);
+
+
+ /**
+ * Access the block with the
+ * given coordinates. Version for
+ * constant objects.
+ */
+ const SparseMatrixEZ<Number> &
+ block (const unsigned int row,
+ const unsigned int column) const;
+
+ /**
+ * Return the number of blocks in a
+ * column.
+ */
+ unsigned int n_block_rows () const;
+
+ /**
+ * Return the number of blocks in a
+ * row.
+ */
+ unsigned int n_block_cols () const;
+
+ /**
+ * Return whether the object is
+ * empty. It is empty if no
+ * memory is allocated, which is
+ * the same as saying that both
+ * dimensions are zero. This
+ * function is just the
+ * concatenation of the
+ * respective call to all
+ * sub-matrices.
+ */
+ bool empty () const;
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the
+ * dimension of the image
+ * space. It is the sum of the
+ * row counts of the (block-)rows
+ * of sub-matrices.
+ */
- unsigned int n_rows () const;
++ types::global_dof_index n_rows () const;
+
+ /**
+ * Return number of columns of
+ * this matrix, which equals the
+ * dimension of the range
+ * space. It is the sum of the
+ * column counts of the
+ * (block-)columns of sub-matrices.
+ */
- unsigned int n_cols () const;
++ types::global_dof_index n_cols () const;
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
- unsigned int m () const;
++ types::global_dof_index m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
- unsigned int n () const;
++ types::global_dof_index n () const;
+
+ /**
+ * Set the element <tt>(i,j)</tt>
+ * to @p value. Throws an error
+ * if the entry does not exist or
+ * if <tt>value</tt> is not a
+ * finite number. Still, it is
+ * allowed to store zero values
+ * in non-existent fields.
+ */
- void set (const unsigned int i,
- const unsigned int j,
++ void set (const types::global_dof_index i,
++ const types::global_dof_index j,
+ const Number value);
+
+ /**
+ * Add @p value to the element
+ * <tt>(i,j)</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
- void add (const unsigned int i, const unsigned int j,
++ void add (const types::global_dof_index i, const types::global_dof_index j,
+ const Number value);
+
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ template <typename somenumber>
+ void vmult (BlockVector<somenumber> &dst,
+ const BlockVector<somenumber> &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ template <typename somenumber>
+ void Tvmult (BlockVector<somenumber> &dst,
+ const BlockVector<somenumber> &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add $M*src$ to
+ * $dst$ with $M$ being this
+ * matrix.
+ */
+ template <typename somenumber>
+ void vmult_add (BlockVector<somenumber> &dst,
+ const BlockVector<somenumber> &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add $M^T*src$
+ * to $dst$ with $M$ being this
+ * matrix. This function does the
+ * same as vmult_add() but takes
+ * the transposed matrix.
+ */
+ template <typename somenumber>
+ void Tvmult_add (BlockVector<somenumber> &dst,
+ const BlockVector<somenumber> &src) const;
+
+
+ /**
+ * Print statistics. If @p full
+ * is @p true, prints a
+ * histogram of all existing row
+ * lengths and allocated row
+ * lengths. Otherwise, just the
+ * relation of allocated and used
+ * entries is shown.
+ */
+ template <class STREAM>
+ void print_statistics (STREAM &s, bool full = false);
+
+ private:
+ /**
+ * Object storing and managing
+ * the transformation of row
+ * indices to indices of the
+ * sub-objects.
+ */
+ BlockIndices row_indices;
+
+ /**
+ * Object storing and managing
+ * the transformation of column
+ * indices to indices of the
+ * sub-objects.
+ */
+ BlockIndices column_indices;
+
+ /**
+ * The actual matrices
+ */
+ Table<2, SparseMatrixEZ<Number> > blocks;
};
/*@}*/
Assert (numbers::is_finite(value), ExcNumberNotFinite());
- const std::pair<unsigned int,unsigned int>
+ const std::pair<unsigned int,types::global_dof_index>
- row_index = row_indices.global_to_local (i),
- col_index = column_indices.global_to_local (j);
+ row_index = row_indices.global_to_local (i),
+ col_index = column_indices.global_to_local (j);
block(row_index.first,col_index.first).set (row_index.second,
col_index.second,
value);
Assert (numbers::is_finite(value), ExcNumberNotFinite());
- const std::pair<unsigned int,unsigned int>
+ const std::pair<unsigned int,types::global_dof_index>
- row_index = row_indices.global_to_local (i),
- col_index = column_indices.global_to_local (j);
+ row_index = row_indices.global_to_local (i),
+ col_index = column_indices.global_to_local (j);
block(row_index.first,col_index.first).add (row_index.second,
col_index.second,
value);
{
const unsigned int rows = n_block_rows();
const unsigned int columns = n_block_cols();
- std::vector<unsigned int> row_sizes (rows);
- std::vector<unsigned int> col_sizes (columns);
+ std::vector<types::global_dof_index> row_sizes (rows);
+ std::vector<types::global_dof_index> col_sizes (columns);
- // first find out the row sizes
- // from the first block column
+ // first find out the row sizes
+ // from the first block column
for (unsigned int r=0; r<rows; ++r)
row_sizes[r] = blocks[r][0].m();
- // then check that the following
- // block columns have the same
- // sizes
+ // then check that the following
+ // block columns have the same
+ // sizes
for (unsigned int c=1; c<columns; ++c)
for (unsigned int r=0; r<rows; ++r)
Assert (row_sizes[r] == blocks[r][c].m(),
template <class SparsityPatternBase>
class BlockSparsityPatternBase : public Subscriptor
{
- public:
- /**
- * Define a value which is used
- * to indicate that a certain
- * value in the @p colnums array
- * is unused, i.e. does not
- * represent a certain column
- * number index.
- *
- * This value is only an alias to
- * the respective value of the
- * SparsityPattern class.
- */
- static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- BlockSparsityPatternBase ();
-
- /**
- * Initialize the matrix with the
- * given number of block rows and
- * columns. The blocks themselves
- * are still empty, and you have
- * to call collect_sizes() after
- * you assign them sizes.
- */
- BlockSparsityPatternBase (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * Copy constructor. This
- * constructor is only allowed to
- * be called if the sparsity pattern to be
- * copied is empty, i.e. there
- * are no block allocated at
- * present. This is for the same
- * reason as for the
- * SparsityPattern, see there
- * for the details.
- */
- BlockSparsityPatternBase (const BlockSparsityPatternBase &bsp);
-
- /**
- * Destructor.
- */
- ~BlockSparsityPatternBase ();
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * unitialized ones, i.e. ones
- * for which also the sizes are
- * not yet set. You have to do
- * that by calling the reinit()
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- BlockSparsityPatternBase & operator = (const BlockSparsityPatternBase &);
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- */
- void collect_sizes ();
-
- /**
- * Access the block with the
- * given coordinates.
- */
- SparsityPatternBase &
- block (const unsigned int row,
- const unsigned int column);
-
-
- /**
- * Access the block with the
- * given coordinates. Version for
- * constant objects.
- */
- const SparsityPatternBase &
- block (const unsigned int row,
- const unsigned int column) const;
-
- /**
- * Grant access to the object
- * describing the distribution of
- * row indices to the individual
- * blocks.
- */
- const BlockIndices &
- get_row_indices () const;
-
- /**
- * Grant access to the object
- * describing the distribution of
- * column indices to the individual
- * blocks.
- */
- const BlockIndices &
- get_column_indices () const;
-
- /**
- * This function compresses the
- * sparsity structures that this
- * object represents. It simply
- * calls @p compress for all
- * sub-objects.
- */
- void compress ();
-
- /**
- * Return the number of blocks in a
- * column.
- */
- unsigned int n_block_rows () const;
-
- /**
- * Return the number of blocks in a
- * row.
- */
- unsigned int n_block_cols () const;
-
- /**
- * Return whether the object is
- * empty. It is empty if no
- * memory is allocated, which is
- * the same as that both
- * dimensions are zero. This
- * function is just the
- * concatenation of the
- * respective call to all
- * sub-matrices.
- */
- bool empty () const;
-
- /**
- * Return the maximum number of
- * entries per row. It returns
- * the maximal number of entries
- * per row accumulated over all
- * blocks in a row, and the
- * maximum over all rows.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Add a nonzero entry to the matrix.
- * This function may only be called
- * for non-compressed sparsity patterns.
- *
- * If the entry already exists, nothing
- * bad happens.
- *
- * This function simply finds out
- * to which block <tt>(i,j)</tt> belongs
- * and then relays to that block.
- */
- void add (const types::global_dof_index i, const types::global_dof_index j);
-
- /**
- * Add several nonzero entries to the
- * specified matrix row. This function
- * may only be called for
- * non-compressed sparsity patterns.
- *
- * If some of the entries already
- * exist, nothing bad happens.
- *
- * This function simply finds out to
- * which blocks <tt>(row,col)</tt> for
- * <tt>col</tt> in the iterator range
- * belong and then relays to those
- * blocks.
- */
- template <typename ForwardIterator>
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
-
- /**
- * Return number of rows of this
- * matrix, which equals the
- * dimension of the image
- * space. It is the sum of rows
- * of the (block-)rows of
- * sub-matrices.
- */
- types::global_dof_index n_rows () const;
-
- /**
- * Return number of columns of
- * this matrix, which equals the
- * dimension of the range
- * space. It is the sum of
- * columns of the (block-)columns
- * of sub-matrices.
- */
- types::global_dof_index n_cols () const;
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const types::global_dof_index i, const types::global_dof_index j) const;
-
- /**
- * Number of entries in a
- * specific row, added up over
- * all the blocks that form this
- * row.
- */
- unsigned int row_length (const types::global_dof_index row) const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- *
- * This function may only be
- * called if the matrix struct is
- * compressed. It does not make
- * too much sense otherwise
- * anyway.
- *
- * In the present context, it is
- * the sum of the values as
- * returned by the sub-objects.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Print the sparsity of the
- * matrix. The output consists of
- * one line per row of the format
- * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
- * is the row number and
- * <i>jn</i> are the allocated
- * columns in this row.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the sparsity of the matrix
- * in a format that <tt>gnuplot</tt> understands
- * and which can be used to plot the
- * sparsity pattern in a graphical
- * way. This is the same functionality
- * implemented for usual sparsity
- * patterns, see @ref SparsityPattern.
- */
- void print_gnuplot (std::ostream &out) const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- /**
- * Exception
- */
- DeclException0 (ExcInvalidConstructorCall);
- //@}
-
- protected:
-
- /**
- * Number of block rows.
- */
- unsigned int rows;
-
- /**
- * Number of block columns.
- */
- unsigned int columns;
-
- /**
- * Array of sparsity patterns.
- */
- Table<2,SmartPointer<SparsityPatternBase, BlockSparsityPatternBase<SparsityPatternBase> > > sub_objects;
-
- /**
- * Object storing and managing
- * the transformation of row
- * indices to indices of the
- * sub-objects.
- */
- BlockIndices row_indices;
-
- /**
- * Object storing and managing
- * the transformation of column
- * indices to indices of the
- * sub-objects.
- */
- BlockIndices column_indices;
-
- private:
- /**
- * Temporary vector for counting the
- * elements written into the
- * individual blocks when doing a
- * collective add or set.
- */
- std::vector<unsigned int> counter_within_block;
-
- /**
- * Temporary vector for column
- * indices on each block when writing
- * local to global data on each
- * sparse matrix.
- */
- std::vector<std::vector<unsigned int> > block_column_indices;
-
- /**
- * Make the block sparse matrix a
- * friend, so that it can use our
- * #row_indices and
- * #column_indices objects.
- */
- template <typename number>
- friend class BlockSparseMatrix;
+ public:
+ /**
+ * Define a value which is used
+ * to indicate that a certain
+ * value in the @p colnums array
+ * is unused, i.e. does not
+ * represent a certain column
+ * number index.
+ *
+ * This value is only an alias to
+ * the respective value of the
+ * SparsityPattern class.
+ */
+ static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ BlockSparsityPatternBase ();
+
+ /**
+ * Initialize the matrix with the
+ * given number of block rows and
+ * columns. The blocks themselves
+ * are still empty, and you have
+ * to call collect_sizes() after
+ * you assign them sizes.
+ */
+ BlockSparsityPatternBase (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Copy constructor. This
+ * constructor is only allowed to
+ * be called if the sparsity pattern to be
+ * copied is empty, i.e. there
+    * are no blocks allocated at
+ * present. This is for the same
+ * reason as for the
+ * SparsityPattern, see there
+ * for the details.
+ */
+ BlockSparsityPatternBase (const BlockSparsityPatternBase &bsp);
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparsityPatternBase ();
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+    * uninitialized ones, i.e. ones
+    * whose sizes are not yet
+    * set. You have to do
+ * that by calling the reinit()
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ BlockSparsityPatternBase &operator = (const BlockSparsityPatternBase &);
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ */
+ void collect_sizes ();
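+
+  // A minimal sketch of the intended call sequence, using the derived class
+  // BlockSparsityPattern; the block sizes n_u, n_p and the row length 5 are
+  // placeholders:
+  //
+  //   BlockSparsityPattern sparsity_pattern;
+  //   sparsity_pattern.reinit (2, 2);                    // 2x2 block layout
+  //   sparsity_pattern.block(0,0).reinit (n_u, n_u, 5);  // size each block
+  //   sparsity_pattern.block(0,1).reinit (n_u, n_p, 5);
+  //   sparsity_pattern.block(1,0).reinit (n_p, n_u, 5);
+  //   sparsity_pattern.block(1,1).reinit (n_p, n_p, 5);
+  //   sparsity_pattern.collect_sizes ();                 // update cached sizes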
+
+ /**
+ * Access the block with the
+ * given coordinates.
+ */
+ SparsityPatternBase &
+ block (const unsigned int row,
+ const unsigned int column);
+
+
+ /**
+ * Access the block with the
+ * given coordinates. Version for
+ * constant objects.
+ */
+ const SparsityPatternBase &
+ block (const unsigned int row,
+ const unsigned int column) const;
+
+ /**
+ * Grant access to the object
+ * describing the distribution of
+ * row indices to the individual
+ * blocks.
+ */
+ const BlockIndices &
+ get_row_indices () const;
+
+ /**
+ * Grant access to the object
+ * describing the distribution of
+ * column indices to the individual
+ * blocks.
+ */
+ const BlockIndices &
+ get_column_indices () const;
+
+ /**
+ * This function compresses the
+ * sparsity structures that this
+ * object represents. It simply
+ * calls @p compress for all
+ * sub-objects.
+ */
+ void compress ();
+
+ /**
+ * Return the number of blocks in a
+ * column.
+ */
+ unsigned int n_block_rows () const;
+
+ /**
+ * Return the number of blocks in a
+ * row.
+ */
+ unsigned int n_block_cols () const;
+
+ /**
+ * Return whether the object is
+ * empty. It is empty if no
+ * memory is allocated, which is
+    * the same as saying that both
+ * dimensions are zero. This
+ * function is just the
+ * concatenation of the
+ * respective call to all
+ * sub-matrices.
+ */
+ bool empty () const;
+
+ /**
+ * Return the maximum number of
+ * entries per row. It returns
+ * the maximal number of entries
+ * per row accumulated over all
+ * blocks in a row, and the
+ * maximum over all rows.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Add a nonzero entry to the matrix.
+ * This function may only be called
+ * for non-compressed sparsity patterns.
+ *
+ * If the entry already exists, nothing
+ * bad happens.
+ *
+ * This function simply finds out
+ * to which block <tt>(i,j)</tt> belongs
+ * and then relays to that block.
+ */
- void add (const unsigned int i, const unsigned int j);
++ void add (const types::global_dof_index i, const types::global_dof_index j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified matrix row. This function
+ * may only be called for
+ * non-compressed sparsity patterns.
+ *
+ * If some of the entries already
+ * exist, nothing bad happens.
+ *
+ * This function simply finds out to
+ * which blocks <tt>(row,col)</tt> for
+ * <tt>col</tt> in the iterator range
+ * belong and then relays to those
+ * blocks.
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
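+
+  // Continuing the sketch above: once the blocks are sized and
+  // collect_sizes() has been called, entries are added through global
+  // indices (the index values below are placeholders):
+  //
+  //   sparsity_pattern.add (3, 7);
+  //   std::vector<types::global_dof_index> cols (3);
+  //   cols[0] = 2; cols[1] = 5; cols[2] = 7;
+  //   sparsity_pattern.add_entries (3, cols.begin(), cols.end());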
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the
+ * dimension of the image
+ * space. It is the sum of rows
+ * of the (block-)rows of
+ * sub-matrices.
+ */
- unsigned int n_rows () const;
++ types::global_dof_index n_rows () const;
+
+ /**
+ * Return number of columns of
+ * this matrix, which equals the
+ * dimension of the range
+ * space. It is the sum of
+ * columns of the (block-)columns
+ * of sub-matrices.
+ */
- unsigned int n_cols () const;
++ types::global_dof_index n_cols () const;
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
- bool exists (const unsigned int i, const unsigned int j) const;
++ bool exists (const types::global_dof_index i, const types::global_dof_index j) const;
+
+ /**
+ * Number of entries in a
+ * specific row, added up over
+ * all the blocks that form this
+ * row.
+ */
- unsigned int row_length (const unsigned int row) const;
++ unsigned int row_length (const types::global_dof_index row) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ *
+ * This function may only be
+ * called if the matrix struct is
+ * compressed. It does not make
+ * too much sense otherwise
+ * anyway.
+ *
+ * In the present context, it is
+ * the sum of the values as
+ * returned by the sub-objects.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Print the sparsity of the
+ * matrix. The output consists of
+ * one line per row of the format
+ * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
+ * is the row number and
+ * <i>jn</i> are the allocated
+ * columns in this row.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the sparsity of the matrix
+ * in a format that <tt>gnuplot</tt> understands
+ * and which can be used to plot the
+ * sparsity pattern in a graphical
+ * way. This is the same functionality
+ * implemented for usual sparsity
+ * patterns, see @ref SparsityPattern.
+ */
+ void print_gnuplot (std::ostream &out) const;
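+
+  // A sketch of dumping the pattern for visual inspection; the file name is
+  // arbitrary:
+  //
+  //   std::ofstream out ("sparsity.gpl");
+  //   sparsity_pattern.print_gnuplot (out);
+  //   // and then, inside gnuplot:  plot "sparsity.gpl" with points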
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidConstructorCall);
+ //@}
+
+ protected:
+
+ /**
+ * Number of block rows.
+ */
+ unsigned int rows;
+
+ /**
+ * Number of block columns.
+ */
+ unsigned int columns;
+
+ /**
+ * Array of sparsity patterns.
+ */
+ Table<2,SmartPointer<SparsityPatternBase, BlockSparsityPatternBase<SparsityPatternBase> > > sub_objects;
+
+ /**
+ * Object storing and managing
+ * the transformation of row
+ * indices to indices of the
+ * sub-objects.
+ */
+ BlockIndices row_indices;
+
+ /**
+ * Object storing and managing
+ * the transformation of column
+ * indices to indices of the
+ * sub-objects.
+ */
+ BlockIndices column_indices;
+
+ private:
+ /**
+ * Temporary vector for counting the
+ * elements written into the
+ * individual blocks when doing a
+ * collective add or set.
+ */
+ std::vector<unsigned int> counter_within_block;
+
+ /**
+ * Temporary vector for column
+ * indices on each block when writing
+ * local to global data on each
+ * sparse matrix.
+ */
+ std::vector<std::vector<unsigned int> > block_column_indices;
+
+ /**
+ * Make the block sparse matrix a
+ * friend, so that it can use our
+ * #row_indices and
+ * #column_indices objects.
+ */
+ template <typename number>
+ friend class BlockSparseMatrix;
};
*/
class BlockSparsityPattern : public BlockSparsityPatternBase<SparsityPattern>
{
- public:
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- BlockSparsityPattern ();
-
- /**
- * Initialize the matrix with the
- * given number of block rows and
- * columns. The blocks themselves
- * are still empty, and you have
- * to call collect_sizes() after
- * you assign them sizes.
- */
- BlockSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
-
- /**
- * Forwarding to
- * BlockSparsityPatternBase::reinit().
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns as well as a row
- * length vector.
- *
- * The row length vector should
- * be in the format produced by
- * DoFTools. Alternatively, there
- * is a simplified version,
- * where each of the inner
- * vectors has length one. Then,
- * the corresponding entry is
- * used as the maximal row length.
- *
- * For the diagonal blocks, the
- * inner SparsityPattern is
- * initialized with optimized
- * diagonals, while this is not
- * done for the off-diagonal blocks.
- */
- void reinit (const BlockIndices& row_indices,
- const BlockIndices& col_indices,
- const std::vector<std::vector<types::global_dof_index> >& row_lengths);
-
-
- /**
- * Return whether the structure
- * is compressed or not,
- * i.e. whether all sub-matrices
- * are compressed.
- */
- bool is_compressed () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- /**
- * Copy data from an object of
- * type
- * BlockCompressedSparsityPattern,
- * i.e. resize this object to the
- * size of the given argument,
- * and copy over the contents of
- * each of the
- * subobjects. Previous content
- * of this object is lost.
- */
- void copy_from (const BlockCompressedSparsityPattern &csp);
-
- /**
- * Copy data from an object of
- * type
- * BlockCompressedSetSparsityPattern,
- * i.e. resize this object to the
- * size of the given argument,
- * and copy over the contents of
- * each of the
- * subobjects. Previous content
- * of this object is lost.
- */
- void copy_from (const BlockCompressedSetSparsityPattern &csp);
-
- /**
- * Copy data from an object of
- * type
- * BlockCompressedSimpleSparsityPattern,
- * i.e. resize this object to the
- * size of the given argument,
- * and copy over the contents of
- * each of the
- * subobjects. Previous content
- * of this object is lost.
- */
- void copy_from (const BlockCompressedSimpleSparsityPattern &csp);
+ public:
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ BlockSparsityPattern ();
+
+ /**
+ * Initialize the matrix with the
+ * given number of block rows and
+ * columns. The blocks themselves
+ * are still empty, and you have
+ * to call collect_sizes() after
+ * you assign them sizes.
+ */
+ BlockSparsityPattern (const unsigned int n_rows,
+ const unsigned int n_columns);
+
+ /**
+ * Forwarding to
+ * BlockSparsityPatternBase::reinit().
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns as well as a row
+ * length vector.
+ *
+ * The row length vector should
+ * be in the format produced by
+ * DoFTools. Alternatively, there
+ * is a simplified version,
+ * where each of the inner
+ * vectors has length one. Then,
+ * the corresponding entry is
+ * used as the maximal row length.
+ *
+ * For the diagonal blocks, the
+ * inner SparsityPattern is
+ * initialized with optimized
+ * diagonals, while this is not
+ * done for the off-diagonal blocks.
+ */
+ void reinit (const BlockIndices &row_indices,
+ const BlockIndices &col_indices,
- const std::vector<std::vector<unsigned int> > &row_lengths);
++ const std::vector<std::vector<types::global_dof_index> > &row_lengths);
+
+
+ /**
+ * Return whether the structure
+ * is compressed or not,
+ * i.e. whether all sub-matrices
+ * are compressed.
+ */
+ bool is_compressed () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Copy data from an object of
+ * type
+ * BlockCompressedSparsityPattern,
+ * i.e. resize this object to the
+ * size of the given argument,
+ * and copy over the contents of
+ * each of the
+ * subobjects. Previous content
+ * of this object is lost.
+ */
+ void copy_from (const BlockCompressedSparsityPattern &csp);
+
+ /**
+ * Copy data from an object of
+ * type
+ * BlockCompressedSetSparsityPattern,
+ * i.e. resize this object to the
+ * size of the given argument,
+ * and copy over the contents of
+ * each of the
+ * subobjects. Previous content
+ * of this object is lost.
+ */
+ void copy_from (const BlockCompressedSetSparsityPattern &csp);
+
+ /**
+ * Copy data from an object of
+ * type
+ * BlockCompressedSimpleSparsityPattern,
+ * i.e. resize this object to the
+ * size of the given argument,
+ * and copy over the contents of
+ * each of the
+ * subobjects. Previous content
+ * of this object is lost.
+ */
+ void copy_from (const BlockCompressedSimpleSparsityPattern &csp);
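+
+  // A sketch of the usual build-then-copy sequence; dofs_per_block is a
+  // placeholder std::vector<types::global_dof_index> with one entry per
+  // block, and dof_handler is an already distributed DoFHandler:
+  //
+  //   BlockCompressedSimpleSparsityPattern csp (dofs_per_block, dofs_per_block);
+  //   DoFTools::make_sparsity_pattern (dof_handler, csp);
+  //
+  //   BlockSparsityPattern sparsity_pattern;
+  //   sparsity_pattern.copy_from (csp);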
*/
class BlockCompressedSparsityPattern : public BlockSparsityPatternBase<CompressedSparsityPattern>
{
- public:
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- BlockCompressedSparsityPattern ();
-
- /**
- * Initialize the matrix with the
- * given number of block rows and
- * columns. The blocks themselves
- * are still empty, and you have
- * to call collect_sizes() after
- * you assign them sizes.
- */
- BlockCompressedSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns. This function is
- * equivalent to calling the
- * previous constructor with the
- * length of the two index vector
- * and then entering the index
- * values.
- */
- BlockCompressedSparsityPattern (const std::vector<types::global_dof_index>& row_block_sizes,
- const std::vector<types::global_dof_index>& col_block_sizes);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns.
- */
- BlockCompressedSparsityPattern (const BlockIndices& row_indices,
- const BlockIndices& col_indices);
-
- /**
- * Resize the matrix to a tensor
- * product of matrices with
- * dimensions defined by the
- * arguments.
- *
- * The matrix will have as many
- * block rows and columns as
- * there are entries in the two
- * arguments. The block at
- * position (<i>i,j</i>) will
- * have the dimensions
- * <tt>row_block_sizes[i]</tt>
- * times <tt>col_block_sizes[j]</tt>.
- */
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
-
- /**
- * Resize the matrix to a tensor
- * product of matrices with
- * dimensions defined by the
- * arguments. The two
- * BlockIndices objects must be
- * initialized and the sparsity
- * pattern will have the
- * same block structure afterwards.
- */
- void reinit (const BlockIndices& row_indices, const BlockIndices& col_indices);
-
- /**
- * Allow the use of the reinit
- * functions of the base class as
- * well.
- */
- using BlockSparsityPatternBase<CompressedSparsityPattern>::reinit;
+ public:
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ BlockCompressedSparsityPattern ();
+
+ /**
+ * Initialize the matrix with the
+ * given number of block rows and
+ * columns. The blocks themselves
+ * are still empty, and you have
+ * to call collect_sizes() after
+ * you assign them sizes.
+ */
+ BlockCompressedSparsityPattern (const unsigned int n_rows,
+ const unsigned int n_columns);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns. This function is
+ * equivalent to calling the
+ * previous constructor with the
+    * lengths of the two index vectors
+ * and then entering the index
+ * values.
+ */
- BlockCompressedSparsityPattern (const std::vector<unsigned int> &row_block_sizes,
- const std::vector<unsigned int> &col_block_sizes);
++ BlockCompressedSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
++ const std::vector<types::global_dof_index> &col_block_sizes);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns.
+ */
+ BlockCompressedSparsityPattern (const BlockIndices &row_indices,
+ const BlockIndices &col_indices);
+
+ /**
+ * Resize the matrix to a tensor
+ * product of matrices with
+ * dimensions defined by the
+ * arguments.
+ *
+ * The matrix will have as many
+ * block rows and columns as
+ * there are entries in the two
+ * arguments. The block at
+ * position (<i>i,j</i>) will
+ * have the dimensions
+ * <tt>row_block_sizes[i]</tt>
+ * times <tt>col_block_sizes[j]</tt>.
+ */
- void reinit (const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes);
++ void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
++ const std::vector< types::global_dof_index > &col_block_sizes);
+
+ /**
+ * Resize the matrix to a tensor
+ * product of matrices with
+ * dimensions defined by the
+ * arguments. The two
+ * BlockIndices objects must be
+ * initialized and the sparsity
+ * pattern will have the
+ * same block structure afterwards.
+ */
+ void reinit (const BlockIndices &row_indices, const BlockIndices &col_indices);
+
+ /**
+ * Allow the use of the reinit
+ * functions of the base class as
+ * well.
+ */
+ using BlockSparsityPatternBase<CompressedSparsityPattern>::reinit;
};
*/
class BlockCompressedSetSparsityPattern : public BlockSparsityPatternBase<CompressedSetSparsityPattern>
{
- public:
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- BlockCompressedSetSparsityPattern ();
-
- /**
- * Initialize the matrix with the
- * given number of block rows and
- * columns. The blocks themselves
- * are still empty, and you have
- * to call collect_sizes() after
- * you assign them sizes.
- */
- BlockCompressedSetSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns. This function is
- * equivalent to calling the
- * previous constructor with the
- * length of the two index vector
- * and then entering the index
- * values.
- */
- BlockCompressedSetSparsityPattern (const std::vector<types::global_dof_index>& row_block_sizes,
- const std::vector<types::global_dof_index>& col_block_sizes);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns.
- */
- BlockCompressedSetSparsityPattern (const BlockIndices& row_indices,
- const BlockIndices& col_indices);
-
- /**
- * Resize the matrix to a tensor
- * product of matrices with
- * dimensions defined by the
- * arguments.
- *
- * The matrix will have as many
- * block rows and columns as
- * there are entries in the two
- * arguments. The block at
- * position (<i>i,j</i>) will
- * have the dimensions
- * <tt>row_block_sizes[i]</tt>
- * times <tt>col_block_sizes[j]</tt>.
- */
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
-
- /**
- * Resize the matrix to a tensor
- * product of matrices with
- * dimensions defined by the
- * arguments. The two
- * BlockIndices objects must be
- * initialized and the sparsity
- * pattern will have the
- * same block structure afterwards.
- */
- void reinit (const BlockIndices& row_indices, const BlockIndices& col_indices);
-
- /**
- * Allow the use of the reinit
- * functions of the base class as
- * well.
- */
- using BlockSparsityPatternBase<CompressedSetSparsityPattern>::reinit;
+ public:
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ BlockCompressedSetSparsityPattern ();
+
+ /**
+ * Initialize the matrix with the
+ * given number of block rows and
+ * columns. The blocks themselves
+ * are still empty, and you have
+ * to call collect_sizes() after
+ * you assign them sizes.
+ */
+ BlockCompressedSetSparsityPattern (const unsigned int n_rows,
+ const unsigned int n_columns);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns. This function is
+ * equivalent to calling the
+ * previous constructor with the
+    * lengths of the two index vectors
+ * and then entering the index
+ * values.
+ */
- BlockCompressedSetSparsityPattern (const std::vector<unsigned int> &row_block_sizes,
- const std::vector<unsigned int> &col_block_sizes);
++ BlockCompressedSetSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
++ const std::vector<types::global_dof_index> &col_block_sizes);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns.
+ */
+ BlockCompressedSetSparsityPattern (const BlockIndices &row_indices,
+ const BlockIndices &col_indices);
+
+ /**
+ * Resize the matrix to a tensor
+ * product of matrices with
+ * dimensions defined by the
+ * arguments.
+ *
+ * The matrix will have as many
+ * block rows and columns as
+ * there are entries in the two
+ * arguments. The block at
+ * position (<i>i,j</i>) will
+ * have the dimensions
+ * <tt>row_block_sizes[i]</tt>
+ * times <tt>col_block_sizes[j]</tt>.
+ */
- void reinit (const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes);
++ void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
++ const std::vector< types::global_dof_index > &col_block_sizes);
+
+ /**
+ * Resize the matrix to a tensor
+ * product of matrices with
+ * dimensions defined by the
+ * arguments. The two
+ * BlockIndices objects must be
+ * initialized and the sparsity
+ * pattern will have the
+ * same block structure afterwards.
+ */
+ void reinit (const BlockIndices &row_indices, const BlockIndices &col_indices);
+
+ /**
+ * Allow the use of the reinit
+ * functions of the base class as
+ * well.
+ */
+ using BlockSparsityPatternBase<CompressedSetSparsityPattern>::reinit;
};
*/
class BlockCompressedSimpleSparsityPattern : public BlockSparsityPatternBase<CompressedSimpleSparsityPattern>
{
- public:
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- BlockCompressedSimpleSparsityPattern ();
-
- /**
- * Initialize the matrix with the
- * given number of block rows and
- * columns. The blocks themselves
- * are still empty, and you have
- * to call collect_sizes() after
- * you assign them sizes.
- */
- BlockCompressedSimpleSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns. This function is
- * equivalent to calling the
- * previous constructor with the
- * length of the two index vector
- * and then entering the index
- * values.
- */
- BlockCompressedSimpleSparsityPattern (const std::vector<types::global_dof_index>& row_block_sizes,
- const std::vector<types::global_dof_index>& col_block_sizes);
-
- /**
- * Initialize the pattern with symmetric
- * blocks. The number of IndexSets in the
- * vector determine the number of rows
- * and columns of blocks. The size of
- * each block is determined by the size()
- * of the respective IndexSet. Each block
- * only stores the rows given by the
- * values in the IndexSet, which is
- * useful for distributed memory parallel
- * computations and usually corresponds
- * to the locally owned DoFs.
- */
- BlockCompressedSimpleSparsityPattern (const std::vector<IndexSet> & partitioning);
-
- /**
- * Resize the pattern to a tensor product
- * of matrices with dimensions defined by
- * the arguments.
- *
- * The matrix will have as many
- * block rows and columns as
- * there are entries in the two
- * arguments. The block at
- * position (<i>i,j</i>) will
- * have the dimensions
- * <tt>row_block_sizes[i]</tt>
- * times <tt>col_block_sizes[j]</tt>.
- */
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
-
- /**
- * Resize the pattern with symmetric
- * blocks determined by the size() of
- * each IndexSet. See the constructor
- * taking a vector of IndexSets for
- * details.
- */
- void reinit(const std::vector<IndexSet> & partitioning);
-
- /**
- * Access to column number field.
- * Return the column number of
- * the @p index th entry in row @p row.
- */
- unsigned int column_number (const unsigned int row,
- const unsigned int index) const;
-
- /**
- * Allow the use of the reinit
- * functions of the base class as
- * well.
- */
- using BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::reinit;
+ public:
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ BlockCompressedSimpleSparsityPattern ();
+
+ /**
+ * Initialize the matrix with the
+ * given number of block rows and
+ * columns. The blocks themselves
+ * are still empty, and you have
+ * to call collect_sizes() after
+ * you assign them sizes.
+ */
+ BlockCompressedSimpleSparsityPattern (const unsigned int n_rows,
+ const unsigned int n_columns);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns. This function is
+ * equivalent to calling the
+ * previous constructor with the
+    * lengths of the two index vectors
+ * and then entering the index
+ * values.
+ */
- BlockCompressedSimpleSparsityPattern (const std::vector<unsigned int> &row_block_sizes,
- const std::vector<unsigned int> &col_block_sizes);
++ BlockCompressedSimpleSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
++ const std::vector<types::global_dof_index> &col_block_sizes);
+
+ /**
+ * Initialize the pattern with symmetric
+ * blocks. The number of IndexSets in the
+    * vector determines the number of rows
+ * and columns of blocks. The size of
+ * each block is determined by the size()
+ * of the respective IndexSet. Each block
+ * only stores the rows given by the
+ * values in the IndexSet, which is
+ * useful for distributed memory parallel
+ * computations and usually corresponds
+ * to the locally owned DoFs.
+ */
+ BlockCompressedSimpleSparsityPattern (const std::vector<IndexSet> &partitioning);
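+
+  // A sketch for the distributed-memory case; locally_owned_partitioning is
+  // a placeholder std::vector<IndexSet> holding, for each block, the DoFs
+  // owned by this MPI process:
+  //
+  //   BlockCompressedSimpleSparsityPattern csp (locally_owned_partitioning);
+  //   DoFTools::make_sparsity_pattern (dof_handler, csp);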
+
+ /**
+ * Resize the pattern to a tensor product
+ * of matrices with dimensions defined by
+ * the arguments.
+ *
+ * The matrix will have as many
+ * block rows and columns as
+ * there are entries in the two
+ * arguments. The block at
+ * position (<i>i,j</i>) will
+ * have the dimensions
+ * <tt>row_block_sizes[i]</tt>
+ * times <tt>col_block_sizes[j]</tt>.
+ */
- void reinit (const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes);
++ void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
++ const std::vector< types::global_dof_index > &col_block_sizes);
+
+ /**
+ * Resize the pattern with symmetric
+ * blocks determined by the size() of
+ * each IndexSet. See the constructor
+ * taking a vector of IndexSets for
+ * details.
+ */
+ void reinit(const std::vector<IndexSet> &partitioning);
+
+ /**
+ * Access to column number field.
+ * Return the column number of
+ * the @p index th entry in row @p row.
+ */
+ unsigned int column_number (const unsigned int row,
+ const unsigned int index) const;
+
+ /**
+ * Allow the use of the reinit
+ * functions of the base class as
+ * well.
+ */
+ using BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::reinit;
};
class BlockSparsityPattern :
public dealii::BlockSparsityPatternBase<SparsityPattern>
{
- public:
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- BlockSparsityPattern ();
-
- /**
- * Initialize the matrix with the
- * given number of block rows and
- * columns. The blocks themselves
- * are still empty, and you have
- * to call collect_sizes() after
- * you assign them sizes.
- */
- BlockSparsityPattern (const unsigned int n_rows,
- const unsigned int n_columns);
-
- /**
- * Initialize the pattern with
- * two BlockIndices for the block
- * structures of matrix rows and
- * columns. This function is
- * equivalent to calling the
- * previous constructor with the
- * length of the two index vector
- * and then entering the index
- * values.
- */
- BlockSparsityPattern (const std::vector<types::global_dof_index>& row_block_sizes,
- const std::vector<types::global_dof_index>& col_block_sizes);
-
- /**
- * Initialize the pattern with an array
- * Epetra_Map that specifies both rows
- * and columns of the matrix (so the
- * final matrix will be a square
- * matrix), where the Epetra_Map
- * specifies the parallel distribution
- * of the degrees of freedom on the
- * individual block. This function is
- * equivalent to calling the second
- * constructor with the length of the
- * mapping vector and then entering the
- * index values.
- */
- BlockSparsityPattern (const std::vector<Epetra_Map>& parallel_partitioning);
-
- /**
- * Initialize the pattern with an array
- * of index sets that specifies both rows
- * and columns of the matrix (so the
- * final matrix will be a square matrix),
- * where the size() of the IndexSets
- * specifies the size of the blocks and
- * the values in each IndexSet denotes
- * the rows that are going to be saved in
- * each block.
- */
- BlockSparsityPattern (const std::vector<IndexSet>& parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Resize the matrix to a tensor
- * product of matrices with
- * dimensions defined by the
- * arguments.
- *
- * The matrix will have as many
- * block rows and columns as
- * there are entries in the two
- * arguments. The block at
- * position (<i>i,j</i>) will
- * have the dimensions
- * <tt>row_block_sizes[i]</tt>
- * times <tt>col_block_sizes[j]</tt>.
- */
- void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
- const std::vector< types::global_dof_index > &col_block_sizes);
-
- /**
- * Resize the matrix to a square tensor
- * product of matrices with parallel
- * distribution according to the
- * specifications in the array of
- * Epetra_Maps.
- */
- void reinit (const std::vector<Epetra_Map>& parallel_partitioning);
-
- /**
- * Resize the matrix to a square tensor
- * product of matrices. See the
- * constructor that takes a vector of
- * IndexSets for details.
- */
- void reinit (const std::vector<IndexSet>& parallel_partitioning,
- const MPI_Comm & communicator = MPI_COMM_WORLD);
-
-
- /**
- * Allow the use of the reinit
- * functions of the base class as
- * well.
- */
+ public:
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ BlockSparsityPattern ();
+
+ /**
+ * Initialize the matrix with the
+ * given number of block rows and
+ * columns. The blocks themselves
+ * are still empty, and you have
+ * to call collect_sizes() after
+ * you assign them sizes.
+ */
+ BlockSparsityPattern (const unsigned int n_rows,
+ const unsigned int n_columns);
+
+ /**
+ * Initialize the pattern with
+ * two BlockIndices for the block
+ * structures of matrix rows and
+ * columns. This function is
+ * equivalent to calling the
+ * previous constructor with the
+    * lengths of the two index vectors
+ * and then entering the index
+ * values.
+ */
- BlockSparsityPattern (const std::vector<unsigned int> &row_block_sizes,
- const std::vector<unsigned int> &col_block_sizes);
++ BlockSparsityPattern (const std::vector<types::global_dof_index> &row_block_sizes,
++ const std::vector<types::global_dof_index> &col_block_sizes);
+
+ /**
+ * Initialize the pattern with an array
+    * of Epetra_Map objects that specifies both rows
+ * and columns of the matrix (so the
+ * final matrix will be a square
+ * matrix), where the Epetra_Map
+ * specifies the parallel distribution
+ * of the degrees of freedom on the
+ * individual block. This function is
+ * equivalent to calling the second
+ * constructor with the length of the
+ * mapping vector and then entering the
+ * index values.
+ */
+    BlockSparsityPattern (const std::vector<Epetra_Map> &parallel_partitioning);
+
+ /**
+ * Initialize the pattern with an array
+ * of index sets that specifies both rows
+ * and columns of the matrix (so the
+ * final matrix will be a square matrix),
+ * where the size() of the IndexSets
+ * specifies the size of the blocks and
+    * the values in each IndexSet denote
+ * the rows that are going to be saved in
+ * each block.
+ */
+    BlockSparsityPattern (const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Resize the matrix to a tensor
+ * product of matrices with
+ * dimensions defined by the
+ * arguments.
+ *
+ * The matrix will have as many
+ * block rows and columns as
+ * there are entries in the two
+ * arguments. The block at
+ * position (<i>i,j</i>) will
+ * have the dimensions
+ * <tt>row_block_sizes[i]</tt>
+ * times <tt>col_block_sizes[j]</tt>.
+ */
- void reinit (const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes);
++ void reinit (const std::vector< types::global_dof_index > &row_block_sizes,
++ const std::vector< types::global_dof_index > &col_block_sizes);
+
+ /**
+ * Resize the matrix to a square tensor
+ * product of matrices with parallel
+ * distribution according to the
+ * specifications in the array of
+ * Epetra_Maps.
+ */
+    void reinit (const std::vector<Epetra_Map> &parallel_partitioning);
+
+ /**
+ * Resize the matrix to a square tensor
+ * product of matrices. See the
+ * constructor that takes a vector of
+ * IndexSets for details.
+ */
+    void reinit (const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+
+ /**
+ * Allow the use of the reinit
+ * functions of the base class as
+ * well.
+ */
using BlockSparsityPatternBase<SparsityPattern>::reinit;
};
}
template <class SparsityPatternBase>
inline
void
-BlockSparsityPatternBase<SparsityPatternBase>::add (const unsigned int i,
- const unsigned int j)
+BlockSparsityPatternBase<SparsityPatternBase>::add (const types::global_dof_index i,
+ const types::global_dof_index j)
{
- // if you get an error here, are
- // you sure you called
- // <tt>collect_sizes()</tt> before?
+ // if you get an error here, are
+ // you sure you called
+ // <tt>collect_sizes()</tt> before?
- const std::pair<unsigned int,unsigned int>
+ const std::pair<unsigned int,types::global_dof_index>
- row_index = row_indices.global_to_local (i),
- col_index = column_indices.global_to_local (j);
+ row_index = row_indices.global_to_local (i),
+ col_index = column_indices.global_to_local (j);
sub_objects[row_index.first][col_index.first]->add (row_index.second,
col_index.second);
}
counter_within_block.resize (this->n_block_cols());
}
- const unsigned int n_cols = static_cast<unsigned int>(end - begin);
+ const types::global_dof_index n_cols = static_cast<types::global_dof_index>(end - begin);
- // Resize sub-arrays to n_cols. This
- // is a bit wasteful, but we resize
- // only a few times (then the maximum
- // row length won't increase that
- // much any more). At least we know
- // that all arrays are going to be of
- // the same size, so we can check
- // whether the size of one is large
- // enough before actually going
- // through all of them.
+ // Resize sub-arrays to n_cols. This
+ // is a bit wasteful, but we resize
+ // only a few times (then the maximum
+ // row length won't increase that
+ // much any more). At least we know
+ // that all arrays are going to be of
+ // the same size, so we can check
+ // whether the size of one is large
+ // enough before actually going
+ // through all of them.
if (block_column_indices[0].size() < n_cols)
for (unsigned int i=0; i<this->n_block_cols(); ++i)
block_column_indices[i].resize(n_cols);
for (unsigned int i=0; i<this->n_block_cols(); ++i)
counter_within_block[i] = 0;
- // Go through the column indices to
- // find out which portions of the
- // values should be set in which
- // block of the matrix. We need to
- // touch all the data, since we can't
- // be sure that the data of one block
- // is stored contiguously (in fact,
- // indices will be intermixed when it
- // comes from an element matrix).
+ // Go through the column indices to
+ // find out which portions of the
+ // values should be set in which
+ // block of the matrix. We need to
+ // touch all the data, since we can't
+ // be sure that the data of one block
+ // is stored contiguously (in fact,
+ // indices will be intermixed when it
+ // comes from an element matrix).
for (ForwardIterator it = begin; it != end; ++it)
{
- const unsigned int col = *it;
+ const types::global_dof_index col = *it;
- const std::pair<unsigned int, unsigned int>
+ const std::pair<unsigned int, types::global_dof_index>
- col_index = this->column_indices.global_to_local(col);
+ col_index = this->column_indices.global_to_local(col);
const unsigned int local_index = counter_within_block[col_index.first]++;
Assert (length == n_cols, ExcInternalError());
#endif
- // Now we found out about where the
- // individual columns should start and
- // where we should start reading out
- // data. Now let's write the data into
- // the individual blocks!
+ // Now we found out about where the
+ // individual columns should start and
+ // where we should start reading out
+ // data. Now let's write the data into
+ // the individual blocks!
- const std::pair<unsigned int,unsigned int>
+ const std::pair<unsigned int,types::global_dof_index>
- row_index = this->row_indices.global_to_local (row);
+ row_index = this->row_indices.global_to_local (row);
for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
{
if (counter_within_block[block_col] == 0)
template <class SparsityPatternBase>
inline
bool
-BlockSparsityPatternBase<SparsityPatternBase>::exists (const unsigned int i,
- const unsigned int j) const
+BlockSparsityPatternBase<SparsityPatternBase>::exists (const types::global_dof_index i,
+ const types::global_dof_index j) const
{
- // if you get an error here, are
- // you sure you called
- // <tt>collect_sizes()</tt> before?
+ // if you get an error here, are
+ // you sure you called
+ // <tt>collect_sizes()</tt> before?
- const std::pair<unsigned int,unsigned int>
+ const std::pair<unsigned int,types::global_dof_index>
- row_index = row_indices.global_to_local (i),
- col_index = column_indices.global_to_local (j);
+ row_index = row_indices.global_to_local (i),
+ col_index = column_indices.global_to_local (j);
return sub_objects[row_index.first][col_index.first]->exists (row_index.second,
- col_index.second);
+ col_index.second);
}
inline
unsigned int
BlockSparsityPatternBase<SparsityPatternBase>::
-row_length (const unsigned int row) const
+row_length (const types::global_dof_index row) const
{
- const std::pair<unsigned int,unsigned int>
+ const std::pair<unsigned int,types::global_dof_index>
- row_index = row_indices.global_to_local (row);
+ row_index = row_indices.global_to_local (row);
unsigned int c = 0;
template <typename Number>
class BlockVector : public BlockVectorBase<Vector<Number> >
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector<Number> > BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef typename BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef typename BaseClass::value_type value_type;
- typedef typename BaseClass::real_type real_type;
- typedef typename BaseClass::pointer pointer;
- typedef typename BaseClass::const_pointer const_pointer;
- typedef typename BaseClass::reference reference;
- typedef typename BaseClass::const_reference const_reference;
- typedef typename BaseClass::size_type size_type;
- typedef typename BaseClass::iterator iterator;
- typedef typename BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor. There are three
- * ways to use this
- * constructor. First, without
- * any arguments, it generates
- * an object with no
- * blocks. Given one argument,
- * it initializes <tt>num_blocks</tt>
- * blocks, but these blocks have
- * size zero. The third variant
- * finally initializes all
- * blocks to the same size
- * <tt>block_size</tt>.
- *
- * Confer the other constructor
- * further down if you intend to
- * use blocks of different
- * sizes.
- */
- explicit BlockVector (const unsigned int num_blocks = 0,
- const types::global_dof_index block_size = 0);
-
- /**
- * Copy-Constructor. Dimension set to
- * that of V, all components are copied
- * from V
- */
- BlockVector (const BlockVector<Number>& V);
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector<Number> > BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef typename BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef typename BaseClass::value_type value_type;
+ typedef typename BaseClass::real_type real_type;
+ typedef typename BaseClass::pointer pointer;
+ typedef typename BaseClass::const_pointer const_pointer;
+ typedef typename BaseClass::reference reference;
+ typedef typename BaseClass::const_reference const_reference;
+ typedef typename BaseClass::size_type size_type;
+ typedef typename BaseClass::iterator iterator;
+ typedef typename BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor. There are three
+ * ways to use this
+ * constructor. First, without
+ * any arguments, it generates
+ * an object with no
+ * blocks. Given one argument,
+ * it initializes <tt>num_blocks</tt>
+ * blocks, but these blocks have
+ * size zero. The third variant
+ * finally initializes all
+ * blocks to the same size
+ * <tt>block_size</tt>.
+ *
+ * Confer the other constructor
+ * further down if you intend to
+ * use blocks of different
+ * sizes.
+ */
- explicit BlockVector (const unsigned int num_blocks = 0,
- const unsigned int block_size = 0);
++ explicit BlockVector (const unsigned int num_blocks = 0,
++ const types::global_dof_index block_size = 0);
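+
+  // A small sketch of the three variants (n_u and n_p are placeholder
+  // sizes):
+  //
+  //   BlockVector<double> a;          // no blocks at all
+  //   BlockVector<double> b (3);      // three blocks of size zero
+  //   BlockVector<double> c (2, 10);  // two blocks with 10 elements each
+  //
+  //   // blocks of different sizes: resize them individually, then update
+  //   // the cached block sizes
+  //   b.block(0).reinit (n_u);
+  //   b.block(1).reinit (n_p);
+  //   b.block(2).reinit (n_p);
+  //   b.collect_sizes ();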
+
+ /**
+ * Copy-Constructor. Dimension set to
+ * that of V, all components are copied
+ * from V
+ */
+ BlockVector (const BlockVector<Number> &V);
#ifndef DEAL_II_EXPLICIT_CONSTRUCTOR_BUG
#ifdef DEAL_II_USE_TRILINOS
- /**
- * A copy constructor taking a
- * (parallel) Trilinos block
- * vector and copying it into
- * the deal.II own format.
- */
- BlockVector (const TrilinosWrappers::BlockVector &v);
+ /**
+ * A copy constructor taking a
+ * (parallel) Trilinos block
+ * vector and copying it into
+ * the deal.II own format.
+ */
+ BlockVector (const TrilinosWrappers::BlockVector &v);
#endif
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>block_sizes.size()</tt> and
- * initialize each block with
- * <tt>block_sizes[i]</tt> zero
- * elements.
- */
- BlockVector (const std::vector<types::global_dof_index> &block_sizes);
-
- /**
- * Constructor. Initialize vector
- * to the structure found in the
- * BlockIndices argument.
- */
- BlockVector (const BlockIndices& block_indices);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>n.size()</tt>. Initialize the
- * vector with the elements
- * pointed to by the range of
- * iterators given as second and
- * third argument. Apart from the
- * first argument, this
- * constructor is in complete
- * analogy to the respective
- * constructor of the
- * <tt>std::vector</tt> class, but the
- * first argument is needed in
- * order to know how to subdivide
- * the block vector into
- * different blocks.
- */
- template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Call the compress() function on all
- * the subblocks.
- *
- * This functionality only needs to be
- * called if using MPI based vectors and
- * exists in other objects for
- * compatibility.
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Copy operator: fill all components of
- * the vector with the given scalar
- * value.
- */
- BlockVector & operator = (const value_type s);
-
- /**
- * Copy operator for arguments of the
- * same type. Resize the
- * present vector if necessary.
- */
- BlockVector &
- operator= (const BlockVector &V);
-
- /**
- * Copy operator for template arguments
- * of different types. Resize the
- * present vector if necessary.
- */
- template <class Number2>
- BlockVector &
- operator= (const BlockVector<Number2> &V);
-
- /**
- * Copy a regular vector into a
- * block vector.
- */
- BlockVector &
- operator= (const Vector<Number> &V);
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>block_sizes.size()</tt> and
+ * initialize each block with
+ * <tt>block_sizes[i]</tt> zero
+ * elements.
+ */
- BlockVector (const std::vector<unsigned int> &block_sizes);
++ BlockVector (const std::vector<types::global_dof_index> &block_sizes);
+
+ /**
+ * Constructor. Initialize vector
+ * to the structure found in the
+ * BlockIndices argument.
+ */
+ BlockVector (const BlockIndices &block_indices);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>n.size()</tt>. Initialize the
+ * vector with the elements
+ * pointed to by the range of
+ * iterators given as second and
+ * third argument. Apart from the
+ * first argument, this
+ * constructor is in complete
+ * analogy to the respective
+ * constructor of the
+ * <tt>std::vector</tt> class, but the
+ * first argument is needed in
+ * order to know how to subdivide
+ * the block vector into
+ * different blocks.
+ */
+ template <typename InputIterator>
+ BlockVector (const std::vector<unsigned int> &n,
+ const InputIterator first,
+ const InputIterator end);
+
+ /**
+ * Destructor. Clears memory
+ */
+ ~BlockVector ();
+
+ /**
+ * Call the compress() function on all
+ * the subblocks.
+ *
+ * This functionality only needs to be
+ * called if using MPI based vectors and
+ * exists in other objects for
+ * compatibility.
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+ /**
+ * Copy operator: fill all components of
+ * the vector with the given scalar
+ * value.
+ */
+ BlockVector &operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of the
+ * same type. Resize the
+ * present vector if necessary.
+ */
+ BlockVector &
+ operator= (const BlockVector &V);
+
+ /**
+ * Copy operator for template arguments
+ * of different types. Resize the
+ * present vector if necessary.
+ */
+ template <class Number2>
+ BlockVector &
+ operator= (const BlockVector<Number2> &V);
+
+ /**
+ * Copy a regular vector into a
+ * block vector.
+ */
+ BlockVector &
+ operator= (const Vector<Number> &V);
#ifdef DEAL_II_USE_TRILINOS
- /**
- * A copy constructor from a
- * Trilinos block vector to a
- * deal.II block vector.
- */
- BlockVector &
- operator= (const TrilinosWrappers::BlockVector &V);
+ /**
+    * A copy operator from a
+ * Trilinos block vector to a
+ * deal.II block vector.
+ */
+ BlockVector &
+ operator= (const TrilinosWrappers::BlockVector &V);
#endif
- /**
- * Reinitialize the BlockVector to
- * contain <tt>num_blocks</tt> blocks of
- * size <tt>block_size</tt> each.
- *
- * If the second argument is left
- * at its default value, then the
- * block vector allocates the
- * specified number of blocks but
- * leaves them at zero size. You
- * then need to later
- * reinitialize the individual
- * blocks, and call
- * collect_sizes() to update the
- * block system's knowledge of
-    * its individual blocks' sizes.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const unsigned int num_blocks,
- const types::global_dof_index block_size = 0,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector such that
- * it contains
- * <tt>block_sizes.size()</tt>
- * blocks. Each block is reinitialized to
- * dimension <tt>block_sizes[i]</tt>.
- *
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const std::vector<types::global_dof_index> &N,
- const bool fast=false);
-
- /**
- * Reinitialize the BlockVector
- * to reflect the structure found
- * in BlockIndices.
- *
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const BlockIndices& block_indices,
- const bool fast=false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() of one of the
- * blocks, then subsequent
- * actions of this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- template <typename Number2>
- void reinit (const BlockVector<Number2> &V,
- const bool fast=false);
-
- /**
- * Scale each element of the
- * vector by the given factor.
- *
- * This function is deprecated
- * and will be removed in a
- * future version. Use
- * <tt>operator *=</tt> and
- * <tt>operator /=</tt> instead.
- *
- * @deprecated Use <tt>operator*=</tt>
- * instead.
- */
- void scale (const value_type factor);
-
- /**
- * Multiply each element of this
- * vector by the corresponding
- * element of <tt>v</tt>.
- */
- template <class BlockVector2>
- void scale (const BlockVector2 &v);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector<Number> &v);
-
- /**
- * Output of vector in user-defined
- * format.
- */
- void print (const char* format = 0) const;
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Write the vector en bloc to a
- * stream. This is done in a binary mode,
- * so the output is neither readable by
- * humans nor (probably) by other
- * computers using a different operating
- * system or number format.
- */
- void block_write (std::ostream &out) const;
-
- /**
- * Read a vector en block from a
- * file. This is done using the inverse
- * operations to the above function, so
- * it is reasonably fast because the
- * bitstream is not interpreted.
- *
- * The vector is resized if necessary.
- *
- * A primitive form of error checking is
- * performed which will recognize the
- * bluntest attempts to interpret some
- * data as a vector stored bitwise to a
- * file, but not more.
- */
- void block_read (std::istream &in);
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
- //@}
+ /**
+ * Reinitialize the BlockVector to
+ * contain <tt>num_blocks</tt> blocks of
+ * size <tt>block_size</tt> each.
+ *
+ * If the second argument is left
+ * at its default value, then the
+ * block vector allocates the
+ * specified number of blocks but
+ * leaves them at zero size. You
+ * then need to later
+ * reinitialize the individual
+ * blocks, and call
+ * collect_sizes() to update the
+ * block system's knowledge of
+ * its individual blocks' sizes.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const unsigned int num_blocks,
- const unsigned int block_size = 0,
++ const types::global_dof_index block_size = 0,
+ const bool fast = false);
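A minimal sketch (editorial, not part of the patch) of the usage pattern described above: allocate the blocks without sizes, reinit() each block individually, then call collect_sizes(). The include path is assumed from the usual deal.II layout.

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>

int main ()
{
  dealii::BlockVector<double> v;
  v.reinit (2);              // two blocks, both still of size zero
  v.block(0).reinit (100);   // size the individual blocks...
  v.block(1).reinit (30);
  v.collect_sizes ();        // ...and update the block vector's size cache
  // from here on, v.size() == 130
}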
+
+ /**
+ * Reinitialize the BlockVector such that
+ * it contains
+ * <tt>block_sizes.size()</tt>
+ * blocks. Each block is reinitialized to
+ * dimension <tt>block_sizes[i]</tt>.
+ *
+ * If the number of blocks is the
+ * same as before this function
+ * was called, all vectors remain
+ * the same and reinit() is
+ * called for each vector.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
++ void reinit (const std::vector<types::global_dof_index> &N,
++ const bool fast=false);
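For comparison, a sketch (editorial, not part of the patch) of the reinit() variant just declared, which takes the block sizes directly; <deal.II/base/types.h> is assumed to provide types::global_dof_index.

// Editor's sketch, not part of this patch.
#include <deal.II/base/types.h>
#include <deal.II/lac/block_vector.h>
#include <vector>

int main ()
{
  std::vector<dealii::types::global_dof_index> block_sizes (3);
  block_sizes[0] = 10;
  block_sizes[1] = 20;
  block_sizes[2] = 30;

  dealii::BlockVector<double> v;
  v.reinit (block_sizes);    // three zero-filled blocks of sizes 10, 20, 30
}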
+
+ /**
+ * Reinitialize the BlockVector
+ * to reflect the structure found
+ * in BlockIndices.
+ *
+ * If the number of blocks is the
+ * same as before this function
+ * was called, all vectors remain
+ * the same and reinit() is
+ * called for each vector.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const BlockIndices &block_indices,
+ const bool fast=false);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ template <typename Number2>
+ void reinit (const BlockVector<Number2> &V,
+ const bool fast=false);
+
+ /**
+ * Scale each element of the
+ * vector by the given factor.
+ *
+ * This function is deprecated
+ * and will be removed in a
+ * future version. Use
+ * <tt>operator *=</tt> and
+ * <tt>operator /=</tt> instead.
+ *
+ * @deprecated Use <tt>operator*=</tt>
+ * instead.
+ */
+ void scale (const value_type factor);
+
+ /**
+ * Multiply each element of this
+ * vector by the corresponding
+ * element of <tt>v</tt>.
+ */
+ template <class BlockVector2>
+ void scale (const BlockVector2 &v);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector<Number> &v);
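A short sketch (editorial, not part of the patch) of the swap semantics described above; the two-argument constructor and the global swap() overload are assumed from the standard BlockVector interface.

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>

int main ()
{
  dealii::BlockVector<double> u (2, 5);
  dealii::BlockVector<double> v (2, 5);
  u = 1.;                    // fill all entries of u with 1
  v = 2.;                    // fill all entries of v with 2

  u.swap (v);                // member function: only data pointers change hands
  swap (u, v);               // global function, back to the original state
}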
+
+ /**
+ * Output of vector in user-defined
+ * format.
+ */
+ void print (const char *format = 0) const;
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Write the vector en bloc to a
+ * stream. This is done in a binary mode,
+ * so the output is neither readable by
+ * humans nor (probably) by other
+ * computers using a different operating
+ * system or number format.
+ */
+ void block_write (std::ostream &out) const;
+
+ /**
+ * Read a vector en bloc from a
+ * file. This is done using the inverse
+ * operations to the above function, so
+ * it is reasonably fast because the
+ * bitstream is not interpreted.
+ *
+ * The vector is resized if necessary.
+ *
+ * A primitive form of error checking is
+ * performed which will recognize the
+ * bluntest attempts to interpret some
+ * data as a vector stored bitwise to a
+ * file, but not more.
+ */
+ void block_read (std::istream &in);
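The pair block_write()/block_read() can be exercised as in the following sketch (editorial, not part of the patch); the file name is arbitrary and the two-argument constructor is assumed from the standard interface.

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>
#include <fstream>

int main ()
{
  dealii::BlockVector<double> v (2, 10);
  v = 3.14;

  {
    std::ofstream out ("vector.dat", std::ios::binary);
    v.block_write (out);     // binary dump, not human readable
  }

  dealii::BlockVector<double> w;
  std::ifstream in ("vector.dat", std::ios::binary);
  w.block_read (in);         // w is resized to match v if necessary
}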
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+ //@}
};
/*@}*/
};
- /**
- * General random-access iterator
- * class for block vectors. Since
- * we do not want to have two
- * classes for non-const
- * iterator and
- * const_iterator, we take a
- * second template argument which
- * denotes whether the vector we
- * point into is a constant object
- * or not. The first template
- * argument is always the number
- * type of the block vector in use.
- *
- * This class satisfies all
- * requirements of random access
- * iterators defined in the C++
- * standard. Operations on these
- * iterators are constant in the
- * number of elements in the block
- * vector. However, they are
- * sometimes linear in the number
- * of blocks in the vector, but
- * since that does rarely change
- * dynamically within an
- * application, this is a constant
- * and we again have that the
- * iterator satisfies the
- * requirements of a random access
- * iterator.
- *
- * The implementation of this class
- * has to work around some problems
- * in compilers and standard
- * libraries. One of these requires
- * us to write all comparison
- * operators twice, once comparison
- * with iterators of the same type
- * and once with iterators pointing
- * to numbers of opposite constness
- * specification. The reason is
- * that if we would have written
- * the comparison operators as a
- * template on the constness of the
- * right hand side, then gcc2.95
- * signals an error that these
- * operators ambiguate operators
- * declared somewhere within the
- * standard library. Likewise, we
- * have to work around some
- * problems with granting other
- * iterators friendship. This makes
- * the implementation somewhat
- * non-optimal at places, but at
- * least everything works.
- *
- * @author Wolfgang Bangerth, 2001
- */
+ /**
+ * General random-access iterator
+ * class for block vectors. Since
+ * we do not want to have two
+ * classes for non-const
+ * iterator and
+ * const_iterator, we take a
+ * second template argument which
+ * denotes whether the vector we
+ * point into is a constant object
+ * or not. The first template
+ * argument is always the number
+ * type of the block vector in use.
+ *
+ * This class satisfies all
+ * requirements of random access
+ * iterators defined in the C++
+ * standard. Operations on these
+ * iterators are constant in the
+ * number of elements in the block
+ * vector. However, they are
+ * sometimes linear in the number
+ * of blocks in the vector, but
+ * since that rarely changes
+ * dynamically within an
+ * application, this is a constant
+ * and we again have that the
+ * iterator satisfies the
+ * requirements of a random access
+ * iterator.
+ *
+ * The implementation of this class
+ * has to work around some problems
+ * in compilers and standard
+ * libraries. One of these requires
+ * us to write all comparison
+ * operators twice, once comparison
+ * with iterators of the same type
+ * and once with iterators pointing
+ * to numbers of opposite constness
+ * specification. The reason is
+ * that if we had written
+ * the comparison operators as a
+ * template on the constness of the
+ * right hand side, then gcc2.95
+ * would signal an error that these
+ * operators are ambiguous with operators
+ * declared somewhere within the
+ * standard library. Likewise, we
+ * have to work around some
+ * problems with granting other
+ * iterators friendship. This makes
+ * the implementation somewhat
+ * non-optimal at places, but at
+ * least everything works.
+ *
+ * @author Wolfgang Bangerth, 2001
+ */
template <class BlockVectorType, bool constness>
class Iterator :
- public std::iterator<std::random_access_iterator_tag,
- typename Types<BlockVectorType,constness>::value_type>
+ public std::iterator<std::random_access_iterator_tag,
+ typename Types<BlockVectorType,constness>::value_type>
{
- private:
- /**
- * Typedef an iterator with
- * opposite constness
- * requirements on the elements
- * it points to.
- */
- typedef Iterator<BlockVectorType,!constness> InverseConstnessIterator;
-
- public:
- /**
- * Type of the number this
- * iterator points
- * to. Depending on the value
- * of the second template
- * parameter, this is either a
- * constant or non-const
- * number.
- */
- typedef
- typename Types<BlockVectorType,constness>::value_type
- value_type;
-
- /**
- * Declare some typedefs which
- * are standard for iterators
- * and are used by algorithms
- * to enquire about the
- * specifics of the iterators
- * they work on.
- */
- typedef std::random_access_iterator_tag iterator_type;
- typedef std::ptrdiff_t difference_type;
- typedef typename BlockVectorType::reference reference;
- typedef value_type *pointer;
-
- typedef
- typename Types<BlockVectorType,constness>::dereference_type
- dereference_type;
-
- /**
- * Typedef the type of the
- * block vector (which differs
- * in constness, depending on
- * the second template
- * parameter).
- */
- typedef
- typename Types<BlockVectorType,constness>::BlockVector
- BlockVector;
-
- /**
- * Construct an iterator from
- * a vector to which we point
- * and the global index of
- * the element pointed to.
- *
- * Depending on the value of
- * the <tt>constness</tt> template
- * argument of this class,
- * the first argument of this
- * constructor is either is a
- * const or non-const
- * reference.
- */
- Iterator (BlockVector &parent,
- const types::global_dof_index global_index);
-
- /**
- * Copy constructor.
- */
- Iterator (const Iterator<BlockVectorType,constness> &c);
-
- /**
- * Copy constructor for
- * conversion between iterators
- * with different constness
- * requirements. This
- * constructor throws an error
- * if an attempt is made at
- * converting a constant to a
- * non-constant iterator.
- */
- Iterator (const InverseConstnessIterator &c);
-
- private:
- /**
- * Constructor used internally
- * in this class. The arguments
- * match exactly the values of
- * the respective member
- * variables.
- */
- Iterator (BlockVector &parent,
- const types::global_dof_index global_index,
- const unsigned int current_block,
- const types::global_dof_index index_within_block,
- const types::global_dof_index next_break_forward,
- const types::global_dof_index next_break_backward);
-
- public:
-
- /**
- * Copy operator.
- */
- Iterator & operator = (const Iterator &c);
-
- /**
- * Dereferencing operator. If the
- * template argument
- * <tt>constness</tt> is
- * <tt>true</tt>, then no writing to
- * the result is possible, making
- * this a const_iterator.
- */
- dereference_type operator * () const;
-
- /**
- * Random access operator,
- * grant access to arbitrary
- * elements relative to the one
- * presently pointed to.
- */
- dereference_type operator [] (const difference_type d) const;
-
- /**
- * Prefix increment operator. This
- * operator advances the iterator to
- * the next element and returns a
- * reference to <tt>*this</tt>.
- */
- Iterator & operator ++ ();
-
- /**
- * Postfix increment
- * operator. This operator
- * advances the iterator to
- * the next element and
- * returns a copy of the old
- * value of this iterator.
- */
- Iterator operator ++ (int);
-
- /**
- * Prefix decrement operator. This
- * operator retracts the iterator to
- * the previous element and returns a
- * reference to <tt>*this</tt>.
- */
- Iterator & operator -- ();
-
- /**
- * Postfix decrement
- * operator. This operator
- * retracts the iterator to
- * the previous element and
- * returns a copy of the old
- * value of this iterator.
- */
- Iterator operator -- (int);
-
- /**
- * Compare for equality of
- * iterators. This operator
- * checks whether the vectors
- * pointed to are the same,
- * and if not it throws an
- * exception.
- */
- bool operator == (const Iterator &i) const;
-
- /**
- * Same, but compare with an
- * iterator of different
- * constness.
- */
- bool operator == (const InverseConstnessIterator &i) const;
-
- /**
- * Compare for inequality of
- * iterators. This operator
- * checks whether the vectors
- * pointed to are the same,
- * and if not it throws an
- * exception.
- */
- bool operator != (const Iterator &i) const;
-
- /**
- * Same, but compare with an
- * iterator of different
- * constness.
- */
- bool operator != (const InverseConstnessIterator &i) const;
-
- /**
- * Check whether this
- * iterators points to an
- * element previous to the
- * one pointed to by the
- * given argument. This
- * operator checks whether
- * the vectors pointed to are
- * the same, and if not it
- * throws an exception.
- */
- bool operator < (const Iterator &i) const;
-
- /**
- * Same, but compare with an
- * iterator of different
- * constness.
- */
- bool operator < (const InverseConstnessIterator &i) const;
-
- /**
- * Comparison operator alike
- * to the one above.
- */
- bool operator <= (const Iterator &i) const;
-
- /**
- * Same, but compare with an
- * iterator of different
- * constness.
- */
- bool operator <= (const InverseConstnessIterator &i) const;
-
- /**
- * Comparison operator alike
- * to the one above.
- */
- bool operator > (const Iterator &i) const;
-
- /**
- * Same, but compare with an
- * iterator of different
- * constness.
- */
- bool operator > (const InverseConstnessIterator &i) const;
-
- /**
- * Comparison operator alike
- * to the one above.
- */
- bool operator >= (const Iterator &i) const;
-
- /**
- * Same, but compare with an
- * iterator of different
- * constness.
- */
- bool operator >= (const InverseConstnessIterator &i) const;
-
- /**
- * Return the distance between
- * the two iterators, in
- * elements.
- */
- difference_type operator - (const Iterator &i) const;
-
- /**
- * Same, but for iterators of
- * opposite constness.
- */
- difference_type operator - (const InverseConstnessIterator &i) const;
-
- /**
- * Return an iterator which is
- * the given number of elements
- * in front of the present one.
- */
- Iterator operator + (const difference_type &d) const;
-
- /**
- * Return an iterator which is
- * the given number of elements
- * behind the present one.
- */
- Iterator operator - (const difference_type &d) const;
-
- /**
- * Move the iterator <tt>d</tt>
- * elements forward at once,
- * and return the result.
- */
- Iterator & operator += (const difference_type &d);
-
- /**
- * Move the iterator <tt>d</tt>
- * elements backward at once,
- * and return the result.
- */
- Iterator & operator -= (const difference_type &d);
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception.
- */
- DeclException0 (ExcPointerToDifferentVectors);
- /**
- * Exception.
- */
- DeclException0 (ExcCastingAwayConstness);
- //@}
- private:
- /**
- * Pointer to the block
- * vector object to which
- * this iterator
- * points. Depending on the
- * value of the <tt>constness</tt>
- * template argument of this
- * class, this is a <tt>const</tt>
- * or non-<tt>const</tt> pointer.
- */
- BlockVector *parent;
-
- /**
- * Global index of the
- * element to which we
- * presently point.
- */
- types::global_dof_index global_index;
-
- /**
- * Current block and index
- * within this block of the
- * element presently pointed
- * to.
- */
- unsigned int current_block;
- types::global_dof_index index_within_block;
-
- /**
- * Indices of the global
- * element address at which
- * we have to move on to
- * another block when moving
- * forward and
- * backward. These indices
- * are kept as a cache since
- * this is much more
- * efficient than always
- * asking the parent object.
- */
- types::global_dof_index next_break_forward;
- types::global_dof_index next_break_backward;
-
- /**
- * Move forward one element.
- */
- void move_forward ();
-
- /**
- * Move backward one element.
- */
- void move_backward ();
-
-
- /**
- * Mark all other instances of
- * this template as friends. In
- * fact, we only need the
- * inverse constness iterator
- * as friend, but this is
- * something that ISO C++ does
- * not allow to specify.
- */
- template <typename N, bool C>
- friend class Iterator;
+ private:
+ /**
+ * Typedef an iterator with
+ * opposite constness
+ * requirements on the elements
+ * it points to.
+ */
+ typedef Iterator<BlockVectorType,!constness> InverseConstnessIterator;
+
+ public:
+ /**
+ * Type of the number this
+ * iterator points
+ * to. Depending on the value
+ * of the second template
+ * parameter, this is either a
+ * constant or non-const
+ * number.
+ */
+ typedef
+ typename Types<BlockVectorType,constness>::value_type
+ value_type;
+
+ /**
+ * Declare some typedefs which
+ * are standard for iterators
+ * and are used by algorithms
+ * to enquire about the
+ * specifics of the iterators
+ * they work on.
+ */
+ typedef std::random_access_iterator_tag iterator_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef typename BlockVectorType::reference reference;
+ typedef value_type *pointer;
+
+ typedef
+ typename Types<BlockVectorType,constness>::dereference_type
+ dereference_type;
+
+ /**
+ * Typedef the type of the
+ * block vector (which differs
+ * in constness, depending on
+ * the second template
+ * parameter).
+ */
+ typedef
+ typename Types<BlockVectorType,constness>::BlockVector
+ BlockVector;
+
+ /**
+ * Construct an iterator from
+ * a vector to which we point
+ * and the global index of
+ * the element pointed to.
+ *
+ * Depending on the value of
+ * the <tt>constness</tt> template
+ * argument of this class,
+ * the first argument of this
+ * constructor is either a
+ * const or non-const
+ * reference.
+ */
- Iterator (BlockVector &parent,
- const unsigned int global_index);
++ Iterator (BlockVector &parent,
++ const types::global_dof_index global_index);
+
+ /**
+ * Copy constructor.
+ */
+ Iterator (const Iterator<BlockVectorType,constness> &c);
+
+ /**
+ * Copy constructor for
+ * conversion between iterators
+ * with different constness
+ * requirements. This
+ * constructor throws an error
+ * if an attempt is made at
+ * converting a constant to a
+ * non-constant iterator.
+ */
+ Iterator (const InverseConstnessIterator &c);
+
+ private:
+ /**
+ * Constructor used internally
+ * in this class. The arguments
+ * match exactly the values of
+ * the respective member
+ * variables.
+ */
+ Iterator (BlockVector &parent,
- const unsigned int global_index,
- const unsigned int current_block,
- const unsigned int index_within_block,
- const unsigned int next_break_forward,
- const unsigned int next_break_backward);
++ const types::global_dof_index global_index,
++ const unsigned int current_block,
++ const types::global_dof_index index_within_block,
++ const types::global_dof_index next_break_forward,
++ const types::global_dof_index next_break_backward);
+
+ public:
+
+ /**
+ * Copy operator.
+ */
+ Iterator &operator = (const Iterator &c);
+
+ /**
+ * Dereferencing operator. If the
+ * template argument
+ * <tt>constness</tt> is
+ * <tt>true</tt>, then no writing to
+ * the result is possible, making
+ * this a const_iterator.
+ */
+ dereference_type operator * () const;
+
+ /**
+ * Random access operator,
+ * grant access to arbitrary
+ * elements relative to the one
+ * presently pointed to.
+ */
+ dereference_type operator [] (const difference_type d) const;
+
+ /**
+ * Prefix increment operator. This
+ * operator advances the iterator to
+ * the next element and returns a
+ * reference to <tt>*this</tt>.
+ */
+ Iterator &operator ++ ();
+
+ /**
+ * Postfix increment
+ * operator. This operator
+ * advances the iterator to
+ * the next element and
+ * returns a copy of the old
+ * value of this iterator.
+ */
+ Iterator operator ++ (int);
+
+ /**
+ * Prefix decrement operator. This
+ * operator retracts the iterator to
+ * the previous element and returns a
+ * reference to <tt>*this</tt>.
+ */
+ Iterator &operator -- ();
+
+ /**
+ * Postfix decrement
+ * operator. This operator
+ * retracts the iterator to
+ * the previous element and
+ * returns a copy of the old
+ * value of this iterator.
+ */
+ Iterator operator -- (int);
+
+ /**
+ * Compare for equality of
+ * iterators. This operator
+ * checks whether the vectors
+ * pointed to are the same,
+ * and if not it throws an
+ * exception.
+ */
+ bool operator == (const Iterator &i) const;
+
+ /**
+ * Same, but compare with an
+ * iterator of different
+ * constness.
+ */
+ bool operator == (const InverseConstnessIterator &i) const;
+
+ /**
+ * Compare for inequality of
+ * iterators. This operator
+ * checks whether the vectors
+ * pointed to are the same,
+ * and if not it throws an
+ * exception.
+ */
+ bool operator != (const Iterator &i) const;
+
+ /**
+ * Same, but compare with an
+ * iterator of different
+ * constness.
+ */
+ bool operator != (const InverseConstnessIterator &i) const;
+
+ /**
+ * Check whether this
+ * iterator points to an
+ * element previous to the
+ * one pointed to by the
+ * given argument. This
+ * operator checks whether
+ * the vectors pointed to are
+ * the same, and if not it
+ * throws an exception.
+ */
+ bool operator < (const Iterator &i) const;
+
+ /**
+ * Same, but compare with an
+ * iterator of different
+ * constness.
+ */
+ bool operator < (const InverseConstnessIterator &i) const;
+
+ /**
+ * Comparison operator similar
+ * to the one above.
+ */
+ bool operator <= (const Iterator &i) const;
+
+ /**
+ * Same, but compare with an
+ * iterator of different
+ * constness.
+ */
+ bool operator <= (const InverseConstnessIterator &i) const;
+
+ /**
+ * Comparison operator similar
+ * to the one above.
+ */
+ bool operator > (const Iterator &i) const;
+
+ /**
+ * Same, but compare with an
+ * iterator of different
+ * constness.
+ */
+ bool operator > (const InverseConstnessIterator &i) const;
+
+ /**
+ * Comparison operator similar
+ * to the one above.
+ */
+ bool operator >= (const Iterator &i) const;
+
+ /**
+ * Same, but compare with an
+ * iterator of different
+ * constness.
+ */
+ bool operator >= (const InverseConstnessIterator &i) const;
+
+ /**
+ * Return the distance between
+ * the two iterators, in
+ * elements.
+ */
+ difference_type operator - (const Iterator &i) const;
+
+ /**
+ * Same, but for iterators of
+ * opposite constness.
+ */
+ difference_type operator - (const InverseConstnessIterator &i) const;
+
+ /**
+ * Return an iterator which is
+ * the given number of elements
+ * in front of the present one.
+ */
+ Iterator operator + (const difference_type &d) const;
+
+ /**
+ * Return an iterator which is
+ * the given number of elements
+ * behind the present one.
+ */
+ Iterator operator - (const difference_type &d) const;
+
+ /**
+ * Move the iterator <tt>d</tt>
+ * elements forward at once,
+ * and return the result.
+ */
+ Iterator &operator += (const difference_type &d);
+
+ /**
+ * Move the iterator <tt>d</tt>
+ * elements backward at once,
+ * and return the result.
+ */
+ Iterator &operator -= (const difference_type &d);
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception.
+ */
+ DeclException0 (ExcPointerToDifferentVectors);
+ /**
+ * Exception.
+ */
+ DeclException0 (ExcCastingAwayConstness);
+ //@}
+ private:
+ /**
+ * Pointer to the block
+ * vector object to which
+ * this iterator
+ * points. Depending on the
+ * value of the <tt>constness</tt>
+ * template argument of this
+ * class, this is a <tt>const</tt>
+ * or non-<tt>const</tt> pointer.
+ */
+ BlockVector *parent;
+
+ /**
+ * Global index of the
+ * element to which we
+ * presently point.
+ */
- unsigned int global_index;
++ types::global_dof_index global_index;
+
+ /**
+ * Current block and index
+ * within this block of the
+ * element presently pointed
+ * to.
+ */
+ unsigned int current_block;
- unsigned int index_within_block;
++ types::global_dof_index index_within_block;
+
+ /**
+ * Indices of the global
+ * element address at which
+ * we have to move on to
+ * another block when moving
+ * forward and
+ * backward. These indices
+ * are kept as a cache since
+ * this is much more
+ * efficient than always
+ * asking the parent object.
+ */
- unsigned int next_break_forward;
- unsigned int next_break_backward;
++ types::global_dof_index next_break_forward;
++ types::global_dof_index next_break_backward;
+
+ /**
+ * Move forward one element.
+ */
+ void move_forward ();
+
+ /**
+ * Move backward one element.
+ */
+ void move_backward ();
+
+
+ /**
+ * Mark all other instances of
+ * this template as friends. In
+ * fact, we only need the
+ * inverse constness iterator
+ * as friend, but this is
+ * something that ISO C++ does
+ * not allow us to specify.
+ */
+ template <typename N, bool C>
+ friend class Iterator;
};
} // namespace BlockVectorIterators
} // namespace internal
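Since the iterators above satisfy the random access requirements, a block vector can be handed to the standard algorithms directly. A small sketch (editorial, not part of the patch), with the two-argument constructor assumed from the standard interface:

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>
#include <numeric>

int main ()
{
  dealii::BlockVector<double> v (3, 4);
  v = 1.;

  // sum all entries across all blocks, in global-index order
  const double total = std::accumulate (v.begin(), v.end(), 0.);
  // total == 12 here, since there are 3*4 entries equal to one
  return (total == 12.) ? 0 : 1;
}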
template <class VectorType>
class BlockVectorBase : public Subscriptor
{
- public:
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef VectorType BlockType;
-
- /*
- * Declare standard types used in
- * all containers. These types
- * parallel those in the
- * <tt>C++</tt> standard
- * libraries
- * <tt>std::vector<...></tt>
- * class. This includes iterator
- * types.
- */
- typedef typename BlockType::value_type value_type;
- typedef value_type *pointer;
- typedef const value_type *const_pointer;
- typedef dealii::internal::BlockVectorIterators::Iterator<BlockVectorBase,false> iterator;
- typedef dealii::internal::BlockVectorIterators::Iterator<BlockVectorBase,true> const_iterator;
- typedef typename BlockType::reference reference;
- typedef typename BlockType::const_reference const_reference;
-
- typedef std::size_t size_type;
-
- /**
- * Declare a type that has holds
- * real-valued numbers with the
- * same precision as the template
- * argument to this class. If the
- * template argument of this
- * class is a real data type,
- * then real_type equals the
- * template argument. If the
- * template argument is a
- * std::complex type then
- * real_type equals the type
- * underlying the complex
- * numbers.
- *
- * This typedef is used to
- * represent the return type of
- * norms.
- */
- typedef typename BlockType::real_type real_type;
-
- /**
- * Default constructor.
- */
- BlockVectorBase ();
-
- /**
- * Update internal structures
- * after resizing
- * vectors. Whenever you reinited
- * a block of a block vector, the
- * internal data structures are
- * corrupted. Therefore, you
- * should call this function
- * after al blocks got their new
- * size.
- */
- void collect_sizes ();
-
- /**
- * Call the compress() function on all
- * the subblocks of the matrix.
- *
- * This functionality only needs to be
- * called if using MPI based vectors and
- * exists in other objects for
- * compatibility.
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
-
- /**
- * Access to a single block.
- */
- BlockType &
- block (const unsigned int i);
-
- /**
- * Read-only access to a single block.
- */
- const BlockType &
- block (const unsigned int i) const;
-
- /**
- * Return a reference on the
- * object that describes the
- * mapping between block and
- * global indices. The use of
- * this function is highly
- * deprecated and it should
- * vanish in one of the next
- * versions
- */
- const BlockIndices &
- get_block_indices () const;
-
- /**
- * Number of blocks.
- */
- unsigned int n_blocks () const;
-
- /**
- * Return dimension of the vector. This
- * is the sum of the dimensions of all
- * components.
- */
- types::global_dof_index size () const;
-
- /**
- * Return an iterator pointing to
- * the first element.
- */
- iterator begin ();
-
- /**
- * Return an iterator pointing to
- * the first element of a
- * constant block vector.
- */
- const_iterator begin () const;
-
- /**
- * Return an iterator pointing to
- * the element past the end.
- */
- iterator end ();
-
- /**
- * Return an iterator pointing to
- * the element past the end of a
- * constant block vector.
- */
- const_iterator end () const;
-
- /**
- * Access components, returns U(i).
- */
- value_type operator() (const types::global_dof_index i) const;
-
- /**
- * Access components, returns U(i)
- * as a writeable reference.
- */
- reference operator() (const types::global_dof_index i);
-
- /**
- * Access components, returns U(i).
- *
- * Exactly the same as operator().
- */
- value_type operator[] (const types::global_dof_index i) const;
-
- /**
- * Access components, returns U(i)
- * as a writeable reference.
- *
- * Exactly the same as operator().
- */
- reference operator[] (const types::global_dof_index i);
-
- /**
- * Copy operator: fill all components of
- * the vector with the given scalar
- * value.
- */
- BlockVectorBase & operator = (const value_type s);
-
- /**
- * Copy operator for arguments of the
- * same type.
- */
- BlockVectorBase &
- operator= (const BlockVectorBase& V);
-
- /**
- * Copy operator for template arguments
- * of different types.
- */
- template <class VectorType2>
- BlockVectorBase &
- operator= (const BlockVectorBase<VectorType2> &V);
-
- /**
- * Copy operator from non-block
- * vectors to block vectors.
- */
- BlockVectorBase &
- operator = (const VectorType &v);
-
- /**
- * Check for equality of two block vector
- * types. This operation is only allowed
- * if the two vectors already have the
- * same block structure.
- */
- template <class VectorType2>
- bool
- operator == (const BlockVectorBase<VectorType2> &v) const;
-
- /**
- * $U = U * V$: scalar product.
- */
- value_type operator* (const BlockVectorBase& V) const;
-
- /**
- * Return square of the $l_2$-norm.
- */
- real_type norm_sqr () const;
-
- /**
- * Return the mean value of the elements
- * of this vector.
- */
- value_type mean_value () const;
-
- /**
- * Return the $l_1$-norm of the vector,
- * i.e. the sum of the absolute values.
- */
- real_type l1_norm () const;
-
- /**
- * Return the $l_2$-norm of the vector,
- * i.e. the square root of the sum of
- * the squares of the elements.
- */
- real_type l2_norm () const;
-
- /**
- * Return the maximum absolute value of
- * the elements of this vector, which is
- * the $l_\infty$-norm of a vector.
- */
- real_type linfty_norm () const;
-
- /**
- * Return whether the vector contains only
- * elements with value zero. This function
- * is mainly for internal consistency
- * check and should seldom be used when
- * not in debug mode since it uses quite
- * some time.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries are
- * zero or positive. This function is
- * used, for example, to check whether
- * refinement indicators are really all
- * positive (or zero).
- */
- bool is_non_negative () const;
-
- /**
- * Addition operator. Fast equivalent to
- * <tt>U.add(1, V)</tt>.
- */
- BlockVectorBase &
- operator += (const BlockVectorBase &V);
-
- /**
- * Subtraction operator. Fast equivalent
- * to <tt>U.add(-1, V)</tt>.
- */
- BlockVectorBase &
- operator -= (const BlockVectorBase &V);
-
-
- /**
- * A collective add operation:
- * This funnction adds a whole
- * set of values stored in @p
- * values to the vector
- * components specified by @p
- * indices.
- */
- template <typename Number>
- void add (const std::vector<types::global_dof_index> &indices,
- const std::vector<Number> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- template <typename Number>
- void add (const std::vector<types::global_dof_index> &indices,
- const Vector<Number> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- template <typename Number>
- void add (const unsigned int n_elements,
- const types::global_dof_index *indices,
- const Number *values);
-
- /**
- * $U(0-DIM)+=s$. Addition of <tt>s</tt>
- * to all components. Note that
- * <tt>s</tt> is a scalar and not a
- * vector.
- */
- void add (const value_type s);
-
- /**
- * U+=V.
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const BlockVectorBase& V);
-
- /**
- * U+=a*V.
- * Simple addition of a scaled vector.
- */
- void add (const value_type a, const BlockVectorBase& V);
-
- /**
- * U+=a*V+b*W.
- * Multiple addition of scaled vectors.
- */
- void add (const value_type a, const BlockVectorBase& V,
- const value_type b, const BlockVectorBase& W);
-
- /**
- * U=s*U+V.
- * Scaling and simple vector addition.
- */
- void sadd (const value_type s, const BlockVectorBase& V);
-
- /**
- * U=s*U+a*V.
- * Scaling and simple addition.
- */
- void sadd (const value_type s, const value_type a, const BlockVectorBase& V);
-
- /**
- * U=s*U+a*V+b*W.
- * Scaling and multiple addition.
- */
- void sadd (const value_type s, const value_type a,
- const BlockVectorBase& V,
- const value_type b, const BlockVectorBase& W);
-
- /**
- * U=s*U+a*V+b*W+c*X.
- * Scaling and multiple addition.
- */
- void sadd (const value_type s, const value_type a,
- const BlockVectorBase& V,
- const value_type b, const BlockVectorBase& W,
- const value_type c, const BlockVectorBase& X);
-
- /**
- * Scale each element of the
- * vector by a constant
- * value.
- */
- BlockVectorBase & operator *= (const value_type factor);
-
- /**
- * Scale each element of the
- * vector by the inverse of the
- * given value.
- */
- BlockVectorBase & operator /= (const value_type factor);
-
- /**
- * Multiply each element of this
- * vector by the corresponding
- * element of <tt>v</tt>.
- */
- template <class BlockVector2>
- void scale (const BlockVector2 &v);
-
- /**
- * U=a*V. Assignment.
- */
- template <class BlockVector2>
- void equ (const value_type a, const BlockVector2 &V);
-
- /**
- * U=a*V+b*W.
- * Replacing by sum.
- */
- void equ (const value_type a, const BlockVectorBase& V,
- const value_type b, const BlockVectorBase& W);
-
- /**
- * This function does nothing but is
- * there for compatibility with the
- * @p PETScWrappers::Vector class.
- *
- * For the PETSc vector wrapper class,
- * this function updates the ghost
- * values of the PETSc vector. This
- * is necessary after any modification
- * before reading ghost values.
- *
- * However, for the implementation of
- * this class, it is immaterial and thus
- * an empty function.
- */
- void update_ghost_values () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
-
- protected:
- /**
- * Pointer to the array of components.
- */
- std::vector<VectorType> components;
-
- /**
- * Object managing the
- * transformation between global
- * indices and indices within the
- * different blocks.
- */
- BlockIndices block_indices;
-
- /**
- * Make the iterator class a
- * friend.
- */
- template <typename N, bool C>
- friend class dealii::internal::BlockVectorIterators::Iterator;
-
- template <typename> friend class BlockVectorBase;
+ public:
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef VectorType BlockType;
+
+ /*
+ * Declare standard types used in
+ * all containers. These types
+ * parallel those in the
+ * <tt>C++</tt> standard
+ * library's
+ * <tt>std::vector<...></tt>
+ * class. This includes iterator
+ * types.
+ */
+ typedef typename BlockType::value_type value_type;
+ typedef value_type *pointer;
+ typedef const value_type *const_pointer;
+ typedef dealii::internal::BlockVectorIterators::Iterator<BlockVectorBase,false> iterator;
+ typedef dealii::internal::BlockVectorIterators::Iterator<BlockVectorBase,true> const_iterator;
+ typedef typename BlockType::reference reference;
+ typedef typename BlockType::const_reference const_reference;
+
+ typedef std::size_t size_type;
+
+ /**
+ * Declare a type that holds
+ * real-valued numbers with the
+ * same precision as the template
+ * argument to this class. If the
+ * template argument of this
+ * class is a real data type,
+ * then real_type equals the
+ * template argument. If the
+ * template argument is a
+ * std::complex type then
+ * real_type equals the type
+ * underlying the complex
+ * numbers.
+ *
+ * This typedef is used to
+ * represent the return type of
+ * norms.
+ */
+ typedef typename BlockType::real_type real_type;
+
+ /**
+ * Default constructor.
+ */
+ BlockVectorBase ();
+
+ /**
+ * Update internal structures
+ * after resizing
+ * vectors. Whenever you reinit
+ * a block of a block vector, the
+ * internal data structures are
+ * corrupted. Therefore, you
+ * should call this function
+ * after all blocks have been
+ * given their new sizes.
+ */
+ void collect_sizes ();
+
+ /**
+ * Call the compress() function on all
+ * the subblocks of the vector.
+ *
+ * This functionality only needs to be
+ * called if using MPI based vectors and
+ * exists in other objects for
+ * compatibility.
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+
+ /**
+ * Access to a single block.
+ */
+ BlockType &
+ block (const unsigned int i);
+
+ /**
+ * Read-only access to a single block.
+ */
+ const BlockType &
+ block (const unsigned int i) const;
+
+ /**
+ * Return a reference to the
+ * object that describes the
+ * mapping between block and
+ * global indices. The use of
+ * this function is highly
+ * deprecated and it should
+ * vanish in one of the next
+ * versions.
+ */
+ const BlockIndices &
+ get_block_indices () const;
+
+ /**
+ * Number of blocks.
+ */
+ unsigned int n_blocks () const;
+
+ /**
+ * Return dimension of the vector. This
+ * is the sum of the dimensions of all
+ * components.
+ */
- unsigned int size () const;
++ types::global_dof_index size () const;
+
+ /**
+ * Return an iterator pointing to
+ * the first element.
+ */
+ iterator begin ();
+
+ /**
+ * Return an iterator pointing to
+ * the first element of a
+ * constant block vector.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Return an iterator pointing to
+ * the element past the end.
+ */
+ iterator end ();
+
+ /**
+ * Return an iterator pointing to
+ * the element past the end of a
+ * constant block vector.
+ */
+ const_iterator end () const;
+
+ /**
+ * Access components, returns U(i).
+ */
- value_type operator() (const unsigned int i) const;
++ value_type operator() (const types::global_dof_index i) const;
+
+ /**
+ * Access components, returns U(i)
+ * as a writeable reference.
+ */
- reference operator() (const unsigned int i);
++ reference operator() (const types::global_dof_index i);
+
+ /**
+ * Access components, returns U(i).
+ *
+ * Exactly the same as operator().
+ */
- value_type operator[] (const unsigned int i) const;
++ value_type operator[] (const types::global_dof_index i) const;
+
+ /**
+ * Access components, returns U(i)
+ * as a writeable reference.
+ *
+ * Exactly the same as operator().
+ */
- reference operator[] (const unsigned int i);
++ reference operator[] (const types::global_dof_index i);
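A brief sketch (editorial, not part of the patch) of global element access: operator() and operator[] address entries across block boundaries, while block() addresses them block-wise. The two-argument constructor is assumed from the standard interface.

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>

int main ()
{
  dealii::BlockVector<double> v (2, 3);   // global indices 0..5, blocks of size 3
  v(0) = 1.;                              // first entry of block 0
  v[4] = 2.;                              // second entry of block 1
  const double a = v(4);                  // a == 2.
  const double b = v.block(1)(1);         // same entry, addressed block-wise
  return (a == b) ? 0 : 1;
}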
+
+ /**
+ * Copy operator: fill all components of
+ * the vector with the given scalar
+ * value.
+ */
+ BlockVectorBase &operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of the
+ * same type.
+ */
+ BlockVectorBase &
+ operator= (const BlockVectorBase &V);
+
+ /**
+ * Copy operator for template arguments
+ * of different types.
+ */
+ template <class VectorType2>
+ BlockVectorBase &
+ operator= (const BlockVectorBase<VectorType2> &V);
+
+ /**
+ * Copy operator from non-block
+ * vectors to block vectors.
+ */
+ BlockVectorBase &
+ operator = (const VectorType &v);
+
+ /**
+ * Check for equality of two block vector
+ * types. This operation is only allowed
+ * if the two vectors already have the
+ * same block structure.
+ */
+ template <class VectorType2>
+ bool
+ operator == (const BlockVectorBase<VectorType2> &v) const;
+
+ /**
+ * $U = U * V$: scalar product.
+ */
+ value_type operator* (const BlockVectorBase &V) const;
+
+ /**
+ * Return square of the $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Return the mean value of the elements
+ * of this vector.
+ */
+ value_type mean_value () const;
+
+ /**
+ * Return the $l_1$-norm of the vector,
+ * i.e. the sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Return the $l_2$-norm of the vector,
+ * i.e. the square root of the sum of
+ * the squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * Return the maximum absolute value of
+ * the elements of this vector, which is
+ * the $l_\infty$-norm of a vector.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return whether the vector contains only
+ * elements with value zero. This function
+ * is mainly meant for internal
+ * consistency checks and should
+ * seldom be used outside of debug
+ * mode, since it is quite expensive.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries are
+ * zero or positive. This function is
+ * used, for example, to check whether
+ * refinement indicators are really all
+ * positive (or zero).
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Addition operator. Fast equivalent to
+ * <tt>U.add(1, V)</tt>.
+ */
+ BlockVectorBase &
+ operator += (const BlockVectorBase &V);
+
+ /**
+ * Subtraction operator. Fast equivalent
+ * to <tt>U.add(-1, V)</tt>.
+ */
+ BlockVectorBase &
+ operator -= (const BlockVectorBase &V);
+
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
+ */
+ template <typename Number>
- void add (const std::vector<unsigned int> &indices,
- const std::vector<Number> &values);
++ void add (const std::vector<types::global_dof_index> &indices,
++ const std::vector<Number> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ template <typename Number>
- void add (const std::vector<unsigned int> &indices,
- const Vector<Number> &values);
++ void add (const std::vector<types::global_dof_index> &indices,
++ const Vector<Number> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ template <typename Number>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const Number *values);
++ void add (const unsigned int n_elements,
++ const types::global_dof_index *indices,
++ const Number *values);
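The collective add() just declared can be used as in this sketch (editorial, not part of the patch); types::global_dof_index is assumed to come from <deal.II/base/types.h> and the two-argument constructor from the standard interface.

// Editor's sketch, not part of this patch.
#include <deal.II/base/types.h>
#include <deal.II/lac/block_vector.h>
#include <vector>

int main ()
{
  dealii::BlockVector<double> v (2, 4);
  v = 0.;

  std::vector<dealii::types::global_dof_index> indices (2);
  indices[0] = 1;
  indices[1] = 6;

  std::vector<double> values (2);
  values[0] = 10.;
  values[1] = 20.;

  v.add (indices, values);   // v(1) += 10 and v(6) += 20
}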
+
+ /**
+ * $U(0-DIM)+=s$. Addition of <tt>s</tt>
+ * to all components. Note that
+ * <tt>s</tt> is a scalar and not a
+ * vector.
+ */
+ void add (const value_type s);
+
+ /**
+ * U+=V.
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const BlockVectorBase &V);
+
+ /**
+ * U+=a*V.
+ * Simple addition of a scaled vector.
+ */
+ void add (const value_type a, const BlockVectorBase &V);
+
+ /**
+ * U+=a*V+b*W.
+ * Multiple addition of scaled vectors.
+ */
+ void add (const value_type a, const BlockVectorBase &V,
+ const value_type b, const BlockVectorBase &W);
+
+ /**
+ * U=s*U+V.
+ * Scaling and simple vector addition.
+ */
+ void sadd (const value_type s, const BlockVectorBase &V);
+
+ /**
+ * U=s*U+a*V.
+ * Scaling and simple addition.
+ */
+ void sadd (const value_type s, const value_type a, const BlockVectorBase &V);
+
+ /**
+ * U=s*U+a*V+b*W.
+ * Scaling and multiple addition.
+ */
+ void sadd (const value_type s, const value_type a,
+ const BlockVectorBase &V,
+ const value_type b, const BlockVectorBase &W);
+
+ /**
+ * U=s*U+a*V+b*W+c*X.
+ * Scaling and multiple addition.
+ */
+ void sadd (const value_type s, const value_type a,
+ const BlockVectorBase &V,
+ const value_type b, const BlockVectorBase &W,
+ const value_type c, const BlockVectorBase &X);
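A worked sketch (editorial, not part of the patch) of the scaled updates documented above, with the two-argument constructor assumed from the standard interface:

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>

int main ()
{
  dealii::BlockVector<double> u (2, 3);
  dealii::BlockVector<double> v (2, 3);
  u = 1.;
  v = 2.;

  u.sadd (0.5, 3., v);       // u = 0.5*u + 3*v: every entry becomes 6.5
  u.add  (3., v);            // u += 3*v:        every entry becomes 12.5
}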
+
+ /**
+ * Scale each element of the
+ * vector by a constant
+ * value.
+ */
+ BlockVectorBase &operator *= (const value_type factor);
+
+ /**
+ * Scale each element of the
+ * vector by the inverse of the
+ * given value.
+ */
+ BlockVectorBase &operator /= (const value_type factor);
+
+ /**
+ * Multiply each element of this
+ * vector by the corresponding
+ * element of <tt>v</tt>.
+ */
+ template <class BlockVector2>
+ void scale (const BlockVector2 &v);
+
+ /**
+ * U=a*V. Assignment.
+ */
+ template <class BlockVector2>
+ void equ (const value_type a, const BlockVector2 &V);
+
+ /**
+ * U=a*V+b*W.
+ * Replacing by sum.
+ */
+ void equ (const value_type a, const BlockVectorBase &V,
+ const value_type b, const BlockVectorBase &W);
+
+ /**
+ * This function does nothing but is
+ * there for compatibility with the
+ * @p PETScWrappers::Vector class.
+ *
+ * For the PETSc vector wrapper class,
+ * this function updates the ghost
+ * values of the PETSc vector. This
+ * is necessary after any modification
+ * before reading ghost values.
+ *
+ * However, for the implementation of
+ * this class, it is immaterial and thus
+ * an empty function.
+ */
+ void update_ghost_values () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ protected:
+ /**
+ * Pointer to the array of components.
+ */
+ std::vector<VectorType> components;
+
+ /**
+ * Object managing the
+ * transformation between global
+ * indices and indices within the
+ * different blocks.
+ */
+ BlockIndices block_indices;
+
+ /**
+ * Make the iterator class a
+ * friend.
+ */
+ template <typename N, bool C>
+ friend class dealii::internal::BlockVectorIterators::Iterator;
+
+ template <typename> friend class BlockVectorBase;
};
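A sketch (editorial, not part of the patch) of the reduction operations declared in BlockVectorBase; they operate on the vector as a whole, across blocks. The two-argument constructor is assumed from the standard interface.

// Editor's sketch, not part of this patch.
#include <deal.II/lac/block_vector.h>

int main ()
{
  dealii::BlockVector<double> v (2, 2);   // four entries in total
  v = -2.;

  const double l1   = v.l1_norm ();       // 8, the sum of absolute values
  const double l2   = v.l2_norm ();       // 4, sqrt of the sum of squares
  const double linf = v.linfty_norm ();   // 2, the largest absolute value
  const double mean = v.mean_value ();    // -2
  return (l1 == 8. && l2 == 4. && linf == 2. && mean == -2.) ? 0 : 1;
}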
inline
Iterator<BlockVectorType,constness>::
Iterator (BlockVector &parent,
- const unsigned int global_index,
- const unsigned int current_block,
- const unsigned int index_within_block,
- const unsigned int next_break_forward,
- const unsigned int next_break_backward)
+ const types::global_dof_index global_index,
+ const unsigned int current_block,
+ const types::global_dof_index index_within_block,
+ const types::global_dof_index next_break_forward,
+ const types::global_dof_index next_break_backward)
- :
- parent (&parent),
- global_index (global_index),
- current_block (current_block),
- index_within_block (index_within_block),
- next_break_forward (next_break_forward),
- next_break_backward (next_break_backward)
+ :
+ parent (&parent),
+ global_index (global_index),
+ current_block (current_block),
+ index_within_block (index_within_block),
+ next_break_forward (next_break_forward),
+ next_break_backward (next_break_backward)
{
}
template <class BlockVectorType, bool constness>
Iterator<BlockVectorType,constness>::
- Iterator (BlockVector &parent,
- const unsigned int global_index)
+ Iterator (BlockVector &parent,
+ const types::global_dof_index global_index)
- :
- parent (&parent),
- global_index (global_index)
+ :
+ parent (&parent),
+ global_index (global_index)
{
- // find which block we are
- // in. for this, take into
- // account that it happens at
- // times that people want to
- // initialize iterators
- // past-the-end
+ // find which block we are
+ // in. for this, take into
+ // account that it happens at
+ // times that people want to
+ // initialize iterators
+ // past-the-end
if (global_index < parent.size())
{
- const std::pair<unsigned int, unsigned int>
+ const std::pair<unsigned int, types::global_dof_index>
- indices = parent.block_indices.global_to_local(global_index);
+ indices = parent.block_indices.global_to_local(global_index);
current_block = indices.first;
index_within_block = indices.second;
template <typename number>
class ChunkSparseMatrix : public virtual Subscriptor
{
- public:
- /**
- * Type of matrix entries. In analogy to
- * the STL container classes.
- */
- typedef number value_type;
-
- /**
- * Declare a type that has holds
- * real-valued numbers with the
- * same precision as the template
- * argument to this class. If the
- * template argument of this
- * class is a real data type,
- * then real_type equals the
- * template argument. If the
- * template argument is a
- * std::complex type then
- * real_type equals the type
- * underlying the complex
- * numbers.
- *
- * This typedef is used to
- * represent the return type of
- * norms.
- */
- typedef typename numbers::NumberTraits<number>::real_type real_type;
-
- /**
- * A structure that describes some of the
- * traits of this class in terms of its
- * run-time behavior. Some other classes
- * (such as the block matrix classes)
- * that take one or other of the matrix
- * classes as its template parameters can
- * tune their behavior based on the
- * variables in this class.
- */
- struct Traits
- {
- /**
- * It is safe to elide additions of
- * zeros to individual elements of
- * this matrix.
- */
- static const bool zero_addition_can_be_elided = true;
- };
-
- /**
- * @name Constructors and initalization.
- */
+ public:
+ /**
+ * Type of matrix entries. In analogy to
+ * the STL container classes.
+ */
+ typedef number value_type;
+
+ /**
+ * Declare a type that holds
+ * real-valued numbers with the
+ * same precision as the template
+ * argument to this class. If the
+ * template argument of this
+ * class is a real data type,
+ * then real_type equals the
+ * template argument. If the
+ * template argument is a
+ * std::complex type then
+ * real_type equals the type
+ * underlying the complex
+ * numbers.
+ *
+ * This typedef is used to
+ * represent the return type of
+ * norms.
+ */
+ typedef typename numbers::NumberTraits<number>::real_type real_type;
+
+ /**
+ * A structure that describes some of the
+ * traits of this class in terms of its
+ * run-time behavior. Some other classes
+ * (such as the block matrix classes)
+ * that take one or the other of the
+ * matrix classes as their template
+ * parameters can
+ * tune their behavior based on the
+ * variables in this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is safe to elide additions of
+ * zeros to individual elements of
+ * this matrix.
+ */
+ static const bool zero_addition_can_be_elided = true;
+ };
+
+ /**
+ * @name Constructors and initialization.
+ */
//@{
- /**
- * Constructor; initializes the matrix to
- * be empty, without any structure, i.e.
- * the matrix is not usable at all. This
- * constructor is therefore only useful
- * for matrices which are members of a
- * class. All other matrices should be
- * created at a point in the data flow
- * where all necessary information is
- * available.
- *
- * You have to initialize
- * the matrix before usage with
- * reinit(const ChunkSparsityPattern&).
- */
- ChunkSparseMatrix ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the matrix
- * to be copied is empty. This is for the
- * same reason as for the
- * ChunkSparsityPattern, see there for the
- * details.
- *
- * If you really want to copy a whole
- * matrix, you can do so by using the
- * copy_from() function.
- */
- ChunkSparseMatrix (const ChunkSparseMatrix &);
-
- /**
- * Constructor. Takes the given
- * matrix sparsity structure to
- * represent the sparsity pattern
- * of this matrix. You can change
- * the sparsity pattern later on
- * by calling the reinit(const
- * ChunkSparsityPattern&) function.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * ChunkSparsityPattern&) is not
- * called with a new sparsity
- * pattern.
- *
- * The constructor is marked
- * explicit so as to disallow
- * that someone passes a sparsity
- * pattern in place of a sparse
- * matrix to some function, where
- * an empty matrix would be
- * generated then.
- */
- explicit ChunkSparseMatrix (const ChunkSparsityPattern &sparsity);
-
- /**
- * Copy constructor: initialize
- * the matrix with the identity
- * matrix. This constructor will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- ChunkSparseMatrix (const ChunkSparsityPattern &sparsity,
- const IdentityMatrix &id);
-
- /**
- * Destructor. Free all memory, but do not
- * release the memory of the sparsity
- * structure.
- */
- virtual ~ChunkSparseMatrix ();
-
- /**
- * Copy operator. Since copying
- * entire sparse matrices is a
- * very expensive operation, we
- * disallow doing so except for
- * the special case of empty
- * matrices of size zero. This
- * doesn't seem particularly
- * useful, but is exactly what
- * one needs if one wanted to
- * have a
- * <code>std::vector@<ChunkSparseMatrix@<double@>
- * @></code>: in that case, one
- * can create a vector (which
- * needs the ability to copy
- * objects) of empty matrices
- * that are then later filled
- * with something useful.
- */
- ChunkSparseMatrix<number>& operator = (const ChunkSparseMatrix<number> &);
-
- /**
- * Copy operator: initialize
- * the matrix with the identity
- * matrix. This operator will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- ChunkSparseMatrix<number> &
- operator= (const IdentityMatrix &id);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- ChunkSparseMatrix & operator = (const double d);
-
- /**
- * Reinitialize the sparse matrix
- * with the given sparsity
- * pattern. The latter tells the
- * matrix how many nonzero
- * elements there need to be
- * reserved.
- *
- * Regarding memory allocation,
- * the same applies as said
- * above.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * ChunkSparsityPattern &) is not
- * called with a new sparsity
- * structure.
- *
- * The elements of the matrix are
- * set to zero by this function.
- */
- virtual void reinit (const ChunkSparsityPattern &sparsity);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- */
- virtual void clear ();
+ /**
+ * Constructor; initializes the matrix to
+ * be empty, without any structure, i.e.
+ * the matrix is not usable at all. This
+ * constructor is therefore only useful
+ * for matrices which are members of a
+ * class. All other matrices should be
+ * created at a point in the data flow
+ * where all necessary information is
+ * available.
+ *
+ * You have to initialize
+ * the matrix before usage with
+ * reinit(const ChunkSparsityPattern&).
+ */
+ ChunkSparseMatrix ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the matrix
+ * to be copied is empty. This is for the
+ * same reason as for the
+ * ChunkSparsityPattern, see there for the
+ * details.
+ *
+ * If you really want to copy a whole
+ * matrix, you can do so by using the
+ * copy_from() function.
+ */
+ ChunkSparseMatrix (const ChunkSparseMatrix &);
+
+ /**
+ * Constructor. Takes the given
+ * matrix sparsity structure to
+ * represent the sparsity pattern
+ * of this matrix. You can change
+ * the sparsity pattern later on
+ * by calling the reinit(const
+ * ChunkSparsityPattern&) function.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * ChunkSparsityPattern&) is not
+ * called with a new sparsity
+ * pattern.
+ *
+ * The constructor is marked
+ * explicit so that a sparsity
+ * pattern cannot inadvertently
+ * be passed in place of a sparse
+ * matrix to some function, which
+ * would then silently generate
+ * an empty matrix.
+ */
+ explicit ChunkSparseMatrix (const ChunkSparsityPattern &sparsity);
+
+ /**
+ * Copy constructor: initialize
+ * the matrix with the identity
+ * matrix. This constructor will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ ChunkSparseMatrix (const ChunkSparsityPattern &sparsity,
- const IdentityMatrix &id);
++ const IdentityMatrix &id);
+
+ /**
+ * Destructor. Free all memory, but do not
+ * release the memory of the sparsity
+ * structure.
+ */
+ virtual ~ChunkSparseMatrix ();
+
+ /**
+ * Copy operator. Since copying
+ * entire sparse matrices is a
+ * very expensive operation, we
+ * disallow doing so except for
+ * the special case of empty
+ * matrices of size zero. This
+ * doesn't seem particularly
+ * useful, but is exactly what
+ * one needs if one wants to
+ * have a
+ * <code>std::vector@<ChunkSparseMatrix@<double@>
+ * @></code>: in that case, one
+ * can create a vector (which
+ * needs the ability to copy
+ * objects) of empty matrices
+ * that are then later filled
+ * with something useful.
+ */
+ ChunkSparseMatrix<number> &operator = (const ChunkSparseMatrix<number> &);
+
+ /**
+ * Copy operator: initialize
+ * the matrix with the identity
+ * matrix. This operator will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ ChunkSparseMatrix<number> &
- operator= (const IdentityMatrix &id);
++ operator= (const IdentityMatrix &id);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ ChunkSparseMatrix &operator = (const double d);
+
+ /**
+ * Reinitialize the sparse matrix
+ * with the given sparsity
+ * pattern. The latter tells the
+ * matrix how many nonzero
+ * elements there need to be
+ * reserved.
+ *
+ * Regarding memory allocation,
+ * the same comments as above
+ * apply.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * ChunkSparsityPattern &) is not
+ * called with a new sparsity
+ * structure.
+ *
+ * The elements of the matrix are
+ * set to zero by this function.
+ */
+ virtual void reinit (const ChunkSparsityPattern &sparsity);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ */
+ virtual void clear ();
//@}
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return whether the object is
- * empty. It is empty if either
- * both dimensions are zero or no
- * ChunkSparsityPattern is
- * associated.
- */
- bool empty () const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- types::global_dof_index m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- types::global_dof_index n () const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the number of actually
- * nonzero elements of this
- * matrix.
- *
- * Note, that this function does
- * (in contrary to
- * n_nonzero_elements()) not
- * count all entries of the
- * sparsity pattern but only the
- * ones that are nonzero.
- */
- unsigned int n_actually_nonzero_elements () const;
-
- /**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
- *
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
- */
- const ChunkSparsityPattern & get_sparsity_pattern () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return whether the object is
+ * empty. It is empty if either
+ * both dimensions are zero or no
+ * ChunkSparsityPattern is
+ * associated.
+ */
+ bool empty () const;
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
- unsigned int m () const;
++ types::global_dof_index m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
- unsigned int n () const;
++ types::global_dof_index n () const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the number of actually
+ * nonzero elements of this
+ * matrix.
+ *
+ * Note that, in contrast to
+ * n_nonzero_elements(), this
+ * function does not count all
+ * entries of the sparsity
+ * pattern but only the ones
+ * that are nonzero.
+ */
+ unsigned int n_actually_nonzero_elements () const;
+
+ /**
+ * Return a (constant) reference
+ * to the underlying sparsity
+ * pattern of this matrix.
+ *
+ * Though the return value is
+ * declared <tt>const</tt>, you
+ * should be aware that it may
+ * change if you call any
+ * nonconstant function of
+ * objects which operate on it.
+ */
+ const ChunkSparsityPattern &get_sparsity_pattern () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
//@}
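+ /**
+ * A minimal sketch of the query functions above (added for illustration;
+ * <tt>matrix</tt> is an assumed, already initialized
+ * ChunkSparseMatrix<double>):
+ *
+ * @code
+ * if (!matrix.empty())
+ *   std::cout << matrix.m() << " x " << matrix.n()
+ *             << " matrix with "
+ *             << matrix.n_nonzero_elements() << " stored entries, "
+ *             << matrix.memory_consumption() << " bytes (estimated)"
+ *             << std::endl;
+ * @endcode
+ */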
- /**
- * @name Modifying entries
- */
+ /**
+ * @name Modifying entries
+ */
//@{
- /**
- * Set the element (<i>i,j</i>)
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void set (const types::global_dof_index i,
- const types::global_dof_index j,
- const number value);
-
- /**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const types::global_dof_index i,
- const types::global_dof_index j,
- const number value);
-
- /**
- * Multiply the entire matrix by a
- * fixed factor.
- */
- ChunkSparseMatrix & operator *= (const number factor);
-
- /**
- * Divide the entire matrix by a
- * fixed factor.
- */
- ChunkSparseMatrix & operator /= (const number factor);
-
- /**
- * Symmetrize the matrix by
- * forming the mean value between
- * the existing matrix and its
- * transpose, $A = \frac 12(A+A^T)$.
- *
- * This operation assumes that
- * the underlying sparsity
- * pattern represents a symmetric
- * object. If this is not the
- * case, then the result of this
- * operation will not be a
- * symmetric matrix, since it
- * only explicitly symmetrizes
- * by looping over the lower left
- * triangular part for efficiency
- * reasons; if there are entries
- * in the upper right triangle,
- * then these elements are missed
- * in the
- * symmetrization. Symmetrization
- * of the sparsity pattern can be
- * obtain by
- * ChunkSparsityPattern::symmetrize().
- */
- void symmetrize ();
-
- /**
- * Copy the given matrix to this
- * one. The operation throws an
- * error if the sparsity patterns
- * of the two involved matrices
- * do not point to the same
- * object, since in this case the
- * copy operation is
- * cheaper. Since this operation
- * is notheless not for free, we
- * do not make it available
- * through <tt>operator =</tt>,
- * since this may lead to
- * unwanted usage, e.g. in copy
- * arguments to functions, which
- * should really be arguments by
- * reference.
- *
- * The source matrix may be a matrix
- * of arbitrary type, as long as its
- * data type is convertible to the
- * data type of this matrix.
- *
- * The function returns a reference to
- * <tt>*this</tt>.
- */
- template <typename somenumber>
- ChunkSparseMatrix<number> &
- copy_from (const ChunkSparseMatrix<somenumber> &source);
-
- /**
- * This function is complete
- * analogous to the
- * ChunkSparsityPattern::copy_from()
- * function in that it allows to
- * initialize a whole matrix in
- * one step. See there for more
- * information on argument types
- * and their meaning. You can
- * also find a small example on
- * how to use this function
- * there.
- *
- * The only difference to the
- * cited function is that the
- * objects which the inner
- * iterator points to need to be
- * of type <tt>std::pair<unsigned
- * int, value</tt>, where
- * <tt>value</tt> needs to be
- * convertible to the element
- * type of this class, as
- * specified by the
- * <tt>number</tt> template
- * argument.
- *
- * Previous content of the matrix
- * is overwritten. Note that the
- * entries specified by the input
- * parameters need not
- * necessarily cover all elements
- * of the matrix. Elements not
- * covered remain untouched.
- */
- template <typename ForwardIterator>
- void copy_from (const ForwardIterator begin,
- const ForwardIterator end);
-
- /**
- * Copy the nonzero entries of a
- * full matrix into this
- * object. Previous content is
- * deleted. Note that the
- * underlying sparsity pattern
- * must be appropriate to hold
- * the nonzero entries of the
- * full matrix.
- */
- template <typename somenumber>
- void copy_from (const FullMatrix<somenumber> &matrix);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>. This
- * function throws an error if the
- * sparsity patterns of the two involved
- * matrices do not point to the same
- * object, since in this case the
- * operation is cheaper.
- *
- * The source matrix may be a sparse
- * matrix over an arbitrary underlying
- * scalar type, as long as its data type
- * is convertible to the data type of
- * this matrix.
- */
- template <typename somenumber>
- void add (const number factor,
- const ChunkSparseMatrix<somenumber> &matrix);
+ /**
+ * Set the element (<i>i,j</i>)
+ * to <tt>value</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
- void set (const unsigned int i,
- const unsigned int j,
++ void set (const types::global_dof_index i,
++ const types::global_dof_index j,
+ const number value);
+
+ /**
+ * Add <tt>value</tt> to the
+ * element (<i>i,j</i>). Throws
+ * an error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
- void add (const unsigned int i,
- const unsigned int j,
++ void add (const types::global_dof_index i,
++ const types::global_dof_index j,
+ const number value);
+
+ /**
+ * Multiply the entire matrix by a
+ * fixed factor.
+ */
+ ChunkSparseMatrix &operator *= (const number factor);
+
+ /**
+ * Divide the entire matrix by a
+ * fixed factor.
+ */
+ ChunkSparseMatrix &operator /= (const number factor);
+
+ /**
+ * Symmetrize the matrix by
+ * forming the mean value between
+ * the existing matrix and its
+ * transpose, $A = \frac 12(A+A^T)$.
+ *
+ * This operation assumes that
+ * the underlying sparsity
+ * pattern represents a symmetric
+ * object. If this is not the
+ * case, then the result of this
+ * operation will not be a
+ * symmetric matrix, since it
+ * only explicitly symmetrizes
+ * by looping over the lower left
+ * triangular part for efficiency
+ * reasons; if there are entries
+ * in the upper right triangle,
+ * then these elements are missed
+ * in the
+ * symmetrization. Symmetrization
+ * of the sparsity pattern can be
+ * obtained by
+ * ChunkSparsityPattern::symmetrize().
+ */
+ void symmetrize ();
+
+ /**
+ * Copy the given matrix to this
+ * one. The operation throws an
+ * error if the sparsity patterns
+ * of the two involved matrices
+ * do not point to the same
+ * object, since in this case the
+ * copy operation is
+ * cheaper. Since this operation
+ * is nonetheless not for free, we
+ * do not make it available
+ * through <tt>operator =</tt>,
+ * since this may lead to
+ * unwanted usage, e.g. in copy
+ * arguments to functions, which
+ * should really be arguments by
+ * reference.
+ *
+ * The source matrix may be a matrix
+ * of arbitrary type, as long as its
+ * data type is convertible to the
+ * data type of this matrix.
+ *
+ * The function returns a reference to
+ * <tt>*this</tt>.
+ */
+ template <typename somenumber>
+ ChunkSparseMatrix<number> &
+ copy_from (const ChunkSparseMatrix<somenumber> &source);
+
+ /**
+ * This function is completely
+ * analogous to the
+ * ChunkSparsityPattern::copy_from()
+ * function in that it allows to
+ * initialize a whole matrix in
+ * one step. See there for more
+ * information on argument types
+ * and their meaning. You can
+ * also find a small example on
+ * how to use this function
+ * there.
+ *
+ * The only difference to the
+ * cited function is that the
+ * objects which the inner
+ * iterator points to need to be
+ * of type <tt>std::pair<unsigned
+ * int, value></tt>, where
+ * <tt>value</tt> needs to be
+ * convertible to the element
+ * type of this class, as
+ * specified by the
+ * <tt>number</tt> template
+ * argument.
+ *
+ * Previous content of the matrix
+ * is overwritten. Note that the
+ * entries specified by the input
+ * parameters need not
+ * necessarily cover all elements
+ * of the matrix. Elements not
+ * covered remain untouched.
+ */
+ template <typename ForwardIterator>
+ void copy_from (const ForwardIterator begin,
+ const ForwardIterator end);
+
+ /**
+ * Copy the nonzero entries of a
+ * full matrix into this
+ * object. Previous content is
+ * deleted. Note that the
+ * underlying sparsity pattern
+ * must be appropriate to hold
+ * the nonzero entries of the
+ * full matrix.
+ */
+ template <typename somenumber>
+ void copy_from (const FullMatrix<somenumber> &matrix);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix <tt>factor*matrix</tt>
+ * is added to <tt>this</tt>. This
+ * function throws an error if the
+ * sparsity patterns of the two involved
+ * matrices do not point to the same
+ * object, since in this case the
+ * operation is cheaper.
+ *
+ * The source matrix may be a sparse
+ * matrix over an arbitrary underlying
+ * scalar type, as long as its data type
+ * is convertible to the data type of
+ * this matrix.
+ */
+ template <typename somenumber>
+ void add (const number factor,
+ const ChunkSparseMatrix<somenumber> &matrix);
//@}
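+ /**
+ * A minimal sketch of entry modification (illustration only; the indices
+ * used here are assumed to be present in the sparsity pattern of
+ * <tt>matrix</tt>):
+ *
+ * @code
+ * matrix.set (0, 0, 3.14);   // overwrite entry (0,0)
+ * matrix.add (0, 1, 1.0);    // add to entry (0,1), which must exist
+ * matrix *= 2.0;             // scale every stored entry
+ * matrix.symmetrize ();      // form (A + A^T)/2
+ * @endcode
+ */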
- /**
- * @name Entry Access
- */
+ /**
+ * @name Entry Access
+ */
//@{
- /**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the required element does
- * not exist in the matrix.
- *
- * In case you want a function
- * that returns zero instead (for
- * entries that are not in the
- * sparsity pattern of the
- * matrix), use the el()
- * function.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number operator () (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- /**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry (<i>i,j</i>). The
- * only difference is that if
- * this entry does not exist in
- * the sparsity pattern, then
- * instead of raising an
- * exception, zero is
- * returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number el (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic (see
- * ChunkSparsityPattern::optimize_diagonal()).
- *
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
- */
- number diag_element (const types::global_dof_index i) const;
-
- /**
- * Same as above, but return a
- * writeable reference. You're
- * sure you know what you do?
- */
- number & diag_element (const types::global_dof_index i);
+ /**
+ * Return the value of the entry
+ * (<i>i,j</i>). This may be an
+ * expensive operation and you
+ * should always take care where
+ * to call this function. In
+ * order to avoid abuse, this
+ * function throws an exception
+ * if the required element does
+ * not exist in the matrix.
+ *
+ * In case you want a function
+ * that returns zero instead (for
+ * entries that are not in the
+ * sparsity pattern of the
+ * matrix), use the el()
+ * function.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
- number operator () (const unsigned int i,
- const unsigned int j) const;
++ number operator () (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ /**
+ * This function is mostly like
+ * operator()() in that it
+ * returns the value of the
+ * matrix entry (<i>i,j</i>). The
+ * only difference is that if
+ * this entry does not exist in
+ * the sparsity pattern, then
+ * instead of raising an
+ * exception, zero is
+ * returned. While this may be
+ * convenient in some cases, note
+ * that it is simple to write
+ * algorithms that are slow
+ * compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
- number el (const unsigned int i,
- const unsigned int j) const;
++ number el (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic (see
+ * ChunkSparsityPattern::optimize_diagonal()).
+ *
+ * This function is considerably
+ * faster than the operator()(),
+ * since for quadratic matrices, the
+ * diagonal entry may be the
+ * first to be stored in each row
+ * and access therefore does not
+ * involve searching for the
+ * right column number.
+ */
- number diag_element (const unsigned int i) const;
++ number diag_element (const types::global_dof_index i) const;
+
+ /**
+ * Same as above, but return a
+ * writable reference. Make sure
+ * you know what you are doing.
+ */
- number &diag_element (const unsigned int i);
++ number &diag_element (const types::global_dof_index i);
//@}
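+ /**
+ * A minimal sketch of the three access variants (illustration only;
+ * the indices are assumed to be valid for <tt>matrix</tt>):
+ *
+ * @code
+ * const double a = matrix(0,0);             // throws if (0,0) is not stored
+ * const double b = matrix.el(0,5);          // returns 0 if (0,5) is not stored
+ * const double d = matrix.diag_element(0);  // fast access to the diagonal
+ * @endcode
+ */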
- /**
- * @name Matrix vector multiplications
- */
+ /**
+ * @name Matrix vector multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this
- * matrix. This function does the
- * same as vmult() but takes
- * the transposed matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockChunkSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
- * i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to be
- * quadratic for this operation, and for
- * the result to actually be a norm it
- * also needs to be either real symmetric
- * or complex hermitian.
- *
- * The underlying template types of both
- * this matrix and the given vector
- * should either both be real or
- * complex-valued, but not mixed, for
- * this function to make sense.
- */
- template <typename somenumber>
- somenumber matrix_norm_square (const Vector<somenumber> &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- */
- template <typename somenumber>
- somenumber matrix_scalar_product (const Vector<somenumber> &u,
- const Vector<somenumber> &v) const;
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to be
- * <i>r=b-Mx</i>. Write the
- * residual into
- * <tt>dst</tt>. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and destination
- * <i>dst</i> must not be the same
- * vector.
- */
- template <typename somenumber>
- somenumber residual (Vector<somenumber> &dst,
- const Vector<somenumber> &x,
- const Vector<somenumber> &b) const;
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does the
+ * same as vmult() but takes
+ * the transposed matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockChunkSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix,
+ * i.e. $\left(v,Mv\right)$. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to be
+ * quadratic for this operation, and for
+ * the result to actually be a norm it
+ * also needs to be either real symmetric
+ * or complex hermitian.
+ *
+ * The underlying template types of both
+ * this matrix and the given vector
+ * should either both be real or
+ * complex-valued, but not mixed, for
+ * this function to make sense.
+ */
+ template <typename somenumber>
+ somenumber matrix_norm_square (const Vector<somenumber> &v) const;
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ */
+ template <typename somenumber>
+ somenumber matrix_scalar_product (const Vector<somenumber> &u,
+ const Vector<somenumber> &v) const;
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to be
+ * <i>r=b-Mx</i>. Write the
+ * residual into
+ * <tt>dst</tt>. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and destination
+ * <i>dst</i> must not be the same
+ * vector.
+ */
+ template <typename somenumber>
+ somenumber residual (Vector<somenumber> &dst,
+ const Vector<somenumber> &x,
+ const Vector<somenumber> &b) const;
//@}
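+ /**
+ * A minimal matrix-vector sketch (illustration only; <tt>matrix</tt>,
+ * <tt>x</tt>, <tt>b</tt>, and <tt>r</tt> are assumed to have matching
+ * sizes):
+ *
+ * @code
+ * Vector<double> y (matrix.m());
+ * matrix.vmult (y, x);                           // y  = M x
+ * matrix.Tvmult_add (y, x);                      // y += M^T x
+ * const double res = matrix.residual (r, x, b);  // r = b - M x, returns |r|_2
+ * @endcode
+ */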
- /**
- * @name Matrix norms
- */
+ /**
+ * @name Matrix norms
+ */
//@{
- /**
- * Return the l1-norm of the matrix, that is
- * $|M|_1=max_{all columns j}\sum_{all
- * rows i} |M_ij|$,
- * (max. sum of columns).
- * This is the
- * natural matrix norm that is compatible
- * to the l1-norm for vectors, i.e.
- * $|Mv|_1\leq |M|_1 |v|_1$.
- * (cf. Haemmerlin-Hoffmann : Numerische Mathematik)
- */
- real_type l1_norm () const;
-
- /**
- * Return the linfty-norm of the
- * matrix, that is
- * $|M|_infty=max_{all rows i}\sum_{all
- * columns j} |M_ij|$,
- * (max. sum of rows).
- * This is the
- * natural matrix norm that is compatible
- * to the linfty-norm of vectors, i.e.
- * $|Mv|_infty \leq |M|_infty |v|_infty$.
- * (cf. Haemmerlin-Hoffmann : Numerische Mathematik)
- */
- real_type linfty_norm () const;
-
- /**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
- * matrix.
- */
- real_type frobenius_norm () const;
+ /**
+ * Return the l1-norm of the matrix, that is
+ * $|M|_1 = \max_j \sum_i |M_{ij}|$
+ * (the maximum of the column sums
+ * of absolute values).
+ * This is the
+ * natural matrix norm that is compatible
+ * to the l1-norm for vectors, i.e.
+ * $|Mv|_1\leq |M|_1 |v|_1$.
+ * (cf. Haemmerlin-Hoffmann : Numerische Mathematik)
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Return the linfty-norm of the
+ * matrix, that is
+ * $|M|_\infty = \max_i \sum_j |M_{ij}|$
+ * (the maximum of the row sums
+ * of absolute values).
+ * This is the
+ * natural matrix norm that is compatible
+ * to the linfty-norm of vectors, i.e.
+ * $|Mv|_\infty \leq |M|_\infty |v|_\infty$.
+ * (cf. Haemmerlin-Hoffmann : Numerische Mathematik)
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return the Frobenius norm of the
+ * matrix, i.e. the square root of the
+ * sum of squares of all entries in the
+ * matrix.
+ */
+ real_type frobenius_norm () const;
//@}
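+ /**
+ * For illustration, the three norms above could be printed side by side
+ * (assuming an initialized <tt>matrix</tt>):
+ *
+ * @code
+ * std::cout << "l1: "         << matrix.l1_norm ()
+ *           << ", linfty: "    << matrix.linfty_norm ()
+ *           << ", frobenius: " << matrix.frobenius_norm ()
+ *           << std::endl;
+ * @endcode
+ */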
- /**
- * @name Preconditioning methods
- */
+ /**
+ * @name Preconditioning methods
+ */
//@{
- /**
- * Apply the Jacobi
- * preconditioner, which
- * multiplies every element of
- * the <tt>src</tt> vector by the
- * inverse of the respective
- * diagonal element and
- * multiplies the result with the
- * relaxation factor <tt>omega</tt>.
- */
- template <typename somenumber>
- void precondition_Jacobi (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number omega = 1.) const;
-
- /**
- * Apply SSOR preconditioning to
- * <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_SSOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Apply SOR preconditioning
- * matrix to <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_SOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Apply transpose SOR
- * preconditioning matrix to
- * <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_TSOR (Vector<somenumber> &dst,
+ /**
+ * Apply the Jacobi
+ * preconditioner, which
+ * multiplies every element of
+ * the <tt>src</tt> vector by the
+ * inverse of the respective
+ * diagonal element and
+ * multiplies the result with the
+ * relaxation factor <tt>omega</tt>.
+ */
+ template <typename somenumber>
+ void precondition_Jacobi (Vector<somenumber> &dst,
const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Perform SSOR preconditioning
- * in-place. Apply the
- * preconditioner matrix without
- * copying to a second vector.
- * <tt>omega</tt> is the relaxation
- * parameter.
- */
- template <typename somenumber>
- void SSOR (Vector<somenumber> &v,
- const number omega = 1.) const;
-
- /**
- * Perform an SOR preconditioning
- * in-place. <tt>omega</tt> is
- * the relaxation parameter.
- */
- template <typename somenumber>
- void SOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Apply SSOR preconditioning to
+ * <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_SSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Apply SOR preconditioning
+ * matrix to <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_SOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Apply transpose SOR
+ * preconditioning matrix to
+ * <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_TSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Perform SSOR preconditioning
+ * in-place. Apply the
+ * preconditioner matrix without
+ * copying to a second vector.
+ * <tt>omega</tt> is the relaxation
+ * parameter.
+ */
+ template <typename somenumber>
+ void SSOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Perform an SOR preconditioning
+ * in-place. <tt>omega</tt> is
+ * the relaxation parameter.
+ */
+ template <typename somenumber>
+ void SOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a transpose SOR
+ * preconditioning in-place.
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TSOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a permuted SOR
+ * preconditioning in-place.
+ *
+ * The standard SOR method is
+ * applied in the order
+ * prescribed by <tt>permutation</tt>,
+ * that is, first the row
+ * <tt>permutation[0]</tt>, then
+ * <tt>permutation[1]</tt> and so
+ * on. For efficiency reasons,
+ * the permutation as well as its
+ * inverse are required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void PSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
+ const number om = 1.) const;
+
+ /**
+ * Perform a transposed permuted SOR
+ * preconditioning in-place.
+ *
+ * The transposed SOR method is
+ * applied in the order
+ * prescribed by
+ * <tt>permutation</tt>, that is,
+ * first the row
+ * <tt>permutation[m()-1]</tt>,
+ * then
+ * <tt>permutation[m()-2]</tt>
+ * and so on. For efficiency
+ * reasons, the permutation as
+ * well as its inverse are
+ * required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TPSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
const number om = 1.) const;
- /**
- * Perform a transpose SOR
- * preconditioning in-place.
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TSOR (Vector<somenumber> &v,
- const number om = 1.) const;
-
- /**
- * Perform a permuted SOR
- * preconditioning in-place.
- *
- * The standard SOR method is
- * applied in the order
- * prescribed by <tt>permutation</tt>,
- * that is, first the row
- * <tt>permutation[0]</tt>, then
- * <tt>permutation[1]</tt> and so
- * on. For efficiency reasons,
- * the permutation as well as its
- * inverse are required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void PSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Perform a transposed permuted SOR
- * preconditioning in-place.
- *
- * The transposed SOR method is
- * applied in the order
- * prescribed by
- * <tt>permutation</tt>, that is,
- * first the row
- * <tt>permutation[m()-1]</tt>,
- * then
- * <tt>permutation[m()-2]</tt>
- * and so on. For efficiency
- * reasons, the permutation as
- * well as its inverse are
- * required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TPSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Do one SOR step on <tt>v</tt>.
- * Performs a direct SOR step
- * with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void SOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one adjoint SOR step on
- * <tt>v</tt>. Performs a direct
- * TSOR step with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void TSOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one SSOR step on
- * <tt>v</tt>. Performs a direct
- * SSOR step with right hand side
- * <tt>b</tt> by performing TSOR
- * after SOR.
- */
- template <typename somenumber>
- void SSOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
+ /**
+ * Do one SOR step on <tt>v</tt>.
+ * Performs a direct SOR step
+ * with right hand side
+ * <tt>b</tt>.
+ */
+ template <typename somenumber>
+ void SOR_step (Vector<somenumber> &v,
+ const Vector<somenumber> &b,
+ const number om = 1.) const;
+
+ /**
+ * Do one adjoint SOR step on
+ * <tt>v</tt>. Performs a direct
+ * TSOR step with right hand side
+ * <tt>b</tt>.
+ */
+ template <typename somenumber>
+ void TSOR_step (Vector<somenumber> &v,
+ const Vector<somenumber> &b,
+ const number om = 1.) const;
+
+ /**
+ * Do one SSOR step on
+ * <tt>v</tt>. Performs a direct
+ * SSOR step with right hand side
+ * <tt>b</tt> by performing TSOR
+ * after SOR.
+ */
+ template <typename somenumber>
+ void SSOR_step (Vector<somenumber> &v,
+ const Vector<somenumber> &b,
+ const number om = 1.) const;
//@}
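+ /**
+ * A minimal sketch of applying the relaxation methods above
+ * (illustration only; <tt>matrix</tt>, <tt>dst</tt>, <tt>src</tt>,
+ * <tt>v</tt>, and <tt>b</tt> are assumed to be sized consistently):
+ *
+ * @code
+ * matrix.precondition_SSOR (dst, src, 1.2); // dst ~ A^{-1} src, relaxation 1.2
+ * matrix.SOR_step (v, b);                   // one in-place SOR sweep for A v = b
+ * @endcode
+ */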
- /**
- * @name Input/Output
- */
+ /**
+ * @name Input/Output
+ */
//@{
- /**
- * Print the matrix to the given
- * stream, using the format
- * <tt>(line,col) value</tt>,
- * i.e. one nonzero entry of the
- * matrix per line.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the matrix in the usual
- * format, i.e. as a matrix and
- * not as a list of nonzero
- * elements. For better
- * readability, elements not in
- * the matrix are displayed as
- * empty space, while matrix
- * elements which are explicitly
- * set to zero are displayed as
- * such.
- *
- * The parameters allow for a
- * flexible setting of the output
- * format: <tt>precision</tt> and
- * <tt>scientific</tt> are used
- * to determine the number
- * format, where <tt>scientific =
- * false</tt> means fixed point
- * notation. A zero entry for
- * <tt>width</tt> makes the
- * function compute a width, but
- * it may be changed to a
- * positive value, if output is
- * crude.
- *
- * Additionally, a character for
- * an empty value may be
- * specified.
- *
- * Finally, the whole matrix can
- * be multiplied with a common
- * denominator to produce more
- * readable output, even
- * integers.
- *
- * @attention This function may
- * produce <b>large</b> amounts
- * of output if applied to a
- * large matrix!
- */
- void print_formatted (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const unsigned int width = 0,
- const char *zero_string = " ",
- const double denominator = 1.) const;
-
- /**
- * Print the actual pattern of
- * the matrix. For each entry
- * with an absolute value larger
- * than threshold, a '*' is
- * printed, a ':' for every value
- * smaller and a '.' for every
- * entry not allocated.
- */
- void print_pattern(std::ostream& out,
- const double threshold = 0.) const;
-
- /**
- * Write the data of this object
- * en bloc to a file. This is
- * done in a binary mode, so the
- * output is neither readable by
- * humans nor (probably) by other
- * computers using a different
- * operating system of number
- * format.
- *
- * The purpose of this function
- * is that you can swap out
- * matrices and sparsity pattern
- * if you are short of memory,
- * want to communicate between
- * different programs, or allow
- * objects to be persistent
- * across different runs of the
- * program.
- */
- void block_write (std::ostream &out) const;
-
- /**
- * Read data that has previously
- * been written by block_write()
- * from a file. This is done
- * using the inverse operations
- * to the above function, so it
- * is reasonably fast because the
- * bitstream is not interpreted
- * except for a few numbers up
- * front.
- *
- * The object is resized on this
- * operation, and all previous
- * contents are lost. Note,
- * however, that no checks are
- * performed whether new data and
- * the underlying ChunkSparsityPattern
- * object fit together. It is
- * your responsibility to make
- * sure that the sparsity pattern
- * and the data to be read match.
- *
- * A primitive form of error
- * checking is performed which
- * will recognize the bluntest
- * attempts to interpret some
- * data as a matrix stored
- * bitwise to a file that wasn't
- * actually created that way, but
- * not more.
- */
- void block_read (std::istream &in);
+ /**
+ * Print the matrix to the given
+ * stream, using the format
+ * <tt>(line,col) value</tt>,
+ * i.e. one nonzero entry of the
+ * matrix per line.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the matrix in the usual
+ * format, i.e. as a matrix and
+ * not as a list of nonzero
+ * elements. For better
+ * readability, elements not in
+ * the matrix are displayed as
+ * empty space, while matrix
+ * elements which are explicitly
+ * set to zero are displayed as
+ * such.
+ *
+ * The parameters allow for a
+ * flexible setting of the output
+ * format: <tt>precision</tt> and
+ * <tt>scientific</tt> are used
+ * to determine the number
+ * format, where <tt>scientific =
+ * false</tt> means fixed point
+ * notation. A zero entry for
+ * <tt>width</tt> makes the
+ * function compute a width, but
+ * it may be changed to a
+ * positive value, if output is
+ * crude.
+ *
+ * Additionally, a character for
+ * an empty value may be
+ * specified.
+ *
+ * Finally, the whole matrix can
+ * be multiplied with a common
+ * denominator to produce more
+ * readable output, even
+ * integers.
+ *
+ * @attention This function may
+ * produce <b>large</b> amounts
+ * of output if applied to a
+ * large matrix!
+ */
+ void print_formatted (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const unsigned int width = 0,
+ const char *zero_string = " ",
+ const double denominator = 1.) const;
+
+ /**
+ * Print the actual pattern of
+ * the matrix. For each entry
+ * with an absolute value larger
+ * than threshold, a '*' is
+ * printed, a ':' for every value
+ * smaller and a '.' for every
+ * entry not allocated.
+ */
+ void print_pattern(std::ostream &out,
+ const double threshold = 0.) const;
+
+ /**
+ * Write the data of this object
+ * en bloc to a file. This is
+ * done in a binary mode, so the
+ * output is neither readable by
+ * humans nor (probably) by other
+ * computers using a different
+ * operating system or number
+ * format.
+ *
+ * The purpose of this function
+ * is that you can swap out
+ * matrices and sparsity pattern
+ * if you are short of memory,
+ * want to communicate between
+ * different programs, or allow
+ * objects to be persistent
+ * across different runs of the
+ * program.
+ */
+ void block_write (std::ostream &out) const;
+
+ /**
+ * Read data that has previously
+ * been written by block_write()
+ * from a file. This is done
+ * using the inverse operations
+ * to the above function, so it
+ * is reasonably fast because the
+ * bitstream is not interpreted
+ * except for a few numbers up
+ * front.
+ *
+ * The object is resized on this
+ * operation, and all previous
+ * contents are lost. Note,
+ * however, that no checks are
+ * performed whether new data and
+ * the underlying ChunkSparsityPattern
+ * object fit together. It is
+ * your responsibility to make
+ * sure that the sparsity pattern
+ * and the data to be read match.
+ *
+ * A primitive form of error
+ * checking is performed which
+ * will recognize the bluntest
+ * attempts to interpret some
+ * data as a matrix stored
+ * bitwise to a file that wasn't
+ * actually created that way, but
+ * not more.
+ */
+ void block_read (std::istream &in);
//@}
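+ /**
+ * A minimal output/serialization sketch (illustration only; the file
+ * name is an assumption):
+ *
+ * @code
+ * matrix.print_formatted (std::cout, 2, false, 8, "0");
+ *
+ * std::ofstream out ("matrix.dat", std::ios::binary);
+ * matrix.block_write (out);  // re-read later with block_read() and the
+ *                            // same sparsity pattern
+ * @endcode
+ */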
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException2 (ExcInvalidIndex,
- int, int,
- << "The entry with index <" << arg1 << ',' << arg2
- << "> does not exist.");
- /**
- * Exception
- */
- DeclException1 (ExcInvalidIndex1,
- int,
- << "The index " << arg1 << " is not in the allowed range.");
- /**
- * Exception
- */
- DeclException0 (ExcDifferentChunkSparsityPatterns);
- /**
- * Exception
- */
- DeclException2 (ExcIteratorRange,
- int, int,
- << "The iterators denote a range of " << arg1
- << " elements, but the given number of rows was " << arg2);
- /**
- * Exception
- */
- DeclException0 (ExcSourceEqualsDestination);
- //@}
- private:
- /**
- * Pointer to the sparsity
- * pattern used for this
- * matrix. In order to guarantee
- * that it is not deleted while
- * still in use, we subscribe to
- * it using the SmartPointer
- * class.
- */
- SmartPointer<const ChunkSparsityPattern,ChunkSparseMatrix<number> > cols;
-
- /**
- * Array of values for all the
- * nonzero entries. The position
- * within the matrix, i.e. the
- * row and column number for a
- * given entry can only be
- * deduced using the sparsity
- * pattern. The same holds for
- * the more common operation of
- * finding an entry by its
- * coordinates.
- */
- number *val;
-
- /**
- * Allocated size of #val. This
- * can be larger than the
- * actually used part if the size
- * of the matrix was reduced
- * somewhen in the past by
- * associating a sparsity pattern
- * with a smaller size to this
- * object, using the reinit()
- * function.
- */
- unsigned int max_len;
-
- /**
- * Return the location of entry
- * $(i,j)$ within the val array.
- */
- unsigned int compute_location (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- // make all other sparse matrices
- // friends
- template <typename somenumber> friend class ChunkSparseMatrix;
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidIndex,
+ int, int,
+ << "The entry with index <" << arg1 << ',' << arg2
+ << "> does not exist.");
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidIndex1,
+ int,
+ << "The index " << arg1 << " is not in the allowed range.");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcDifferentChunkSparsityPatterns);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcIteratorRange,
+ int, int,
+ << "The iterators denote a range of " << arg1
+ << " elements, but the given number of rows was " << arg2);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcSourceEqualsDestination);
+ //@}
+ private:
+ /**
+ * Pointer to the sparsity
+ * pattern used for this
+ * matrix. In order to guarantee
+ * that it is not deleted while
+ * still in use, we subscribe to
+ * it using the SmartPointer
+ * class.
+ */
+ SmartPointer<const ChunkSparsityPattern,ChunkSparseMatrix<number> > cols;
+
+ /**
+ * Array of values for all the
+ * nonzero entries. The position
+ * within the matrix, i.e. the
+ * row and column number for a
+ * given entry can only be
+ * deduced using the sparsity
+ * pattern. The same holds for
+ * the more common operation of
+ * finding an entry by its
+ * coordinates.
+ */
+ number *val;
+
+ /**
+ * Allocated size of #val. This
+ * can be larger than the
+ * actually used part if the size
+ * of the matrix was reduced
+ * at some point in the past by
+ * associating a sparsity pattern
+ * with a smaller size to this
+ * object, using the reinit()
+ * function.
+ */
+ unsigned int max_len;
+
+ /**
+ * Return the location of entry
+ * $(i,j)$ within the val array.
+ */
- unsigned int compute_location (const unsigned int i,
- const unsigned int j) const;
++ unsigned int compute_location (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ // make all other sparse matrices
+ // friends
+ template <typename somenumber> friend class ChunkSparseMatrix;
};
/*@}*/
template <typename number>
inline
- number & ChunkSparseMatrix<number>::diag_element (const unsigned int i)
++number &ChunkSparseMatrix<number>::diag_element (const types::global_dof_index i)
{
Assert (cols != 0, ExcNotInitialized());
Assert (cols->optimize_diagonal(), ExcNotQuadratic());
template <typename number>
ChunkSparseMatrix<number>::ChunkSparseMatrix (const ChunkSparsityPattern &c,
- const IdentityMatrix &id)
+ const IdentityMatrix &id)
- :
- cols(0, "ChunkSparseMatrix"),
- val(0),
- max_len(0)
+ :
+ cols(0, "ChunkSparseMatrix"),
+ val(0),
+ max_len(0)
{
Assert (c.n_rows() == id.m(), ExcDimensionMismatch (c.n_rows(), id.m()));
Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n()));
*/
class ChunkSparsityPattern : public Subscriptor
{
- public:
-
- /**
- * Define a value which is used
- * to indicate that a certain
- * value in the colnums array
- * is unused, i.e. does not
- * represent a certain column
- * number index.
- *
- * Indices with this invalid
- * value are used to insert new
- * entries to the sparsity
- * pattern using the add() member
- * function, and are removed when
- * calling compress().
- *
- * You should not assume that the
- * variable declared here has a
- * certain value. The
- * initialization is given here
- * only to enable the compiler to
- * perform some optimizations,
- * but the actual value of the
- * variable may change over time.
- */
- static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- ChunkSparsityPattern ();
-
- /**
- * Copy constructor. This
- * constructor is only allowed to
- * be called if the matrix
- * structure to be copied is
- * empty. This is so in order to
- * prevent involuntary copies of
- * objects for temporaries, which
- * can use large amounts of
- * computing time. However, copy
- * constructors are needed if you
- * want to use the STL data types
- * on classes like this, e.g. to
- * write such statements like
- * <tt>v.push_back
- * (ChunkSparsityPattern());</tt>,
- * with <tt>v</tt> a vector of
- * ChunkSparsityPattern objects.
- *
- * Usually, it is sufficient to
- * use the explicit keyword to
- * disallow unwanted temporaries,
- * but for the STL vectors, this
- * does not work. Since copying a
- * structure like this is not
- * useful anyway because multiple
- * matrices can use the same
- * sparsity structure, copies are
- * only allowed for empty
- * objects, as described above.
- */
- ChunkSparsityPattern (const ChunkSparsityPattern &);
-
- /**
- * Initialize a rectangular
- * matrix.
- *
- * @arg m number of rows
- * @arg n number of columns
- * @arg max_chunks_per_row maximum
- * number of nonzero entries per row
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal(). This
- * takes effect for quadratic
- * matrices only.
- */
- ChunkSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const unsigned int max_chunks_per_row,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Initialize a rectangular
- * matrix.
- *
- * @arg m number of rows
- * @arg n number of columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal(). This
- * takes effect for quadratic
- * matrices only.
- */
- ChunkSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const std::vector<unsigned int>& row_lengths,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Initialize a quadratic matrix
- * of dimension <tt>n</tt> with
- * at most <tt>max_per_row</tt>
- * nonzero entries per row.
- *
- * This constructor automatically
- * enables optimized storage of
- * diagonal elements. To avoid
- * this, use the constructor
- * taking row and column numbers
- * separately.
- */
- ChunkSparsityPattern (const types::global_dof_index n,
- const unsigned int max_per_row,
- const unsigned int chunk_size);
-
- /**
- * Initialize a quadratic matrix.
- *
- * @arg m number of rows and columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal().
- */
- ChunkSparsityPattern (const types::global_dof_index m,
- const std::vector<unsigned int>& row_lengths,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Destructor.
- */
- ~ChunkSparsityPattern ();
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- ChunkSparsityPattern & operator = (const ChunkSparsityPattern &);
-
- /**
- * Reallocate memory and set up data
- * structures for a new matrix with
- * <tt>m </tt>rows and <tt>n</tt> columns,
- * with at most <tt>max_per_row</tt>
- * nonzero entries per row.
- *
- * This function simply maps its
- * operations to the other
- * <tt>reinit</tt> function.
- */
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const unsigned int max_per_row,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Reallocate memory for a matrix
- * of size <tt>m x n</tt>. The
- * number of entries for each row
- * is taken from the array
- * <tt>row_lengths</tt> which has to
- * give this number for each row
- * <tt>i=1...m</tt>.
- *
- * If <tt>m*n==0</tt> all memory is freed,
- * resulting in a total reinitialization
- * of the object. If it is nonzero, new
- * memory is only allocated if the new
- * size extends the old one. This is done
- * to save time and to avoid fragmentation
- * of the heap.
- *
- * If the number of rows equals
- * the number of columns and the
- * last parameter is true,
- * diagonal elements are stored
- * first in each row to allow
- * optimized access in relaxation
- * methods of SparseMatrix.
- */
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const std::vector<unsigned int> &row_lengths,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Same as above, but with a
- * VectorSlice argument instead.
- */
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * This function compresses the sparsity
- * structure that this object represents.
- * It does so by eliminating unused
- * entries and sorting the remaining ones
- * to allow faster access by usage of
- * binary search algorithms. A special
- * sorting scheme is used for the
- * diagonal entry of quadratic matrices,
- * which is always the first entry of
- * each row.
- *
- * The memory which is no longer
- * needed is released.
- *
- * SparseMatrix objects require the
- * ChunkSparsityPattern objects they are
- * initialized with to be compressed, to
- * reduce memory requirements.
- */
- void compress ();
-
- /**
- * This function can be used as a
- * replacement for reinit(),
- * subsequent calls to add() and
- * a final call to close() if you
- * know exactly in advance the
- * entries that will form the
- * matrix sparsity pattern.
- *
- * The first two parameters
- * determine the size of the
- * matrix. For the two last ones,
- * note that a sparse matrix can
- * be described by a sequence of
- * rows, each of which is
- * represented by a sequence of
- * pairs of column indices and
- * values. In the present
- * context, the begin() and
- * end() parameters designate
- * iterators (of forward iterator
- * type) into a container, one
- * representing one row. The
- * distance between begin()
- * and end() should therefore
- * be equal to
- * n_rows(). These iterators
- * may be iterators of
- * <tt>std::vector</tt>,
- * <tt>std::list</tt>, pointers into a
- * C-style array, or any other
- * iterator satisfying the
- * requirements of a forward
- * iterator. The objects pointed
- * to by these iterators
- * (i.e. what we get after
- * applying <tt>operator*</tt> or
- * <tt>operator-></tt> to one of these
- * iterators) must be a container
- * itself that provides functions
- * <tt>begin</tt> and <tt>end</tt>
- * designating a range of
- * iterators that describe the
- * contents of one
- * line. Dereferencing these
- * inner iterators must either
- * yield a pair of an unsigned
- * integer as column index and a
- * value of arbitrary type (such
- * a type would be used if we
- * wanted to describe a sparse
- * matrix with one such object),
- * or simply an unsigned integer
- * (if we only wanted to describe
- * a sparsity pattern). The
- * function is able to determine
- * itself whether an unsigned
- * integer or a pair is what we
- * get after dereferencing the
- * inner iterators, through some
- * template magic.
- *
- * While the order of the outer
- * iterators denotes the
- * different rows of the matrix,
- * the order of the inner
- * iterator denoting the columns
- * does not matter, as they are
- * sorted internally by this
- * function anyway.
- *
- * Since that all sounds very
- * complicated, consider the
- * following example code, which
- * may be used to fill a sparsity
- * pattern:
- * @code
- * std::vector<std::vector<unsigned int> > column_indices (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
- * // generate necessary columns in this row
- * fill_row (column_indices[row]);
- *
- * sparsity.copy_from (n_rows, n_cols,
- * column_indices.begin(),
- * column_indices.end());
- * @endcode
- *
- * Note that this example works
- * since the iterators
- * dereferenced yield containers
- * with functions <tt>begin</tt> and
- * <tt>end</tt> (namely
- * <tt>std::vector</tt>s), and the
- * inner iterators dereferenced
- * yield unsigned integers as
- * column indices. Note that we
- * could have replaced each of
- * the two <tt>std::vector</tt>
- * occurrences by <tt>std::list</tt>,
- * and the inner one by
- * <tt>std::set</tt> as well.
- *
- * Another example would be as
- * follows, where we initialize a
- * whole matrix, not only a
- * sparsity pattern:
- * @code
- * std::vector<std::map<unsigned int,double> > entries (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
- * // generate necessary pairs of columns
- * // and corresponding values in this row
- * fill_row (entries[row]);
- *
- * sparsity.copy_from (n_rows, n_cols,
- * column_indices.begin(),
- * column_indices.end());
- * matrix.reinit (sparsity);
- * matrix.copy_from (column_indices.begin(),
- * column_indices.end());
- * @endcode
- *
- * This example works because
- * dereferencing iterators of the
- * inner type yields a pair of
- * unsigned integers and a value,
- * the first of which we take as
- * column index. As previously,
- * the outer <tt>std::vector</tt>
- * could be replaced by
- * <tt>std::list</tt>, and the inner
- * <tt>std::map<unsigned int,double></tt>
- * could be replaced by
- * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
- * or a list or set of such
- * pairs, as they all return
- * iterators that point to such
- * pairs.
- */
- template <typename ForwardIterator>
- void copy_from (const types::global_dof_index n_rows,
- const types::global_dof_index n_cols,
- const ForwardIterator begin,
- const ForwardIterator end,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Copy data from an object of type
- * CompressedSparsityPattern,
- * CompressedSetSparsityPattern or
- * CompressedSimpleSparsityPattern.
- * Previous content of this object is
- * lost, and the sparsity pattern is in
- * compressed mode afterwards.
- */
- template <typename SparsityType>
- void copy_from (const SparsityType &csp,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Take a full matrix and use its
- * nonzero entries to generate a
- * sparse matrix entry pattern
- * for this object.
- *
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
- * compressed mode afterwards.
- */
- template <typename number>
- void copy_from (const FullMatrix<number> &matrix,
- const unsigned int chunk_size,
- const bool optimize_diagonal = true);
-
- /**
- * Return whether the object is empty. It
- * is empty if no memory is allocated,
- * which is the same as that both
- * dimensions are zero.
- */
- bool empty () const;
-
- /**
- * Return the chunk size given as
- * argument when constructing this
- * object.
- */
- unsigned int get_chunk_size () const;
-
- /**
- * Return the maximum number of entries per
- * row. Before compression, this equals the
- * number given to the constructor, while
- * after compression, it equals the maximum
- * number of entries actually allocated by
- * the user.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Add a nonzero entry to the matrix.
- * This function may only be called
- * for non-compressed sparsity patterns.
- *
- * If the entry already exists, nothing
- * bad happens.
- */
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
-
- /**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
- * transpose object.
- *
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * quadratic matrix.
- */
- void symmetrize ();
-
- /**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
- */
- inline types::global_dof_index n_rows () const;
-
- /**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
- */
- inline types::global_dof_index n_cols () const;
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const types::global_dof_index row) const;
-
- /**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of $|i-j|$
- * for which the index pair $(i,j)$
- * represents a nonzero entry of the
- * matrix. Consequently, the maximum
- * bandwidth a $n\times m$ matrix can
- * have is $\max\{n-1,m-1\}$.
- */
- unsigned int bandwidth () const;
-
- /**
- * Return the number of nonzero elements of
- * this matrix. Actually, it returns the
- * number of entries in the sparsity
- * pattern; if any of the entries should
- * happen to be zero, it is counted
- * anyway.
- *
- * This function may only be called if the
- * matrix struct is compressed. It does not
- * make too much sense otherwise anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return whether the structure is
- * compressed or not.
- */
- bool is_compressed () const;
-
- /**
- * Determine whether the matrix
- * uses special convention for
- * quadratic matrices.
- *
- * A return value <tt>true</tt> means
- * that diagonal elements are stored
- * first in each row. A number of
- * functions in this class and the
- * library in general, for example
- * relaxation methods like Jacobi() and
- * SOR(), require this to make their
- * operations more efficient, since they
- * need to quickly access the diagonal
- * elements and do not have to search for
- * them if they are the first element of
- * each row. A side effect of this scheme
- * is that each row contains at least one
- * element, even if the row is empty
- * (i.e. the diagonal element exists, but
- * has value zero).
- *
- * A return value <tt>false</tt> means
- * that diagonal elements are stored
- * anywhere in the row, or not at all. In
- * particular, a row or even the whole
- * matrix may be empty. This can be used
- * if you have block matrices where the
- * off-diagonal blocks are quadratic but
- * are never used for operations like the
- * ones mentioned above. In this case,
- * some memory can be saved by not using
- * the diagonal storage optimization.
- */
- bool optimize_diagonal () const;
-
- /**
- * Return whether this object stores only
- * those entries that have been added
- * explicitly, or if the sparsity pattern
- * contains elements that have been added
- * through other means (implicitly) while
- * building it. For the current class,
- * the result is false because we store
- * entire chunks, not individual
- * elements, and adding one entry to the
- * sparsity pattern requires also adding
- * all the other elements of a chunk. The
- * only exception is if
- * <code>chunk_size==1</code>, the
- * sparsity pattern is nonsymmetric or
- * optimize_diag has been set to false.
- *
- * This function mainly serves the
- * purpose of describing the current
- * class in cases where several kinds of
- * sparsity patterns can be passed as
- * template arguments.
- */
- bool stores_only_added_elements () const;
-
- /**
- * Write the data of this object
- * en bloc to a file. This is
- * done in a binary mode, so the
- * output is neither readable by
- * humans nor (probably) by other
- * computers using a different
- * operating system or number
- * format.
- *
- * The purpose of this function
- * is that you can swap out
- * matrices and sparsity pattern
- * if you are short of memory,
- * want to communicate between
- * different programs, or allow
- * objects to be persistent
- * across different runs of the
- * program.
- */
- void block_write (std::ostream &out) const;
-
- /**
- * Read data that has previously
- * been written by block_write()
- * from a file. This is done
- * using the inverse operations
- * to the above function, so it
- * is reasonably fast because the
- * bitstream is not interpreted
- * except for a few numbers up
- * front.
- *
- * The object is resized on this
- * operation, and all previous
- * contents are lost.
- *
- * A primitive form of error
- * checking is performed which
- * will recognize the bluntest
- * attempts to interpret some
- * data as a vector stored
- * bitwise to a file, but not
- * more.
- */
- void block_read (std::istream &in);
-
- /**
- * Print the sparsity of the
- * matrix. The output consists of
- * one line per row of the format
- * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
- * is the row number and
- * <i>jn</i> are the allocated
- * columns in this row.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the sparsity of the matrix
- * in a format that <tt>gnuplot</tt> understands
- * and which can be used to plot the
- * sparsity pattern in a graphical
- * way. The format consists of pairs
- * <tt>i j</tt> of nonzero elements, each
- * representing one entry of this
- * matrix, one per line of the output
- * file. Indices are counted from
- * zero on, as usual. Since sparsity
- * patterns are printed in the same
- * way as matrices are displayed, we
- * print the negative of the column
- * index, which means that the
- * <tt>(0,0)</tt> element is in the top left
- * rather than in the bottom left
- * corner.
- *
- * Print the sparsity pattern in
- * gnuplot by setting the data style
- * to dots or points and use the
- * <tt>plot</tt> command.
- */
- void print_gnuplot (std::ostream &out) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
-
- /** @addtogroup Exceptions
- * @{ */
- /**
- * Exception
- */
- DeclException1 (ExcInvalidNumber,
- int,
- << "The provided number is invalid here: " << arg1);
- /**
- * Exception
- */
- DeclException2 (ExcInvalidIndex,
- int, int,
- << "The given index " << arg1
- << " should be less than " << arg2 << ".");
- /**
- * Exception
- */
- DeclException2 (ExcNotEnoughSpace,
- int, int,
- << "Upon entering a new entry to row " << arg1
- << ": there was no free entry any more. " << std::endl
- << "(Maximum number of entries for this row: "
- << arg2 << "; maybe the matrix is already compressed?)");
- /**
- * Exception
- */
- DeclException0 (ExcNotCompressed);
- /**
- * Exception
- */
- DeclException0 (ExcMatrixIsCompressed);
- /**
- * Exception
- */
- DeclException0 (ExcEmptyObject);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidConstructorCall);
- /**
- * This exception is thrown if
- * the matrix does not follow the
- * convention of storing diagonal
- * elements first in row. Refer
- * to
- * SparsityPattern::optimize_diagonal()
- * for more information.
- */
- DeclException0 (ExcDiagonalNotOptimized);
- /**
- * Exception
- */
- DeclException2 (ExcIteratorRange,
- int, int,
- << "The iterators denote a range of " << arg1
- << " elements, but the given number of rows was " << arg2);
- /**
- * Exception
- */
- DeclException0 (ExcMETISNotInstalled);
- /**
- * Exception
- */
- DeclException1 (ExcInvalidNumberOfPartitions,
- int,
- << "The number of partitions you gave is " << arg1
- << ", but must be greater than zero.");
- /**
- * Exception
- */
- DeclException2 (ExcInvalidArraySize,
- int, int,
- << "The array has size " << arg1 << " but should have size "
- << arg2);
- //@}
- private:
- /**
- * Number of rows that this sparsity
- * structure shall represent.
- */
- types::global_dof_index rows;
-
- /**
- * Number of columns that this sparsity
- * structure shall represent.
- */
- types::global_dof_index cols;
-
- /**
- * The size of chunks.
- */
- unsigned int chunk_size;
-
- /**
- * The reduced sparsity pattern. We store
- * only which chunks exist, with each
- * chunk a block in the matrix of size
- * chunk_size by chunk_size.
- */
- SparsityPattern sparsity_pattern;
-
- /**
- * Make all the chunk sparse matrix kinds
- * friends.
- */
- template <typename> friend class ChunkSparseMatrix;
+ public:
+
+ /**
+ * Define a value which is used
+ * to indicate that a certain
+ * value in the colnums array
+ * is unused, i.e. does not
+ * represent a certain column
+ * number index.
+ *
+ * Indices with this invalid
+ * value are used to insert new
+ * entries to the sparsity
+ * pattern using the add() member
+ * function, and are removed when
+ * calling compress().
+ *
+ * You should not assume that the
+ * variable declared here has a
+ * certain value. The
+ * initialization is given here
+ * only to enable the compiler to
+ * perform some optimizations,
+ * but the actual value of the
+ * variable may change over time.
+ */
+ static const unsigned int invalid_entry = SparsityPattern::invalid_entry;
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ ChunkSparsityPattern ();
+
+ /**
+ * Copy constructor. This
+ * constructor is only allowed to
+ * be called if the matrix
+ * structure to be copied is
+ * empty. This is so in order to
+ * prevent involuntary copies of
+ * objects for temporaries, which
+ * can use large amounts of
+ * computing time. However, copy
+ * constructors are needed if you
+ * want to use the STL data types
+ * on classes like this, e.g. to
+ * write such statements like
+ * <tt>v.push_back
+ * (ChunkSparsityPattern());</tt>,
+ * with <tt>v</tt> a vector of
+ * ChunkSparsityPattern objects.
+ *
+ * Usually, it is sufficient to
+ * use the explicit keyword to
+ * disallow unwanted temporaries,
+ * but for the STL vectors, this
+ * does not work. Since copying a
+ * structure like this is not
+ * useful anyway because multiple
+ * matrices can use the same
+ * sparsity structure, copies are
+ * only allowed for empty
+ * objects, as described above.
+ */
+ ChunkSparsityPattern (const ChunkSparsityPattern &);
+
+ /**
+ * Initialize a rectangular
+ * matrix.
+ *
+ * @arg m number of rows
+ * @arg n number of columns
+ * @arg max_chunks_per_row maximum
+ * number of nonzero entries per row
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal(). This
+ * takes effect for quadratic
+ * matrices only.
+ */
- ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
++ ChunkSparsityPattern (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const unsigned int max_chunks_per_row,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Initialize a rectangular
+ * matrix.
+ *
+ * @arg m number of rows
+ * @arg n number of columns
+ *
+ * @arg row_lengths possible
+ * number of nonzero entries for
+ * each row. This vector must
+ * have one entry for each row.
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal(). This
+ * takes effect for quadratic
+ * matrices only.
+ */
- ChunkSparsityPattern (const unsigned int m,
- const unsigned int n,
++ ChunkSparsityPattern (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const std::vector<unsigned int> &row_lengths,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Initialize a quadratic matrix
+ * of dimension <tt>n</tt> with
+ * at most <tt>max_per_row</tt>
+ * nonzero entries per row.
+ *
+ * This constructor automatically
+ * enables optimized storage of
+ * diagonal elements. To avoid
+ * this, use the constructor
+ * taking row and column numbers
+ * separately.
+ */
- ChunkSparsityPattern (const unsigned int n,
- const unsigned int max_per_row,
- const unsigned int chunk_size);
++ ChunkSparsityPattern (const types::global_dof_index n,
++ const unsigned int max_per_row,
++ const unsigned int chunk_size);
+
+ /**
+ * Initialize a quadratic matrix.
+ *
+ * @arg m number of rows and columns
+ *
+ * @arg row_lengths possible
+ * number of nonzero entries for
+ * each row. This vector must
+ * have one entry for each row.
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal().
+ */
- ChunkSparsityPattern (const unsigned int m,
++ ChunkSparsityPattern (const types::global_dof_index m,
+ const std::vector<unsigned int> &row_lengths,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Destructor.
+ */
+ ~ChunkSparsityPattern ();
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ ChunkSparsityPattern &operator = (const ChunkSparsityPattern &);
+
+ /**
+ * Reallocate memory and set up data
+ * structures for a new matrix with
+ * <tt>m </tt>rows and <tt>n</tt> columns,
+ * with at most <tt>max_per_row</tt>
+ * nonzero entries per row.
+ *
+ * This function simply maps its
+ * operations to the other
+ * <tt>reinit</tt> function.
+ */
- void reinit (const unsigned int m,
- const unsigned int n,
++ void reinit (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const unsigned int max_per_row,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Reallocate memory for a matrix
+ * of size <tt>m x n</tt>. The
+ * number of entries for each row
+ * is taken from the array
+ * <tt>row_lengths</tt> which has to
+ * give this number for each row
+ * <tt>i=1...m</tt>.
+ *
+ * If <tt>m*n==0</tt> all memory is freed,
+ * resulting in a total reinitialization
+ * of the object. If it is nonzero, new
+ * memory is only allocated if the new
+ * size extends the old one. This is done
+ * to save time and to avoid fragmentation
+ * of the heap.
+ *
+ * If the number of rows equals
+ * the number of columns and the
+ * last parameter is true,
+ * diagonal elements are stored
+ * first in each row to allow
+ * optimized access in relaxation
+ * methods of SparseMatrix.
+ */
- void reinit (const unsigned int m,
- const unsigned int n,
++ void reinit (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const std::vector<unsigned int> &row_lengths,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Same as above, but with a
+ * VectorSlice argument instead.
+ */
- void reinit (const unsigned int m,
- const unsigned int n,
++ void reinit (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const VectorSlice<const std::vector<unsigned int> > &row_lengths,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * This function compresses the sparsity
+ * structure that this object represents.
+ * It does so by eliminating unused
+ * entries and sorting the remaining ones
+ * to allow faster access by usage of
+ * binary search algorithms. A special
+ * sorting scheme is used for the
+ * diagonal entry of quadratic matrices,
+ * which is always the first entry of
+ * each row.
+ *
+ * The memory which is no longer
+ * needed is released.
+ *
+ * SparseMatrix objects require the
+ * ChunkSparsityPattern objects they are
+ * initialized with to be compressed, to
+ * reduce memory requirements.
+ */
+ void compress ();
+
+ /**
+ * This function can be used as a
+ * replacement for reinit(),
+ * subsequent calls to add() and
+ * a final call to close() if you
+ * know exactly in advance the
+ * entries that will form the
+ * matrix sparsity pattern.
+ *
+ * The first two parameters
+ * determine the size of the
+ * matrix. For the two last ones,
+ * note that a sparse matrix can
+ * be described by a sequence of
+ * rows, each of which is
+ * represented by a sequence of
+ * pairs of column indices and
+ * values. In the present
+ * context, the begin() and
+ * end() parameters designate
+ * iterators (of forward iterator
+ * type) into a container, one
+ * representing one row. The
+ * distance between begin()
+ * and end() should therefore
+ * be equal to
+ * n_rows(). These iterators
+ * may be iterators of
+ * <tt>std::vector</tt>,
+ * <tt>std::list</tt>, pointers into a
+ * C-style array, or any other
+ * iterator satisfying the
+ * requirements of a forward
+ * iterator. The objects pointed
+ * to by these iterators
+ * (i.e. what we get after
+ * applying <tt>operator*</tt> or
+ * <tt>operator-></tt> to one of these
+ * iterators) must be a container
+ * itself that provides functions
+ * <tt>begin</tt> and <tt>end</tt>
+ * designating a range of
+ * iterators that describe the
+ * contents of one
+ * line. Dereferencing these
+ * inner iterators must either
+ * yield a pair of an unsigned
+ * integer as column index and a
+ * value of arbitrary type (such
+ * a type would be used if we
+ * wanted to describe a sparse
+ * matrix with one such object),
+ * or simply an unsigned integer
+ * (if we only wanted to describe
+ * a sparsity pattern). The
+ * function is able to determine
+ * itself whether an unsigned
+ * integer or a pair is what we
+ * get after dereferencing the
+ * inner iterators, through some
+ * template magic.
+ *
+ * While the order of the outer
+ * iterators denotes the
+ * different rows of the matrix,
+ * the order of the inner
+ * iterator denoting the columns
+ * does not matter, as they are
+ * sorted internally by this
+ * function anyway.
+ *
+ * Since that all sounds very
+ * complicated, consider the
+ * following example code, which
+ * may be used to fill a sparsity
+ * pattern:
+ * @code
+ * std::vector<std::vector<unsigned int> > column_indices (n_rows);
+ * for (unsigned int row=0; row<n_rows; ++row)
+ * // generate necessary columns in this row
+ * fill_row (column_indices[row]);
+ *
+ * sparsity.copy_from (n_rows, n_cols,
+ * column_indices.begin(),
+ * column_indices.end(),
+ * chunk_size);
+ * @endcode
+ *
+ * Note that this example works
+ * since the iterators
+ * dereferenced yield containers
+ * with functions <tt>begin</tt> and
+ * <tt>end</tt> (namely
+ * <tt>std::vector</tt>s), and the
+ * inner iterators dereferenced
+ * yield unsigned integers as
+ * column indices. Note that we
+ * could have replaced each of
+ * the two <tt>std::vector</tt>
+ * occurrences by <tt>std::list</tt>,
+ * and the inner one by
+ * <tt>std::set</tt> as well.
+ *
+ * Another example would be as
+ * follows, where we initialize a
+ * whole matrix, not only a
+ * sparsity pattern:
+ * @code
+ * std::vector<std::map<unsigned int,double> > entries (n_rows);
+ * for (unsigned int row=0; row<n_rows; ++row)
+ * // generate necessary pairs of columns
+ * // and corresponding values in this row
+ * fill_row (entries[row]);
+ *
+ * sparsity.copy_from (n_rows, n_cols,
+ * entries.begin(),
+ * entries.end(),
+ * chunk_size);
+ * matrix.reinit (sparsity);
+ * matrix.copy_from (entries.begin(),
+ * entries.end());
+ * @endcode
+ *
+ * This example works because
+ * dereferencing iterators of the
+ * inner type yields a pair of
+ * unsigned integers and a value,
+ * the first of which we take as
+ * column index. As previously,
+ * the outer <tt>std::vector</tt>
+ * could be replaced by
+ * <tt>std::list</tt>, and the inner
+ * <tt>std::map<unsigned int,double></tt>
+ * could be replaced by
+ * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
+ * or a list or set of such
+ * pairs, as they all return
+ * iterators that point to such
+ * pairs.
+ */
+ template <typename ForwardIterator>
- void copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
++ void copy_from (const types::global_dof_index n_rows,
++ const types::global_dof_index n_cols,
+ const ForwardIterator begin,
+ const ForwardIterator end,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Copy data from an object of type
+ * CompressedSparsityPattern,
+ * CompressedSetSparsityPattern or
+ * CompressedSimpleSparsityPattern.
+ * Previous content of this object is
+ * lost, and the sparsity pattern is in
+ * compressed mode afterwards.
+ */
+ template <typename SparsityType>
+ void copy_from (const SparsityType &csp,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Take a full matrix and use its
+ * nonzero entries to generate a
+ * sparse matrix entry pattern
+ * for this object.
+ *
+ * Previous content of this
+ * object is lost, and the
+ * sparsity pattern is in
+ * compressed mode afterwards.
+ */
+ template <typename number>
+ void copy_from (const FullMatrix<number> &matrix,
+ const unsigned int chunk_size,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Return whether the object is empty. It
+ * is empty if no memory is allocated,
+ * which is the same as that both
+ * dimensions are zero.
+ */
+ bool empty () const;
+
+ /**
+ * Return the chunk size given as
+ * argument when constructing this
+ * object.
+ */
+ unsigned int get_chunk_size () const;
+
+ /**
+ * Return the maximum number of entries per
+ * row. Before compression, this equals the
+ * number given to the constructor, while
+ * after compression, it equals the maximum
+ * number of entries actually allocated by
+ * the user.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Add a nonzero entry to the matrix.
+ * This function may only be called
+ * for non-compressed sparsity patterns.
+ *
+ * If the entry already exists, nothing
+ * bad happens.
+ */
- void add (const unsigned int i,
- const unsigned int j);
++ void add (const types::global_dof_index i,
++ const types::global_dof_index j);
+
+ /**
+ * Make the sparsity pattern
+ * symmetric by adding the
+ * sparsity pattern of the
+ * transpose object.
+ *
+ * This function throws an
+ * exception if the sparsity
+ * pattern does not represent a
+ * quadratic matrix.
+ */
+ void symmetrize ();
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the dimension
+ * of the image space.
+ */
- inline unsigned int n_rows () const;
++ inline types::global_dof_index n_rows () const;
+
+ /**
+ * Return number of columns of this
+ * matrix, which equals the dimension
+ * of the range space.
+ */
- inline unsigned int n_cols () const;
++ inline types::global_dof_index n_cols () const;
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
- bool exists (const unsigned int i,
- const unsigned int j) const;
++ bool exists (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ /**
+ * Number of entries in a specific row.
+ */
- unsigned int row_length (const unsigned int row) const;
++ unsigned int row_length (const types::global_dof_index row) const;
+
+ /**
+ * Compute the bandwidth of the matrix
+ * represented by this structure. The
+ * bandwidth is the maximum of $|i-j|$
+ * for which the index pair $(i,j)$
+ * represents a nonzero entry of the
+ * matrix. Consequently, the maximum
+ * bandwidth a $n\times m$ matrix can
+ * have is $\max\{n-1,m-1\}$.
+ */
+ unsigned int bandwidth () const;
+
+ /**
+ * Return the number of nonzero elements of
+ * this matrix. Actually, it returns the
+ * number of entries in the sparsity
+ * pattern; if any of the entries should
+ * happen to be zero, it is counted
+ * anyway.
+ *
+ * This function may only be called if the
+ * matrix struct is compressed. It does not
+ * make too much sense otherwise anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return whether the structure is
+ * compressed or not.
+ */
+ bool is_compressed () const;
+
+ /**
+ * Determine whether the matrix
+ * uses special convention for
+ * quadratic matrices.
+ *
+ * A return value <tt>true</tt> means
+ * that diagonal elements are stored
+ * first in each row. A number of
+ * functions in this class and the
+ * library in general, for example
+ * relaxation methods like Jacobi() and
+ * SOR(), require this to make their
+ * operations more efficient, since they
+ * need to quickly access the diagonal
+ * elements and do not have to search for
+ * them if they are the first element of
+ * each row. A side effect of this scheme
+ * is that each row contains at least one
+ * element, even if the row is empty
+ * (i.e. the diagonal element exists, but
+ * has value zero).
+ *
+ * A return value <tt>false</tt> means
+ * that diagonal elements are stored
+ * anywhere in the row, or not at all. In
+ * particular, a row or even the whole
+ * matrix may be empty. This can be used
+ * if you have block matrices where the
+ * off-diagonal blocks are quadratic but
+ * are never used for operations like the
+ * ones mentioned above. In this case,
+ * some memory can be saved by not using
+ * the diagonal storage optimization.
+ */
+ bool optimize_diagonal () const;
+
+ /**
+ * Return whether this object stores only
+ * those entries that have been added
+ * explicitly, or if the sparsity pattern
+ * contains elements that have been added
+ * through other means (implicitly) while
+ * building it. For the current class,
+ * the result is false because we store
+ * entire chunks, not individual
+ * elements, and adding one entry to the
+ * sparsity pattern requires also adding
+ * all the other elements of a chunk. The
+ * only exception is if
+ * <code>chunk_size==1</code>, the
+ * sparsity pattern is nonsymmetric or
+ * optimize_diagonal has been set to false.
+ *
+ * This function mainly serves the
+ * purpose of describing the current
+ * class in cases where several kinds of
+ * sparsity patterns can be passed as
+ * template arguments.
+ */
+ bool stores_only_added_elements () const;
+
+ /**
+ * Write the data of this object
+ * en bloc to a file. This is
+ * done in a binary mode, so the
+ * output is neither readable by
+ * humans nor (probably) by other
+ * computers using a different
+ * operating system or number
+ * format.
+ *
+ * The purpose of this function
+ * is that you can swap out
+ * matrices and sparsity pattern
+ * if you are short of memory,
+ * want to communicate between
+ * different programs, or allow
+ * objects to be persistent
+ * across different runs of the
+ * program.
+ */
+ void block_write (std::ostream &out) const;
+
+ /**
+ * Read data that has previously
+ * been written by block_write()
+ * from a file. This is done
+ * using the inverse operations
+ * to the above function, so it
+ * is reasonably fast because the
+ * bitstream is not interpreted
+ * except for a few numbers up
+ * front.
+ *
+ * The object is resized on this
+ * operation, and all previous
+ * contents are lost.
+ *
+ * A primitive form of error
+ * checking is performed which
+ * will recognize the bluntest
+ * attempts to interpret some
+ * data as a vector stored
+ * bitwise to a file, but not
+ * more.
+ */
+ void block_read (std::istream &in);
+
+ /**
+ * Print the sparsity of the
+ * matrix. The output consists of
+ * one line per row of the format
+ * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
+ * is the row number and
+ * <i>jn</i> are the allocated
+ * columns in this row.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the sparsity of the matrix
+ * in a format that <tt>gnuplot</tt> understands
+ * and which can be used to plot the
+ * sparsity pattern in a graphical
+ * way. The format consists of pairs
+ * <tt>i j</tt> of nonzero elements, each
+ * representing one entry of this
+ * matrix, one per line of the output
+ * file. Indices are counted from
+ * zero on, as usual. Since sparsity
+ * patterns are printed in the same
+ * way as matrices are displayed, we
+ * print the negative of the column
+ * index, which means that the
+ * <tt>(0,0)</tt> element is in the top left
+ * rather than in the bottom left
+ * corner.
+ *
+ * Print the sparsity pattern in
+ * gnuplot by setting the data style
+ * to dots or points and use the
+ * <tt>plot</tt> command.
+ */
+ void print_gnuplot (std::ostream &out) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidNumber,
+ int,
+ << "The provided number is invalid here: " << arg1);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidIndex,
+ int, int,
+ << "The given index " << arg1
+ << " should be less than " << arg2 << ".");
+ /**
+ * Exception
+ */
+ DeclException2 (ExcNotEnoughSpace,
+ int, int,
+ << "Upon entering a new entry to row " << arg1
+ << ": there was no free entry any more. " << std::endl
+ << "(Maximum number of entries for this row: "
+ << arg2 << "; maybe the matrix is already compressed?)");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNotCompressed);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcMatrixIsCompressed);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcEmptyObject);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidConstructorCall);
+ /**
+ * This exception is thrown if
+ * the matrix does not follow the
+ * convention of storing diagonal
+ * elements first in row. Refer
+ * to
+ * SparsityPattern::optimize_diagonal()
+ * for more information.
+ */
+ DeclException0 (ExcDiagonalNotOptimized);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcIteratorRange,
+ int, int,
+ << "The iterators denote a range of " << arg1
+ << " elements, but the given number of rows was " << arg2);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcMETISNotInstalled);
+ /**
+ * Exception
+ */
+ DeclException1 (ExcInvalidNumberOfPartitions,
+ int,
+ << "The number of partitions you gave is " << arg1
+ << ", but must be greater than zero.");
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidArraySize,
+ int, int,
+ << "The array has size " << arg1 << " but should have size "
+ << arg2);
+ //@}
+ private:
+ /**
+ * Number of rows that this sparsity
+ * structure shall represent.
+ */
- unsigned int rows;
++ types::global_dof_index rows;
+
+ /**
+ * Number of columns that this sparsity
+ * structure shall represent.
+ */
- unsigned int cols;
++ types::global_dof_index cols;
+
+ /**
+ * The size of chunks.
+ */
+ unsigned int chunk_size;
+
+ /**
+ * The reduced sparsity pattern. We store
+ * only which chunks exist, with each
+ * chunk a block in the matrix of size
+ * chunk_size by chunk_size.
+ */
+ SparsityPattern sparsity_pattern;
+
+ /**
+ * Make all the chunk sparse matrix kinds
+ * friends.
+ */
+ template <typename> friend class ChunkSparseMatrix;
};
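The member documentation above describes the typical life cycle of a ChunkSparsityPattern (reinit(), a number of add() calls, then compress()) and explains that whole chunks rather than individual elements are stored. The sketch below uses only members declared above, with the appropriate deal.II headers assumed to be included; the matrix size, the chunk size of 2 and the entries are invented for illustration:

@code
// Minimal usage sketch; the 8x8 size, the chunk size of 2 and the
// entries are invented for illustration.
ChunkSparsityPattern pattern;
pattern.reinit (8, 8,   // matrix size
                3,      // upper bound on entries per row
                2);     // chunks of size 2x2
for (unsigned int i=0; i<8; ++i)
  {
    pattern.add (i, i);        // diagonal entry
    if (i+1 < 8)
      pattern.add (i, i+1);    // first superdiagonal
  }
pattern.compress ();

// Since whole 2x2 chunks are stored, n_nonzero_elements() reports more
// entries than were explicitly add()ed; compare stores_only_added_elements().
const unsigned int stored_entries = pattern.n_nonzero_elements ();
@endcode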
*/
class CompressedSetSparsityPattern : public Subscriptor
{
- public:
- /**
- * An iterator that can be used to
- * iterate over the elements of a single
- * row. The result of dereferencing such
- * an iterator is a column index.
- */
- typedef std::set<unsigned int>::const_iterator row_iterator;
-
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- CompressedSetSparsityPattern ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the
- * matrix structure to be copied is
- * empty. This is so in order to prevent
- * involuntary copies of objects for
- * temporaries, which can use large
- * amounts of computing time. However,
- * copy constructors are needed if you
- * want to use the STL data types on
- * classes like this, e.g. to write such
- * statements like <tt>v.push_back
- * (CompressedSetSparsityPattern());</tt>,
- * with @p v a vector of @p
- * CompressedSetSparsityPattern objects.
- */
- CompressedSetSparsityPattern (const CompressedSetSparsityPattern &);
-
- /**
- * Initialize a rectangular
- * matrix with @p m rows and
- * @p n columns.
- */
- CompressedSetSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n);
-
- /**
- * Initialize a square matrix of
- * dimension @p n.
- */
- CompressedSetSparsityPattern (const types::global_dof_index n);
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- CompressedSetSparsityPattern & operator = (const CompressedSetSparsityPattern &);
-
- /**
- * Reallocate memory and set up
- * data structures for a new
- * matrix with @p m rows and
- * @p n columns, with at most
- * max_entries_per_row() nonzero
- * entries per row.
- */
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n);
-
- /**
- * Since this object is kept
- * compressed at all times anyway,
- * this function does nothing,
- * but is declared to make the
- * interface of this class as
- * much alike as that of the
- * SparsityPattern class.
- */
- void compress ();
-
- /**
- * Return whether the object is
- * empty. It is empty if no
- * memory is allocated, which is
- * the same as that both
- * dimensions are zero.
- */
- bool empty () const;
-
- /**
- * Return the maximum number of
- * entries per row. Note that
- * this number may change as
- * entries are added.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Add a nonzero entry to the
- * matrix. If the entry already
- * exists, nothing bad happens.
- */
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
-
- /**
- * Add several nonzero entries to the
- * specified row of the matrix. If the
- * entries already exist, nothing bad
- * happens.
- */
+ public:
+ /**
+ * An iterator that can be used to
+ * iterate over the elements of a single
+ * row. The result of dereferencing such
+ * an iterator is a column index.
+ */
+ typedef std::set<unsigned int>::const_iterator row_iterator;
+
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ CompressedSetSparsityPattern ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the
+ * matrix structure to be copied is
+ * empty. This is so in order to prevent
+ * involuntary copies of objects for
+ * temporaries, which can use large
+ * amounts of computing time. However,
+ * copy constructors are needed if you
+ * want to use the STL data types on
+ * classes like this, e.g. to write such
+ * statements like <tt>v.push_back
+ * (CompressedSetSparsityPattern());</tt>,
+ * with @p v a vector of @p
+ * CompressedSetSparsityPattern objects.
+ */
+ CompressedSetSparsityPattern (const CompressedSetSparsityPattern &);
+
+ /**
+ * Initialize a rectangular
+ * matrix with @p m rows and
+ * @p n columns.
+ */
- CompressedSetSparsityPattern (const unsigned int m,
- const unsigned int n);
++ CompressedSetSparsityPattern (const types::global_dof_index m,
++ const types::global_dof_index n);
+
+ /**
+ * Initialize a square matrix of
+ * dimension @p n.
+ */
- CompressedSetSparsityPattern (const unsigned int n);
++ CompressedSetSparsityPattern (const types::global_dof_index n);
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ CompressedSetSparsityPattern &operator = (const CompressedSetSparsityPattern &);
+
+ /**
+ * Reallocate memory and set up
+ * data structures for a new
+ * matrix with @p m rows and
+ * @p n columns, with at most
+ * max_entries_per_row() nonzero
+ * entries per row.
+ */
- void reinit (const unsigned int m,
- const unsigned int n);
++ void reinit (const types::global_dof_index m,
++ const types::global_dof_index n);
+
+ /**
+ * Since this object is kept
+ * compressed at all times anyway,
+ * this function does nothing,
+ * but is declared to make the
+ * interface of this class as
+ * much alike as that of the
+ * SparsityPattern class.
+ */
+ void compress ();
+
+ /**
+ * Return whether the object is
+ * empty. It is empty if no
+ * memory is allocated, which is
+ * the same as that both
+ * dimensions are zero.
+ */
+ bool empty () const;
+
+ /**
+ * Return the maximum number of
+ * entries per row. Note that
+ * this number may change as
+ * entries are added.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Add a nonzero entry to the
+ * matrix. If the entry already
+ * exists, nothing bad happens.
+ */
- void add (const unsigned int i,
- const unsigned int j);
++ void add (const types::global_dof_index i,
++ const types::global_dof_index j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified row of the matrix. If the
+ * entries already exist, nothing bad
+ * happens.
+ */
+ template <typename ForwardIterator>
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
++ void add_entries (const types::global_dof_index row,
++ ForwardIterator begin,
++ ForwardIterator end,
++ const bool indices_are_sorted = false);
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
- bool exists (const unsigned int i,
- const unsigned int j) const;
++ bool exists (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ /**
+ * Make the sparsity pattern
+ * symmetric by adding the
+ * sparsity pattern of the
+ * transpose object.
+ *
+ * This function throws an
+ * exception if the sparsity
+ * pattern does not represent a
+ * square matrix.
+ */
+ void symmetrize ();
+
+ /**
+ * Print the sparsity of the
+ * matrix. The output consists of
+ * one line per row of the format
+ * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
+ * is the row number and
+ * <i>jn</i> are the allocated
+ * columns in this row.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the sparsity of the matrix in a
+ * format that @p gnuplot understands and
+ * which can be used to plot the sparsity
+ * pattern in a graphical way. The format
+ * consists of pairs <tt>i j</tt> of
+ * nonzero elements, each representing
+ * one entry of this matrix, one per line
+ * of the output file. Indices are
+ * counted from zero on, as usual. Since
+ * sparsity patterns are printed in the
+ * same way as matrices are displayed, we
+ * print the negative of the column
+ * index, which means that the
+ * <tt>(0,0)</tt> element is in the top
+ * left rather than in the bottom left
+ * corner.
+ *
+ * Print the sparsity pattern in
+ * gnuplot by setting the data style
+ * to dots or points and use the
+ * @p plot command.
+ */
+ void print_gnuplot (std::ostream &out) const;
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the dimension
+ * of the image space.
+ */
- unsigned int n_rows () const;
++ types::global_dof_index n_rows () const;
+
+ /**
+ * Return number of columns of this
+ * matrix, which equals the dimension
+ * of the range space.
+ */
- unsigned int n_cols () const;
++ types::global_dof_index n_cols () const;
+
+ /**
+ * Number of entries in a specific row.
+ */
- unsigned int row_length (const unsigned int row) const;
++ unsigned int row_length (const types::global_dof_index row) const;
+
+ /**
+ * Return an iterator that can loop over
+ * all entries in the given
+ * row. Dereferencing the iterator yields
+ * a column index.
+ */
- row_iterator row_begin (const unsigned int row) const;
++ row_iterator row_begin (const types::global_dof_index row) const;
+
+ /**
+ * End iterator for the given row.
+ */
- row_iterator row_end (const unsigned int row) const;
++ row_iterator row_end (const types::global_dof_index row) const;
+
+
+ /**
+ * Compute the bandwidth of the matrix
+ * represented by this structure. The
+ * bandwidth is the maximum of
+ * $|i-j|$ for which the index pair
+ * $(i,j)$ represents a nonzero entry
+ * of the matrix.
+ */
+ unsigned int bandwidth () const;
+
+ /**
+ * Return the number of nonzero elements
+ * allocated through this sparsity
+ * pattern.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return whether this object stores only
+ * those entries that have been added
+ * explicitly, or if the sparsity pattern
+ * contains elements that have been added
+ * through other means (implicitly) while
+ * building it. For the current class,
+ * the result is always true.
+ *
+ * This function mainly serves the
+ * purpose of describing the current
+ * class in cases where several kinds of
+ * sparsity patterns can be passed as
+ * template arguments.
+ */
+ static
+ bool stores_only_added_elements ();
+
+ private:
+ /**
+ * Number of rows that this sparsity
+ * structure shall represent.
+ */
- unsigned int rows;
++ types::global_dof_index rows;
+
+ /**
+ * Number of columns that this sparsity
+ * structure shall represent.
+ */
- unsigned int cols;
++ types::global_dof_index cols;
+
+ /**
+ * For each row of the matrix, store the
+ * allocated non-zero entries as a
+ * std::set of column indices. For a
+ * discussion of storage schemes see the
+ * CompressedSparsityPattern::Line class.
+ */
+ struct Line
+ {
+ std::set<unsigned int> entries;
+
+ /**
+ * Constructor.
+ */
+ Line ();
+
+ /**
+ * Add the given column number to
+ * this line.
+ */
- void add (const unsigned int col_num);
++ void add (const types::global_dof_index col_num);
+
+ /**
+ * Add the columns specified by the
+ * iterator range to this line.
+ */
template <typename ForwardIterator>
- void add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- /**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
- * transpose object.
- *
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * square matrix.
- */
- void symmetrize ();
-
- /**
- * Print the sparsity of the
- * matrix. The output consists of
- * one line per row of the format
- * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
- * is the row number and
- * <i>jn</i> are the allocated
- * columns in this row.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the sparsity of the matrix in a
- * format that @p gnuplot understands and
- * which can be used to plot the sparsity
- * pattern in a graphical way. The format
- * consists of pairs <tt>i j</tt> of
- * nonzero elements, each representing
- * one entry of this matrix, one per line
- * of the output file. Indices are
- * counted from zero on, as usual. Since
- * sparsity patterns are printed in the
- * same way as matrices are displayed, we
- * print the negative of the column
- * index, which means that the
- * <tt>(0,0)</tt> element is in the top
- * left rather than in the bottom left
- * corner.
- *
- * Print the sparsity pattern in
- * gnuplot by setting the data style
- * to dots or points and use the
- * @p plot command.
- */
- void print_gnuplot (std::ostream &out) const;
-
- /**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
- */
- types::global_dof_index n_rows () const;
-
- /**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
- */
- types::global_dof_index n_cols () const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const types::global_dof_index row) const;
-
- /**
- * Return an iterator that can loop over
- * all entries in the given
- * row. Dereferencing the iterator yields
- * a column index.
- */
- row_iterator row_begin (const types::global_dof_index row) const;
-
- /**
- * End iterator for the given row.
- */
- row_iterator row_end (const types::global_dof_index row) const;
-
-
- /**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of
- * $|i-j|$ for which the index pair
- * $(i,j)$ represents a nonzero entry
- * of the matrix.
- */
- unsigned int bandwidth () const;
-
- /**
- * Return the number of nonzero elements
- * allocated through this sparsity
- * pattern.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return whether this object stores only
- * those entries that have been added
- * explicitly, or if the sparsity pattern
- * contains elements that have been added
- * through other means (implicitly) while
- * building it. For the current class,
- * the result is always true.
- *
- * This function mainly serves the
- * purpose of describing the current
- * class in cases where several kinds of
- * sparsity patterns can be passed as
- * template arguments.
- */
- static
- bool stores_only_added_elements ();
-
- private:
- /**
- * Number of rows that this sparsity
- * structure shall represent.
- */
- types::global_dof_index rows;
-
- /**
- * Number of columns that this sparsity
- * structure shall represent.
- */
- types::global_dof_index cols;
-
- /**
- * For each row of the matrix, store the
- * allocated non-zero entries as a
- * std::set of column indices. For a
- * discussion of storage schemes see the
- * CompressedSparsityPattern::Line class.
- */
- struct Line
- {
- std::set<unsigned int> entries;
-
- /**
- * Constructor.
- */
- Line ();
-
- /**
- * Add the given column number to
- * this line.
- */
- void add (const types::global_dof_index col_num);
-
- /**
- * Add the columns specified by the
- * iterator range to this line.
- */
- template <typename ForwardIterator>
- void add_entries (ForwardIterator begin,
- ForwardIterator end);
- };
-
-
- /**
- * Actual data: store for each
- * row the set of nonzero
- * entries.
- */
- std::vector<Line> lines;
+ void add_entries (ForwardIterator begin,
+ ForwardIterator end);
+ };
+
+
+ /**
+ * Actual data: store for each
+ * row the set of nonzero
+ * entries.
+ */
+ std::vector<Line> lines;
};
/*@}*/
*/
class CompressedSimpleSparsityPattern : public Subscriptor
{
- CompressedSimpleSparsityPattern (const unsigned int m,
- const unsigned int n,
+ public:
+ /**
+ * An iterator that can be used to
+ * iterate over the elements of a single
+ * row. The result of dereferencing such
+ * an iterator is a column index.
+ */
+ typedef std::vector<unsigned int>::const_iterator row_iterator;
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ CompressedSimpleSparsityPattern ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the
+ * matrix structure to be copied is
+ * empty. This is so in order to prevent
+ * involuntary copies of objects for
+ * temporaries, which can use large
+ * amounts of computing time. However,
+ * copy constructors are needed if you
+ * want to use the STL data types on
+ * classes like this, e.g. to write such
+ * statements like <tt>v.push_back
+ * (CompressedSparsityPattern());</tt>,
+ * with @p v a vector of @p
+ * CompressedSparsityPattern objects.
+ */
+ CompressedSimpleSparsityPattern (const CompressedSimpleSparsityPattern &);
+
+ /**
+ * Initialize a rectangular
+ * matrix with @p m rows and
+ * @p n columns. The @p rowset
+ * restricts the storage to
+ * elements in rows of this set.
+ * Adding elements outside of
+ * this set has no effect. The
+ * default argument keeps all
+ * entries.
+ */
- CompressedSimpleSparsityPattern (const unsigned int n);
++ CompressedSimpleSparsityPattern (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const IndexSet &rowset = IndexSet());
+
+ /**
+ * Initialize a square matrix of
+ * dimension @p n.
+ */
- void reinit (const unsigned int m,
- const unsigned int n,
++ CompressedSimpleSparsityPattern (const types::global_dof_index n);
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ CompressedSimpleSparsityPattern &operator = (const CompressedSimpleSparsityPattern &);
+
+ /**
+ * Reallocate memory and set up
+ * data structures for a new
+ * matrix with @p m rows and
+ * @p n columns, with at most
+ * max_entries_per_row() nonzero
+ * entries per row. The @p rowset
+ * restricts the storage to
+ * elements in rows of this set.
+ * Adding elements outside of
+ * this set has no effect. The
+ * default argument keeps all
+ * entries.
+ */
- void add (const unsigned int i,
- const unsigned int j);
++ void reinit (const types::global_dof_index m,
++ const types::global_dof_index n,
+ const IndexSet &rowset = IndexSet());
+
+ /**
+ * Since this object is kept
+ * compressed at all times anyway,
+ * this function does nothing,
+ * but is declared to make the
+ * interface of this class as
+ * similar as possible to that
+ * of the SparsityPattern class.
+ */
+ void compress ();
+
+ /**
+ * Return whether the object is
+ * empty. It is empty if no
+ * memory is allocated, which is
+ * the same as that both
+ * dimensions are zero.
+ */
+ bool empty () const;
+
+ /**
+ * Return the maximum number of
+ * entries per row. Note that
+ * this number may change as
+ * entries are added.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Add a nonzero entry to the
+ * matrix. If the entry already
+ * exists, nothing bad happens.
+ */
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_unique_and_sorted = false);
++ void add (const types::global_dof_index i,
++ const types::global_dof_index j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified row of the matrix. If the
+ * entries already exist, nothing bad
+ * happens.
+ */
+ template <typename ForwardIterator>
- bool exists (const unsigned int i,
- const unsigned int j) const;
++ void add_entries (const types::global_dof_index row,
++ ForwardIterator begin,
++ ForwardIterator end,
++ const bool indices_are_unique_and_sorted = false);
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
- unsigned int n_rows () const;
++ bool exists (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ /**
+ * Make the sparsity pattern
+ * symmetric by adding the
+ * sparsity pattern of the
+ * transpose object.
+ *
+ * This function throws an
+ * exception if the sparsity
+ * pattern does not represent a
+ * square matrix.
+ */
+ void symmetrize ();
+
+ /**
+ * Print the sparsity of the
+ * matrix. The output consists of
+ * one line per row of the format
+ * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
+ * is the row number and
+ * <i>jn</i> are the allocated
+ * columns in this row.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the sparsity of the matrix in a
+ * format that @p gnuplot understands and
+ * which can be used to plot the sparsity
+ * pattern in a graphical way. The format
+ * consists of pairs <tt>i j</tt> of
+ * nonzero elements, each representing
+ * one entry of this matrix, one per line
+ * of the output file. Indices are
+ * counted from zero on, as usual. Since
+ * sparsity patterns are printed in the
+ * same way as matrices are displayed, we
+ * print the negative of the column
+ * index, which means that the
+ * <tt>(0,0)</tt> element is in the top
+ * left rather than in the bottom left
+ * corner.
+ *
+ * Print the sparsity pattern in
+ * gnuplot by setting the data style
+ * to dots or points and use the
+ * @p plot command.
+ */
+ void print_gnuplot (std::ostream &out) const;
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the dimension
+ * of the image space.
+ */
- unsigned int n_cols () const;
++ types::global_dof_index n_rows () const;
+
+ /**
+ * Return number of columns of this
+ * matrix, which equals the dimension
+ * of the range space.
+ */
- unsigned int row_length (const unsigned int row) const;
++ types::global_dof_index n_cols () const;
+
+ /**
+ * Number of entries in a
+ * specific row. This function
+ * can only be called if the
+ * given row is a member of the
+ * index set of rows that we want
+ * to store.
+ */
- unsigned int column_number (const unsigned int row,
- const unsigned int index) const;
++ unsigned int row_length (const types::global_dof_index row) const;
+
+ /**
+ * Access to column number field.
+ * Return the column number of
+ * the @p indexth entry in @p row.
+ */
- row_iterator row_begin (const unsigned int row) const;
++ unsigned int column_number (const types::global_dof_index row,
++ const types::global_dof_index index) const;
+
+ /**
+ * Return an iterator that can loop over
+ * all entries in the given
+ * row. Dereferencing the iterator yields
+ * a column index.
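+ *
+ * A minimal usage sketch (assuming <tt>sp</tt> is an object of
+ * this class and <tt>row</tt> an index of a stored row):
+ * @code
+ * for (CompressedSimpleSparsityPattern::row_iterator
+ *        col = sp.row_begin (row); col != sp.row_end (row); ++col)
+ *   std::cout << *col << std::endl;
+ * @endcode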
+ */
- row_iterator row_end (const unsigned int row) const;
++ row_iterator row_begin (const types::global_dof_index row) const;
+
+ /**
+ * Returns the end of the current row.
+ */
- unsigned int rows;
++ row_iterator row_end (const types::global_dof_index row) const;
+ /**
+ * Compute the bandwidth of the matrix
+ * represented by this structure. The
+ * bandwidth is the maximum of
+ * $|i-j|$ for which the index pair
+ * $(i,j)$ represents a nonzero entry
+ * of the matrix.
+ */
+ unsigned int bandwidth () const;
+
+ /**
+ * Return the number of nonzero elements
+ * allocated through this sparsity pattern.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the IndexSet that sets which
+ * rows are active on the current
+ * processor. It corresponds to the
+ * IndexSet given to this class in the
+ * constructor or in the reinit function.
+ */
+ const IndexSet &row_index_set () const;
+
+ /**
+ * Return whether this object stores only
+ * those entries that have been added
+ * explicitly, or if the sparsity pattern
+ * contains elements that have been added
+ * through other means (implicitly) while
+ * building it. For the current class,
+ * the result is always true.
+ *
+ * This function mainly serves the
+ * purpose of describing the current
+ * class in cases where several kinds of
+ * sparsity patterns can be passed as
+ * template arguments.
+ */
+ static
+ bool stores_only_added_elements ();
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ private:
+ /**
+ * Number of rows that this sparsity
+ * structure shall represent.
+ */
- unsigned int cols;
++ types::global_dof_index rows;
+
+ /**
+ * Number of columns that this sparsity
+ * structure shall represent.
+ */
++ types::global_dof_index cols;
+
+ /**
+ * A set that contains the valid rows.
+ */
+
+ IndexSet rowset;
+
+
+ /**
+ * Store some data for each row
+ * describing which entries of this row
+ * are nonzero. Data is stored sorted in
+ * the @p entries std::vector.
+ * The vector per row is dynamically
+ * growing upon insertion doubling its
+ * memory each time.
+ */
+ struct Line
+ {
public:
- /**
- * An iterator that can be used to
- * iterate over the elements of a single
- * row. The result of dereferencing such
- * an iterator is a column index.
- */
- typedef std::vector<unsigned int>::const_iterator row_iterator;
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- CompressedSimpleSparsityPattern ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the
- * matrix structure to be copied is
- * empty. This is so in order to prevent
- * involuntary copies of objects for
- * temporaries, which can use large
- * amounts of computing time. However,
- * copy constructors are needed if you
- * want to use the STL data types on
- * classes like this, e.g. to write such
- * statements like <tt>v.push_back
- * (CompressedSparsityPattern());</tt>,
- * with @p v a vector of @p
- * CompressedSparsityPattern objects.
- */
- CompressedSimpleSparsityPattern (const CompressedSimpleSparsityPattern &);
-
- /**
- * Initialize a rectangular
- * matrix with @p m rows and
- * @p n columns. The @p rowset
- * restricts the storage to
- * elements in rows of this set.
- * Adding elements outside of
- * this set has no effect. The
- * default argument keeps all
- * entries.
- */
- CompressedSimpleSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const IndexSet & rowset = IndexSet());
-
- /**
- * Initialize a square matrix of
- * dimension @p n.
- */
- CompressedSimpleSparsityPattern (const types::global_dof_index n);
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- CompressedSimpleSparsityPattern & operator = (const CompressedSimpleSparsityPattern &);
-
- /**
- * Reallocate memory and set up
- * data structures for a new
- * matrix with @p m rows and
- * @p n columns, with at most
- * max_entries_per_row() nonzero
- * entries per row. The @p rowset
- * restricts the storage to
- * elements in rows of this set.
- * Adding elements outside of
- * this set has no effect. The
- * default argument keeps all
- * entries.
- */
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n,
- const IndexSet & rowset = IndexSet());
-
- /**
- * Since this object is kept
- * compressed at all times anway,
- * this function does nothing,
- * but is declared to make the
- * interface of this class as
- * much alike as that of the
- * SparsityPattern class.
- */
- void compress ();
-
- /**
- * Return whether the object is
- * empty. It is empty if no
- * memory is allocated, which is
- * the same as that both
- * dimensions are zero.
- */
- bool empty () const;
-
- /**
- * Return the maximum number of
- * entries per row. Note that
- * this number may change as
- * entries are added.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Add a nonzero entry to the
- * matrix. If the entry already
- * exists, nothing bad happens.
- */
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
-
- /**
- * Add several nonzero entries to the
- * specified row of the matrix. If the
- * entries already exist, nothing bad
- * happens.
- */
+ /**
+ * Storage for the column indices of
+ * this row. This array is always
+ * kept sorted.
+ */
+ std::vector<unsigned int> entries;
+
+ /**
+ * Constructor.
+ */
+ Line ();
+
+ /**
+ * Add the given column number to
+ * this line.
+ */
- void add (const unsigned int col_num);
++ void add (const types::global_dof_index col_num);
+
+ /**
+ * Add the columns specified by the
+ * iterator range to this line.
+ */
template <typename ForwardIterator>
- void add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_unique_and_sorted = false);
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- /**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
- * transpose object.
- *
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * square matrix.
- */
- void symmetrize ();
-
- /**
- * Print the sparsity of the
- * matrix. The output consists of
- * one line per row of the format
- * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
- * is the row number and
- * <i>jn</i> are the allocated
- * columns in this row.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the sparsity of the matrix in a
- * format that @p gnuplot understands and
- * which can be used to plot the sparsity
- * pattern in a graphical way. The format
- * consists of pairs <tt>i j</tt> of
- * nonzero elements, each representing
- * one entry of this matrix, one per line
- * of the output file. Indices are
- * counted from zero on, as usual. Since
- * sparsity patterns are printed in the
- * same way as matrices are displayed, we
- * print the negative of the column
- * index, which means that the
- * <tt>(0,0)</tt> element is in the top
- * left rather than in the bottom left
- * corner.
- *
- * Print the sparsity pattern in
- * gnuplot by setting the data style
- * to dots or points and use the
- * @p plot command.
- */
- void print_gnuplot (std::ostream &out) const;
-
- /**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
- */
- types::global_dof_index n_rows () const;
-
- /**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
- */
- types::global_dof_index n_cols () const;
-
- /**
- * Number of entries in a
- * specific row. This function
- * can only be called if the
- * given row is a member of the
- * index set of rows that we want
- * to store.
- */
- unsigned int row_length (const types::global_dof_index row) const;
-
- /**
- * Access to column number field.
- * Return the column number of
- * the @p indexth entry in @p row.
- */
- unsigned int column_number (const types::global_dof_index row,
- const types::global_dof_index index) const;
-
- /**
- * Return an iterator that can loop over
- * all entries in the given
- * row. Dereferencing the iterator yields
- * a column index.
- */
- row_iterator row_begin (const types::global_dof_index row) const;
-
- /**
- * Returns the end of the current row.
- */
- row_iterator row_end (const types::global_dof_index row) const;
- /**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of
- * $|i-j|$ for which the index pair
- * $(i,j)$ represents a nonzero entry
- * of the matrix.
- */
- unsigned int bandwidth () const;
-
- /**
- * Return the number of nonzero elements
- * allocated through this sparsity pattern.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the IndexSet that sets which
- * rows are active on the current
- * processor. It corresponds to the
- * IndexSet given to this class in the
- * constructor or in the reinit function.
- */
- const IndexSet & row_index_set () const;
-
- /**
- * return whether this object stores only
- * those entries that have been added
- * explicitly, or if the sparsity pattern
- * contains elements that have been added
- * through other means (implicitly) while
- * building it. For the current class,
- * the result is always true.
- *
- * This function mainly serves the
- * purpose of describing the current
- * class in cases where several kinds of
- * sparsity patterns can be passed as
- * template arguments.
- */
- static
- bool stores_only_added_elements ();
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
+ void add_entries (ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted);
+
+ /**
+ * Estimate the memory consumption
+ * (in bytes) of this line.
+ */
std::size_t memory_consumption () const;
+ };
- private:
- /**
- * Number of rows that this sparsity
- * structure shall represent.
- */
- types::global_dof_index rows;
-
- /**
- * Number of columns that this sparsity
- * structure shall represent.
- */
- types::global_dof_index cols;
-
- /**
- * A set that contains the valid rows.
- */
-
- IndexSet rowset;
-
-
- /**
- * Store some data for each row
- * describing which entries of this row
- * are nonzero. Data is stored sorted in
- * the @p entries std::vector.
- * The vector per row is dynamically
- * growing upon insertion doubling its
- * memory each time.
- */
- struct Line
- {
- public:
- /**
- * Storage for the column indices of
- * this row. This array is always
- * kept sorted.
- */
- std::vector<unsigned int> entries;
-
- /**
- * Constructor.
- */
- Line ();
-
- /**
- * Add the given column number to
- * this line.
- */
- void add (const types::global_dof_index col_num);
-
- /**
- * Add the columns specified by the
- * iterator range to this line.
- */
- template <typename ForwardIterator>
- void add_entries (ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted);
-
- /**
- * estimates memory consumption.
- */
- std::size_t memory_consumption () const;
- };
-
-
- /**
- * Actual data: store for each
- * row the set of nonzero
- * entries.
- */
- std::vector<Line> lines;
+
+ /**
+ * Actual data: store for each
+ * row the set of nonzero
+ * entries.
+ */
+ std::vector<Line> lines;
};
/*@}*/
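// A hedged usage sketch (not part of the library header): it exercises only
// members declared above -- construction with an IndexSet that restricts
// storage to a subset of rows, adding entries, and querying a row. The
// sizes and indices are invented, and the include paths assume a deal.II
// 8.x layout; adjust them for other versions.
#include <deal.II/base/index_set.h>
#include <deal.II/lac/compressed_simple_sparsity_pattern.h>

void compressed_simple_sparsity_usage_sketch ()
{
  using namespace dealii;

  IndexSet owned_rows (8);
  owned_rows.add_range (0, 4);                   // store only rows 0..3

  CompressedSimpleSparsityPattern csp (8, 8, owned_rows);
  csp.add (1, 5);                                // stored: row 1 is in the set
  csp.add (6, 2);                                // ignored: row 6 is not

  const unsigned int entries_in_row_1 = csp.row_length (1);   // == 1
  (void) entries_in_row_1;                       // avoid unused-variable warnings
}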
inline
void
-CompressedSimpleSparsityPattern::Line::add (const unsigned int j)
+CompressedSimpleSparsityPattern::Line::add (const types::global_dof_index j)
{
- // first check the last element (or if line
- // is still empty)
+ // first check the last element (or if line
+ // is still empty)
if ( (entries.size()==0) || ( entries.back() < j) )
{
entries.push_back(j);
*/
class CompressedSparsityPattern : public Subscriptor
{
- CompressedSparsityPattern (const unsigned int m,
- const unsigned int n);
+ public:
+ /**
+ * An iterator that can be used to
+ * iterate over the elements of a single
+ * row. The result of dereferencing such
+ * an iterator is a column index.
+ */
+ typedef std::vector<unsigned int>::const_iterator row_iterator;
+
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ CompressedSparsityPattern ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the
+ * matrix structure to be copied is
+ * empty. This is so in order to prevent
+ * involuntary copies of objects for
+ * temporaries, which can use large
+ * amounts of computing time. However,
+ * copy constructors are needed if you
+ * want to use the STL data types on
+ * classes like this, e.g. to write such
+ * statements like <tt>v.push_back
+ * (CompressedSparsityPattern());</tt>,
+ * with @p v a vector of @p
+ * CompressedSparsityPattern objects.
+ */
+ CompressedSparsityPattern (const CompressedSparsityPattern &);
+
+ /**
+ * Initialize a rectangular
+ * matrix with @p m rows and
+ * @p n columns.
+ */
- CompressedSparsityPattern (const unsigned int n);
++ CompressedSparsityPattern (const types::global_dof_index m,
++ const types::global_dof_index n);
+
+ /**
+ * Initialize a square matrix of
+ * dimension @p n.
+ */
- void reinit (const unsigned int m,
- const unsigned int n);
++ CompressedSparsityPattern (const types::global_dof_index n);
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ CompressedSparsityPattern &operator = (const CompressedSparsityPattern &);
+
+ /**
+ * Reallocate memory and set up
+ * data structures for a new
+ * matrix with @p m rows and
+ * @p n columns, with at most
+ * max_entries_per_row() nonzero
+ * entries per row.
+ */
- void add (const unsigned int i,
- const unsigned int j);
++ void reinit (const types::global_dof_index m,
++ const types::global_dof_index n);
+
+ /**
+ * Since this object is kept
+ * compressed at all times anyway,
+ * this function does nothing,
+ * but is declared to make the
+ * interface of this class as
+ * similar as possible to that
+ * of the SparsityPattern class.
+ */
+ void compress ();
+
+ /**
+ * Return whether the object is
+ * empty. It is empty if no
+ * memory is allocated, which is
+ * the same as that both
+ * dimensions are zero.
+ */
+ bool empty () const;
+
+ /**
+ * Return the maximum number of
+ * entries per row. Note that
+ * this number may change as
+ * entries are added.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Add a nonzero entry to the
+ * matrix. If the entry already
+ * exists, nothing bad happens.
+ */
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_unique_and_sorted = false);
++ void add (const types::global_dof_index i,
++ const types::global_dof_index j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified row of the matrix. If the
+ * entries already exist, nothing bad
+ * happens.
+ */
+ template <typename ForwardIterator>
- bool exists (const unsigned int i,
- const unsigned int j) const;
++ void add_entries (const types::global_dof_index row,
++ ForwardIterator begin,
++ ForwardIterator end,
++ const bool indices_are_unique_and_sorted = false);
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
- unsigned int n_rows () const;
++ bool exists (const types::global_dof_index i,
++ const types::global_dof_index j) const;
+
+ /**
+ * Make the sparsity pattern
+ * symmetric by adding the
+ * sparsity pattern of the
+ * transpose object.
+ *
+ * This function throws an
+ * exception if the sparsity
+ * pattern does not represent a
+ * square matrix.
+ */
+ void symmetrize ();
+
+ /**
+ * Print the sparsity of the
+ * matrix. The output consists of
+ * one line per row of the format
+ * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
+ * is the row number and
+ * <i>jn</i> are the allocated
+ * columns in this row.
+ */
+ void print (std::ostream &out) const;
+
+ /**
+ * Print the sparsity of the matrix in a
+ * format that @p gnuplot understands and
+ * which can be used to plot the sparsity
+ * pattern in a graphical way. The format
+ * consists of pairs <tt>i j</tt> of
+ * nonzero elements, each representing
+ * one entry of this matrix, one per line
+ * of the output file. Indices are
+ * counted from zero on, as usual. Since
+ * sparsity patterns are printed in the
+ * same way as matrices are displayed, we
+ * print the negative of the column
+ * index, which means that the
+ * <tt>(0,0)</tt> element is in the top
+ * left rather than in the bottom left
+ * corner.
+ *
+ * Print the sparsity pattern in
+ * gnuplot by setting the data style
+ * to dots or points and use the
+ * @p plot command.
+ */
+ void print_gnuplot (std::ostream &out) const;
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the dimension
+ * of the image space.
+ */
- unsigned int n_cols () const;
++ types::global_dof_index n_rows () const;
+
+ /**
+ * Return number of columns of this
+ * matrix, which equals the dimension
+ * of the range space.
+ */
- unsigned int row_length (const unsigned int row) const;
++ types::global_dof_index n_cols () const;
+
+ /**
+ * Number of entries in a specific row.
+ */
- unsigned int column_number (const unsigned int row,
- const unsigned int index) const;
++ unsigned int row_length (const types::global_dof_index row) const;
+
+ /**
+ * Access to column number field.
+ * Return the column number of
+ * the @p indexth entry in @p row.
+ */
- row_iterator row_begin (const unsigned int row) const;
++ unsigned int column_number (const types::global_dof_index row,
++ const unsigned int index) const;
+
+ /**
+ * Return an iterator that can loop over
+ * all entries in the given
+ * row. Dereferencing the iterator yields
+ * a column index.
+ */
- row_iterator row_end (const unsigned int row) const;
++ row_iterator row_begin (const types::global_dof_index row) const;
+
+ /**
+ * Returns the end of the current row.
+ */
- unsigned int rows;
++ row_iterator row_end (const types::global_dof_index row) const;
+
+ /**
+ * Compute the bandwidth of the matrix
+ * represented by this structure. The
+ * bandwidth is the maximum of
+ * $|i-j|$ for which the index pair
+ * $(i,j)$ represents a nonzero entry
+ * of the matrix.
+ */
+ unsigned int bandwidth () const;
+
+ /**
+ * Return the number of nonzero elements
+ * allocated through this sparsity
+ * pattern.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return whether this object stores only
+ * those entries that have been added
+ * explicitly, or if the sparsity pattern
+ * contains elements that have been added
+ * through other means (implicitly) while
+ * building it. For the current class,
+ * the result is always true.
+ *
+ * This function mainly serves the
+ * purpose of describing the current
+ * class in cases where several kinds of
+ * sparsity patterns can be passed as
+ * template arguments.
+ */
+ static
+ bool stores_only_added_elements ();
+
+ private:
+ /**
+ * Number of rows that this sparsity
+ * structure shall represent.
+ */
- unsigned int cols;
++ types::global_dof_index rows;
+
+ /**
+ * Number of columns that this sparsity
+ * structure shall represent.
+ */
++ types::global_dof_index cols;
+
+ /**
+ * Store some data for each row
+ * describing which entries of this row
+ * are nonzero. Data is organized as
+ * follows: if an entry is added to a
+ * row, it is first added to the #cache
+ * variable, irrespective of whether an
+ * entry with same column number has
+ * already been added. Only if the cache
+ * is full do we flush it by removing
+ * duplicates, removing entries that are
+ * already stored in the @p entries
+ * array, sorting everything, and merging
+ * the two arrays.
+ *
+ * The reasoning behind this scheme is
+ * that memory allocation is expensive,
+ * and we only want to do it when really
+ * necessary. Previously (in deal.II
+ * versions up to 5.0), we used to store
+ * the column indices inside a std::set,
+ * but this would allocate 20 bytes each
+ * time we added an entry. (A std::set
+ * based class has later been revived in
+ * form of the
+ * CompressedSetSparsityPattern class, as
+ * this turned out to be more efficient
+ * for hp finite element programs such as
+ * step-27). Using the
+ * present scheme, we only need to
+ * allocate memory once for every 8 added
+ * entries, and we waste a lot less
+ * memory by not using a balanced tree
+ * for storing column indices.
+ *
+ * Since some functions that are @p const
+ * need to access the data of this
+ * object, but need to flush caches
+ * before, the flush_cache() function is
+ * marked const, and the data members are
+ * marked @p mutable.
+ *
+ * A small test series about the size of
+ * the cache showed that the run time of
+ * a small program just testing the
+ * compressed sparsity pattern element
+ * insertion routine ran for 3.6 seconds
+ * with a cache size of 8, and 4.2
+ * seconds with a cache size of 16. We
+ * deem even smaller cache sizes
+ * undesirable, since they lead to more
+ * memory allocations, while larger cache
+ * sizes lead to waste of memory. The
+ * original version of this class, with
+ * one std::set per row took 8.2 seconds
+ * on the same program.
+ */
+ struct Line
+ {
+ private:
+ /**
+ * Size of the cache.
+ */
+ static const unsigned int cache_size = 8;
+
public:
- /**
- * An iterator that can be used to
- * iterate over the elements of a single
- * row. The result of dereferencing such
- * an iterator is a column index.
- */
- typedef std::vector<unsigned int>::const_iterator row_iterator;
-
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- CompressedSparsityPattern ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the
- * matrix structure to be copied is
- * empty. This is so in order to prevent
- * involuntary copies of objects for
- * temporaries, which can use large
- * amounts of computing time. However,
- * copy constructors are needed if yo
- * want to use the STL data types on
- * classes like this, e.g. to write such
- * statements like <tt>v.push_back
- * (CompressedSparsityPattern());</tt>,
- * with @p v a vector of @p
- * CompressedSparsityPattern objects.
- */
- CompressedSparsityPattern (const CompressedSparsityPattern &);
-
- /**
- * Initialize a rectangular
- * matrix with @p m rows and
- * @p n columns.
- */
- CompressedSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n);
-
- /**
- * Initialize a square matrix of
- * dimension @p n.
- */
- CompressedSparsityPattern (const types::global_dof_index n);
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- CompressedSparsityPattern & operator = (const CompressedSparsityPattern &);
-
- /**
- * Reallocate memory and set up
- * data structures for a new
- * matrix with @p m rows and
- * @p n columns, with at most
- * max_entries_per_row() nonzero
- * entries per row.
- */
- void reinit (const types::global_dof_index m,
- const types::global_dof_index n);
-
- /**
- * Since this object is kept
- * compressed at all times anway,
- * this function does nothing,
- * but is declared to make the
- * interface of this class as
- * much alike as that of the
- * SparsityPattern class.
- */
- void compress ();
-
- /**
- * Return whether the object is
- * empty. It is empty if no
- * memory is allocated, which is
- * the same as that both
- * dimensions are zero.
- */
- bool empty () const;
-
- /**
- * Return the maximum number of
- * entries per row. Note that
- * this number may change as
- * entries are added.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Add a nonzero entry to the
- * matrix. If the entry already
- * exists, nothing bad happens.
- */
- void add (const types::global_dof_index i,
- const types::global_dof_index j);
-
- /**
- * Add several nonzero entries to the
- * specified row of the matrix. If the
- * entries already exist, nothing bad
- * happens.
- */
+ /**
+ * Storage for the column indices of
+ * this row, unless they are still in
+ * the cache. This array is always
+ * kept sorted.
+ */
+ mutable std::vector<unsigned int> entries;
+
+ /**
+ * Cache of entries that have not yet
+ * been written to #entries.
+ */
+ mutable unsigned int cache[cache_size];
+
+ /**
+ * Number of entries in the cache.
+ */
+ mutable unsigned int cache_entries;
+
+ /**
+ * Constructor.
+ */
+ Line ();
+
+ /**
+ * Add the given column number to
+ * this line.
+ */
- void add (const unsigned int col_num);
++ void add (const types::global_dof_index col_num);
+
+ /**
+ * Add the columns specified by the
+ * iterator range to this line.
+ */
template <typename ForwardIterator>
- void add_entries (const types::global_dof_index row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_unique_and_sorted = false);
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const types::global_dof_index i,
- const types::global_dof_index j) const;
-
- /**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
- * transpose object.
- *
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * square matrix.
- */
- void symmetrize ();
-
- /**
- * Print the sparsity of the
- * matrix. The output consists of
- * one line per row of the format
- * <tt>[i,j1,j2,j3,...]</tt>. <i>i</i>
- * is the row number and
- * <i>jn</i> are the allocated
- * columns in this row.
- */
- void print (std::ostream &out) const;
-
- /**
- * Print the sparsity of the matrix in a
- * format that @p gnuplot understands and
- * which can be used to plot the sparsity
- * pattern in a graphical way. The format
- * consists of pairs <tt>i j</tt> of
- * nonzero elements, each representing
- * one entry of this matrix, one per line
- * of the output file. Indices are
- * counted from zero on, as usual. Since
- * sparsity patterns are printed in the
- * same way as matrices are displayed, we
- * print the negative of the column
- * index, which means that the
- * <tt>(0,0)</tt> element is in the top
- * left rather than in the bottom left
- * corner.
- *
- * Print the sparsity pattern in
- * gnuplot by setting the data style
- * to dots or points and use the
- * @p plot command.
- */
- void print_gnuplot (std::ostream &out) const;
-
- /**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
- */
- types::global_dof_index n_rows () const;
-
- /**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
- */
- types::global_dof_index n_cols () const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const types::global_dof_index row) const;
-
- /**
- * Access to column number field.
- * Return the column number of
- * the @p indexth entry in @p row.
- */
- unsigned int column_number (const types::global_dof_index row,
- const unsigned int index) const;
-
- /**
- * Return an iterator that can loop over
- * all entries in the given
- * row. Dereferencing the iterator yields
- * a column index.
- */
- row_iterator row_begin (const types::global_dof_index row) const;
-
- /**
- * Returns the end of the current row.
- */
- row_iterator row_end (const types::global_dof_index row) const;
-
- /**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of
- * $|i-j|$ for which the index pair
- * $(i,j)$ represents a nonzero entry
- * of the matrix.
- */
- unsigned int bandwidth () const;
-
- /**
- * Return the number of nonzero elements
- * allocated through this sparsity
- * pattern.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return whether this object stores only
- * those entries that have been added
- * explicitly, or if the sparsity pattern
- * contains elements that have been added
- * through other means (implicitly) while
- * building it. For the current class,
- * the result is always true.
- *
- * This function mainly serves the
- * purpose of describing the current
- * class in cases where several kinds of
- * sparsity patterns can be passed as
- * template arguments.
- */
- static
- bool stores_only_added_elements ();
-
- private:
- /**
- * Number of rows that this sparsity
- * structure shall represent.
- */
- types::global_dof_index rows;
-
- /**
- * Number of columns that this sparsity
- * structure shall represent.
- */
- types::global_dof_index cols;
-
- /**
- * Store some data for each row
- * describing which entries of this row
- * are nonzero. Data is organized as
- * follows: if an entry is added to a
- * row, it is first added to the #cache
- * variable, irrespective of whether an
- * entry with same column number has
- * already been added. Only if the cache
- * is full do we flush it by removing
- * duplicates, removing entries that are
- * already stored in the @p entries
- * array, sorting everything, and merging
- * the two arrays.
- *
- * The reasoning behind this scheme is
- * that memory allocation is expensive,
- * and we only want to do it when really
- * necessary. Previously (in deal.II
- * versions up to 5.0), we used to store
- * the column indices inside a std::set,
- * but this would allocate 20 bytes each
- * time we added an entry. (A std::set
- * based class has later been revived in
- * form of the
- * CompressedSetSparsityPattern class, as
- * this turned out to be more efficient
- * for hp finite element programs such as
- * step-27). Using the
- * present scheme, we only need to
- * allocate memory once for every 8 added
- * entries, and we waste a lot less
- * memory by not using a balanced tree
- * for storing column indices.
- *
- * Since some functions that are @p const
- * need to access the data of this
- * object, but need to flush caches
- * before, the flush_cache() function is
- * marked const, and the data members are
- * marked @p mutable.
- *
- * A small testseries about the size of
- * the cache showed that the run time of
- * a small program just testing the
- * compressed sparsity pattern element
- * insertion routine ran for 3.6 seconds
- * with a cache size of 8, and 4.2
- * seconds with a cache size of 16. We
- * deem even smaller cache sizes
- * undesirable, since they lead to more
- * memory allocations, while larger cache
- * sizes lead to waste of memory. The
- * original version of this class, with
- * one std::set per row took 8.2 seconds
- * on the same program.
- */
- struct Line
- {
- private:
- /**
- * Size of the cache.
- */
- static const unsigned int cache_size = 8;
-
- public:
- /**
- * Storage for the column indices of
- * this row, unless they are still in
- * the cache. This array is always
- * kept sorted.
- */
- mutable std::vector<unsigned int> entries;
-
- /**
- * Cache of entries that have not yet
- * been written to #entries;
- */
- mutable unsigned int cache[cache_size];
-
- /**
- * Number of entries in the cache.
- */
- mutable unsigned int cache_entries;
-
- /**
- * Constructor.
- */
- Line ();
-
- /**
- * Add the given column number to
- * this line.
- */
- void add (const types::global_dof_index col_num);
-
- /**
- * Add the columns specified by the
- * iterator range to this line.
- */
- template <typename ForwardIterator>
- void add_entries (ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted);
-
- /**
- * Flush the cache my merging it with
- * the #entries array.
- */
- void flush_cache () const;
- };
-
-
- /**
- * Actual data: store for each
- * row the set of nonzero
- * entries.
- */
- std::vector<Line> lines;
+ void add_entries (ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted);
+
+ /**
+ * Flush the cache by merging it with
+ * the #entries array.
+ */
+ void flush_cache () const;
+ };
+
+
+ /**
+ * Actual data: store for each
+ * row the set of nonzero
+ * entries.
+ */
+ std::vector<Line> lines;
};
/*@}*/
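// A simplified, standalone sketch of the cache-and-flush storage strategy
// that the Line documentation above describes; it is illustrative only and
// not the library's implementation. New column indices go into a small
// fixed-size cache first; only when the cache is full are they sorted,
// de-duplicated and merged into the sorted 'entries' vector, so memory is
// allocated at most once per cache_size insertions.
#include <algorithm>
#include <iterator>
#include <vector>

struct CachedRowSketch
{
  static const unsigned int cache_size = 8;

  std::vector<unsigned int> entries;      // always kept sorted and unique
  unsigned int              cache[cache_size];
  unsigned int              n_cached;

  CachedRowSketch () : n_cached (0) {}

  void add (const unsigned int col)
  {
    cache[n_cached++] = col;
    if (n_cached == cache_size)
      flush_cache ();
  }

  void flush_cache ()
  {
    std::sort (cache, cache + n_cached);
    std::vector<unsigned int> merged;
    std::set_union (entries.begin (), entries.end (),
                    cache, std::unique (cache, cache + n_cached),
                    std::back_inserter (merged));
    entries.swap (merged);
    n_cached = 0;
  }
};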
inline
void
-CompressedSparsityPattern::Line::add (const unsigned int j)
+CompressedSparsityPattern::Line::add (const types::global_dof_index j)
{
- // first check whether this entry is
- // already in the cache. if so, we can
- // safely return
+ // first check whether this entry is
+ // already in the cache. if so, we can
+ // safely return
for (unsigned int i=0; i<cache_entries; ++i)
if (cache[i] == j)
return;
*/
class ConstraintMatrix : public Subscriptor
{
- public:
- /**
- * An enum that describes what should
- * happen if the two ConstraintMatrix
- * objects involved in a call to the
- * merge() function happen to have
- * constraints on the same degrees of
- * freedom.
- */
- enum MergeConflictBehavior
- {
- /**
- * Throw an exception if the two
- * objects concerned have
- * conflicting constraints on the
- * same degree of freedom.
- */
- no_conflicts_allowed,
-
- /**
- * In an operation
- * <code>cm1.merge(cm2)</code>, if
- * <code>cm1</code> and
- * <code>cm2</code> have
- * constraints on the same degree
- * of freedom, take the one from
- * <code>cm1</code>.
- */
- left_object_wins,
-
- /**
- * In an operation
- * <code>cm1.merge(cm2)</code>, if
- * <code>cm1</code> and
- * <code>cm2</code> have
- * constraints on the same degree
- * of freedom, take the one from
- * <code>cm2</code>.
- */
- right_object_wins
- };
-
- /**
- * Constructor. The supplied IndexSet
- * defines which indices might be
- * constrained inside this
- * ConstraintMatrix. In a calculation
- * with a
- * parallel::distributed::DoFHandler one
- * should use locally_relevant_dofs. The
- * IndexSet allows the ConstraintMatrix
- * to safe memory. Otherwise internal
- * data structures for all possible
- * indices will be created.
- */
- ConstraintMatrix (const IndexSet & local_constraints = IndexSet());
-
- /**
- * Copy constructor
- */
- ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
-
- /**
- * Reinit the ConstraintMatrix object and
- * supply an IndexSet with lines that may
- * be constrained. This function is only
- * relevant in the distributed case to
- * supply a different IndexSet. Otherwise
- * this routine is equivalent to calling
- * clear(). See the constructor for
- * details.
- */
- void reinit (const IndexSet & local_constraints = IndexSet());
-
- /**
- * Determines if we can store a
- * constraint for the given @p
- * line_index. This routine only matters
- * in the distributed case and checks if
- * the IndexSet allows storage of this
- * line. Always returns true if not in
- * the distributed case.
- */
- bool can_store_line (const types::global_dof_index line_index) const;
-
- /**
- * This function copies the content of @p
- * constraints_in with DoFs that are
- * element of the IndexSet @p
- * filter. Elements that are not present
- * in the IndexSet are ignored. All DoFs
- * will be transformed to local index
- * space of the filter, both the
- * constrained DoFs and the other DoFs
- * these entries are constrained to. The
- * local index space of the filter is a
- * contiguous numbering of all (global)
- * DoFs that are elements in the
- * filter.
- *
- * If, for example, the filter represents
- * the range <tt>[10,20)</tt>, and the
- * constraint matrix @p constraints_in
- * includes the global indices
- * <tt>{7,13,14}</tt>, the indices
- * <tt>{3,4}</tt> are added to the
- * calling constraint matrix (since 13
- * and 14 are elements in the filter and
- * element 13 is the fourth element in
- * the index, and 14 is the fifth).
- *
- * This function provides an easy way to
- * create a ConstraintMatrix for certain
- * vector components in a vector-valued
- * problem from a full ConstraintMatrix,
- * i.e. extracting a diagonal subblock
- * from a larger ConstraintMatrix. The
- * block is specified by the IndexSet
- * argument.
- */
- void add_selected_constraints (const ConstraintMatrix &constraints_in,
- const IndexSet &filter);
-
- /**
- * @name Adding constraints
- * @{
- */
-
- /**
- * Add a new line to the matrix. If the
- * line already exists, then the function
- * simply returns without doing anything.
- */
- void add_line (const types::global_dof_index line);
-
- /**
- * Call the first add_line() function for
- * every index <code>i</code> for which
- * <code>lines[i]</code> is true.
- *
- * This function essentially exists to
- * allow adding several constraints of
- * the form <i>x<sub>i</sub></i>=0 all at once, where
- * the set of indices <i>i</i> for which these
- * constraints should be added are given
- * by the argument of this function. On
- * the other hand, just as if the
- * single-argument add_line() function
- * were called repeatedly, the
- * constraints can later be modified to
- * include linear dependencies using the
- * add_entry() function as well as
- * inhomogeneities using
- * set_inhomogeneity().
- */
- void add_lines (const std::vector<bool> &lines);
-
- /**
- * Call the first add_line() function for
- * every index <code>i</code> that
- * appears in the argument.
- *
- * This function essentially exists to
- * allow adding several constraints of
- * the form <i>x<sub>i</sub></i>=0 all at once, where
- * the set of indices <i>i</i> for which these
- * constraints should be added are given
- * by the argument of this function. On
- * the other hand, just as if the
- * single-argument add_line() function
- * were called repeatedly, the
- * constraints can later be modified to
- * include linear dependencies using the
- * add_entry() function as well as
- * inhomogeneities using
- * set_inhomogeneity().
- */
- void add_lines (const std::set<types::global_dof_index> &lines);
-
- /**
- * Call the first add_line() function for
- * every index <code>i</code> that
- * appears in the argument.
- *
- * This function essentially exists to
- * allow adding several constraints of
- * the form <i>x<sub>i</sub></i>=0 all at once, where
- * the set of indices <i>i</i> for which these
- * constraints should be added are given
- * by the argument of this function. On
- * the other hand, just as if the
- * single-argument add_line() function
- * were called repeatedly, the
- * constraints can later be modified to
- * include linear dependencies using the
- * add_entry() function as well as
- * inhomogeneities using
- * set_inhomogeneity().
- */
- void add_lines (const IndexSet &lines);
-
- /**
- * Add an entry to a given
- * line. The list of lines is
- * searched from the back to the
- * front, so clever programming
- * would add a new line (which is
- * pushed to the back) and
- * immediately afterwards fill
- * the entries of that line. This
- * way, no expensive searching is
- * needed.
- *
- * If an entry with the same
- * indices as the one this
- * function call denotes already
- * exists, then this function
- * simply returns provided that
- * the value of the entry is the
- * same. Thus, it does no harm to
- * enter a constraint twice.
- */
- void add_entry (const types::global_dof_index line,
- const types::global_dof_index column,
- const double value);
-
- /**
- * Add a whole series of entries,
- * denoted by pairs of column indices
- * and values, to a line of
- * constraints. This function is
- * equivalent to calling the preceding
- * function several times, but is
- * faster.
- */
- void add_entries (const types::global_dof_index line,
- const std::vector<std::pair<types::global_dof_index,double> > &col_val_pairs);
-
- /**
- * Set an inhomogeneity to the
- * constraint line <i>i</i>, according
- * to the discussion in the general
- * class description.
- *
- * @note the line needs to be added with
- * one of the add_line() calls first.
- */
- void set_inhomogeneity (const types::global_dof_index line,
- const double value);
-
- /**
- * Close the filling of entries. Since
- * the lines of a matrix of this type
- * are usually filled in an arbitrary
- * order and since we do not want to
- * use associative containers to store
- * the lines, we need to sort the lines
- * and within the lines the columns
- * before usage of the matrix. This is
- * done through this function.
- *
- * Also, zero entries are discarded,
- * since they are not needed.
- *
- * After closing, no more entries are
- * accepted. If the object was already
- * closed, then this function returns
- * immediately.
- *
- * This function also resolves chains
- * of constraints. For example, degree
- * of freedom 13 may be constrained to
- * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>7</sub>/2 while degree of
- * freedom 7 is itself constrained as
- * <i>u</i><sub>7</sub>=<i>u</i><sub>2</sub>/2+<i>u</i><sub>4</sub>/2. Then, the
- * resolution will be that
- * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>2</sub>/4+<i>u</i><sub>4</sub>/4. Note,
- * however, that cycles in this graph
- * of constraints are not allowed,
- * i.e. for example <i>u</i><sub>4</sub> may not be
- * constrained, directly or indirectly,
- * to <i>u</i><sub>13</sub> again.
- */
- void close ();
-
- /**
- * Merge the constraints represented by
- * the object given as argument into
- * the constraints represented by this
- * object. Both objects may or may not
- * be closed (by having their function
- * close() called before). If this
- * object was closed before, then it
- * will be closed afterwards as
- * well. Note, however, that if the
- * other argument is closed, then
- * merging may be significantly faster.
- *
- * Using the default value of the second
- * arguments, the constraints in each of
- * the two objects (the old one
- * represented by this object and the
- * argument) may not refer to the same
- * degree of freedom, i.e. a degree of
- * freedom that is constrained in one
- * object may not be constrained in the
- * second. If this is nevertheless the
- * case, an exception is thrown. However,
- * this behavior can be changed by
- * providing a different value for the
- * second argument.
- */
- void merge (const ConstraintMatrix &other_constraints,
- const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed);
-
- /**
- * Shift all entries of this matrix
- * down @p offset rows and over @p
- * offset columns.
- *
- * This function is useful if you are
- * building block matrices, where all
- * blocks are built by the same
- * DoFHandler object, i.e. the matrix
- * size is larger than the number of
- * degrees of freedom. Since several
- * matrix rows and columns correspond
- * to the same degrees of freedom,
- * you'd generate several constraint
- * objects, then shift them, and
- * finally merge() them together
- * again.
- */
- void shift (const types::global_dof_index offset);
-
- /**
- * Clear all entries of this
- * matrix. Reset the flag determining
- * whether new entries are accepted or
- * not.
- *
- * This function may be called also on
- * objects which are empty or already
- * cleared.
- */
- void clear ();
-
- /**
- * @}
- */
-
-
- /**
- * @name Querying constraints
- * @{
- */
-
- /**
- * Return number of constraints stored in
- * this matrix.
- */
- unsigned int n_constraints () const;
-
- /**
- * Return whether the degree of freedom
- * with number @p index is a
- * constrained one.
- *
- * Note that if close() was called
- * before, then this function is
- * significantly faster, since then the
- * constrained degrees of freedom are
- * sorted and we can do a binary
- * search, while before close() was
- * called, we have to perform a linear
- * search through all entries.
- */
- bool is_constrained (const types::global_dof_index index) const;
-
- /**
- * Return whether the dof is
- * constrained, and whether it is
- * constrained to only one other degree
- * of freedom with weight one. The
- * function therefore returns whether
- * the degree of freedom would simply
- * be eliminated in favor of exactly
- * one other degree of freedom.
- *
- * The function returns @p false if
- * either the degree of freedom is not
- * constrained at all, or if it is
- * constrained to more than one other
- * degree of freedom, or if it is
- * constrained to only one degree of
- * freedom but with a weight different
- * from one.
- */
- bool is_identity_constrained (const types::global_dof_index index) const;
-
- /**
- * Return the maximum number of other
- * dofs that one dof is constrained
- * to. For example, in 2d a hanging
- * node is constrained only to its two
- * neighbors, so the returned value
- * would be 2. However, for higher
- * order elements and/or higher
- * dimensions, or other types of
- * constraints, this number is no more
- * obvious.
- *
- * The name indicates that within the
- * system matrix, references to a
- * constrained node are indirected to
- * the nodes it is constrained to.
- */
- unsigned int max_constraint_indirections () const;
-
- /**
- * Returns <tt>true</tt> in case the
- * dof is constrained and there is a
- * non-trivial inhomogeneous value set
- * to the dof.
- */
- bool is_inhomogeneously_constrained (const types::global_dof_index index) const;
-
- /**
- * Returns <tt>false</tt> if all
- * constraints in the ConstraintMatrix
- * are homogeneous ones, and
- * <tt>true</tt> if there is at least
- * one inhomogeneity.
- */
- bool has_inhomogeneities () const;
-
- /**
- * Returns a pointer to the vector of
- * entries if a line is constrained, and a
- * zero pointer in case the dof is not
- * constrained.
- */
- const std::vector<std::pair<types::global_dof_index,double> >*
- get_constraint_entries (const types::global_dof_index line) const;
-
- /**
- * Returns the value of the inhomogeneity
- * stored in the constrained dof @p
- * line. Unconstrained dofs also return a
- * zero value.
- */
- double get_inhomogeneity (const types::global_dof_index line) const;
-
- /**
- * Print the constraint lines. Mainly
- * for debugging purposes.
- *
- * This function writes out all entries
- * in the constraint matrix lines with
- * their value in the form <tt>row col
- * : value</tt>. Unconstrained lines
- * containing only one identity entry
- * are not stored in this object and
- * are not printed.
- */
- void print (std::ostream &) const;
-
- /**
- * Write the graph of constraints in
- * 'dot' format. 'dot' is a program
- * that can take a list of nodes and
- * produce a graphical representation
- * of the graph of constrained degrees
- * of freedom and the degrees of
- * freedom they are constrained to.
- *
- * The output of this function can be
- * used as input to the 'dot' program
- * that can convert the graph into a
- * graphical representation in
- * postscript, png, xfig, and a number
- * of other formats.
- *
- * This function exists mostly for
- * debugging purposes.
- */
- void write_dot (std::ostream &) const;
-
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object.
- */
+ public:
+ /**
+ * An enum that describes what should
+ * happen if the two ConstraintMatrix
+ * objects involved in a call to the
+ * merge() function happen to have
+ * constraints on the same degrees of
+ * freedom.
+ */
+ enum MergeConflictBehavior
+ {
+ /**
+ * Throw an exception if the two
+ * objects concerned have
+ * conflicting constraints on the
+ * same degree of freedom.
+ */
+ no_conflicts_allowed,
+
+ /**
+ * In an operation
+ * <code>cm1.merge(cm2)</code>, if
+ * <code>cm1</code> and
+ * <code>cm2</code> have
+ * constraints on the same degree
+ * of freedom, take the one from
+ * <code>cm1</code>.
+ */
+ left_object_wins,
+
+ /**
+ * In an operation
+ * <code>cm1.merge(cm2)</code>, if
+ * <code>cm1</code> and
+ * <code>cm2</code> have
+ * constraints on the same degree
+ * of freedom, take the one from
+ * <code>cm2</code>.
+ */
+ right_object_wins
+ };
+
+ /**
+ * Constructor. The supplied IndexSet
+ * defines which indices might be
+ * constrained inside this
+ * ConstraintMatrix. In a calculation
+ * with a DoFHandler based on a
+ * parallel::distributed::Triangulation,
+ * one should use the locally relevant
+ * DoFs (see
+ * DoFTools::extract_locally_relevant_dofs()). The
+ * IndexSet allows the ConstraintMatrix
+ * to save memory. Otherwise internal
+ * data structures for all possible
+ * indices will be created.
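+ *
+ * A minimal usage sketch for a parallel
+ * computation (the DoFHandler
+ * <code>dof_handler</code> is assumed to
+ * be set up elsewhere):
+ * @code
+ * IndexSet locally_relevant_dofs;
+ * DoFTools::extract_locally_relevant_dofs (dof_handler,
+ * locally_relevant_dofs);
+ * ConstraintMatrix constraints (locally_relevant_dofs);
+ * DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ * constraints.close ();
+ * @endcode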
+ */
+ ConstraintMatrix (const IndexSet &local_constraints = IndexSet());
+
+ /**
+ * Copy constructor
+ */
+ ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
+
+ /**
+ * Reinit the ConstraintMatrix object and
+ * supply an IndexSet with lines that may
+ * be constrained. This function is only
+ * relevant in the distributed case to
+ * supply a different IndexSet. Otherwise
+ * this routine is equivalent to calling
+ * clear(). See the constructor for
+ * details.
+ */
+ void reinit (const IndexSet &local_constraints = IndexSet());
+
+ /**
+ * Determines if we can store a
+ * constraint for the given @p
+ * line_index. This routine only matters
+ * in the distributed case and checks if
+ * the IndexSet allows storage of this
+ * line. Always returns true if not in
+ * the distributed case.
+ */
- bool can_store_line (const unsigned int line_index) const;
++ bool can_store_line (const types::global_dof_index line_index) const;
+
+ /**
+ * This function copies the content of @p
+ * constraints_in with DoFs that are
+ * element of the IndexSet @p
+ * filter. Elements that are not present
+ * in the IndexSet are ignored. All DoFs
+ * will be transformed to local index
+ * space of the filter, both the
+ * constrained DoFs and the other DoFs
+ * these entries are constrained to. The
+ * local index space of the filter is a
+ * contiguous numbering of all (global)
+ * DoFs that are elements in the
+ * filter.
+ *
+ * If, for example, the filter represents
+ * the range <tt>[10,20)</tt>, and the
+ * constraint matrix @p constraints_in
+ * includes the global indices
+ * <tt>{7,13,14}</tt>, the indices
+ * <tt>{3,4}</tt> are added to the
+ * calling constraint matrix (since 13
+ * and 14 are elements in the filter and
+ * element 13 is the fourth element in
+ * the index set, and 14 is the fifth).
+ *
+ * This function provides an easy way to
+ * create a ConstraintMatrix for certain
+ * vector components in a vector-valued
+ * problem from a full ConstraintMatrix,
+ * i.e. extracting a diagonal subblock
+ * from a larger ConstraintMatrix. The
+ * block is specified by the IndexSet
+ * argument.
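+ *
+ * A sketch of the example above; the
+ * concrete indices are purely
+ * illustrative and <code>n_dofs</code> is
+ * an assumed total number of DoFs:
+ * @code
+ * ConstraintMatrix constraints_in;
+ * constraints_in.add_line (7);
+ * constraints_in.add_line (13);
+ * constraints_in.add_line (14);
+ * constraints_in.close ();
+ *
+ * IndexSet filter (n_dofs);
+ * filter.add_range (10, 20); // the half-open range [10,20)
+ *
+ * ConstraintMatrix extracted;
+ * extracted.add_selected_constraints (constraints_in, filter);
+ * // 'extracted' now holds constraints for local indices 3 and 4
+ * // (global indices 13 and 14); global index 7 lies outside the
+ * // filter and is ignored.
+ * @endcode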
+ */
+ void add_selected_constraints (const ConstraintMatrix &constraints_in,
+ const IndexSet &filter);
+
+ /**
+ * @name Adding constraints
+ * @{
+ */
+
+ /**
+ * Add a new line to the matrix. If the
+ * line already exists, then the function
+ * simply returns without doing anything.
+ */
- void add_line (const unsigned int line);
++ void add_line (const types::global_dof_index line);
+
+ /**
+ * Call the first add_line() function for
+ * every index <code>i</code> for which
+ * <code>lines[i]</code> is true.
+ *
+ * This function essentially exists to
+ * allow adding several constraints of
+ * the form <i>x<sub>i</sub></i>=0 all at once, where
+ * the set of indices <i>i</i> for which these
+ * constraints should be added are given
+ * by the argument of this function. On
+ * the other hand, just as if the
+ * single-argument add_line() function
+ * were called repeatedly, the
+ * constraints can later be modified to
+ * include linear dependencies using the
+ * add_entry() function as well as
+ * inhomogeneities using
+ * set_inhomogeneity().
+ */
+ void add_lines (const std::vector<bool> &lines);
+
+ /**
+ * Call the first add_line() function for
+ * every index <code>i</code> that
+ * appears in the argument.
+ *
+ * This function essentially exists to
+ * allow adding several constraints of
+ * the form <i>x<sub>i</sub></i>=0 all at once, where
+ * the set of indices <i>i</i> for which these
+ * constraints should be added are given
+ * by the argument of this function. On
+ * the other hand, just as if the
+ * single-argument add_line() function
+ * were called repeatedly, the
+ * constraints can later be modified to
+ * include linear dependencies using the
+ * add_entry() function as well as
+ * inhomogeneities using
+ * set_inhomogeneity().
+ */
- void add_lines (const std::set<unsigned int> &lines);
++ void add_lines (const std::set<types::global_dof_index> &lines);
+
+ /**
+ * Call the first add_line() function for
+ * every index <code>i</code> that
+ * appears in the argument.
+ *
+ * This function essentially exists to
+ * allow adding several constraints of
+ * the form <i>x<sub>i</sub></i>=0 all at once, where
+ * the set of indices <i>i</i> for which these
+ * constraints should be added are given
+ * by the argument of this function. On
+ * the other hand, just as if the
+ * single-argument add_line() function
+ * were called repeatedly, the
+ * constraints can later be modified to
+ * include linear dependencies using the
+ * add_entry() function as well as
+ * inhomogeneities using
+ * set_inhomogeneity().
+ */
+ void add_lines (const IndexSet &lines);
+
+ /**
+ * Add an entry to a given
+ * line. The list of lines is
+ * searched from the back to the
+ * front, so clever programming
+ * would add a new line (which is
+ * pushed to the back) and
+ * immediately afterwards fill
+ * the entries of that line. This
+ * way, no expensive searching is
+ * needed.
+ *
+ * If an entry with the same
+ * indices as the one this
+ * function call denotes already
+ * exists, then this function
+ * simply returns provided that
+ * the value of the entry is the
+ * same. Thus, it does no harm to
+ * enter a constraint twice.
+ */
- void add_entry (const unsigned int line,
- const unsigned int column,
++ void add_entry (const types::global_dof_index line,
++ const types::global_dof_index column,
+ const double value);
+
+ /**
+ * Add a whole series of entries,
+ * denoted by pairs of column indices
+ * and values, to a line of
+ * constraints. This function is
+ * equivalent to calling the preceding
+ * function several times, but is
+ * faster.
+ */
- void add_entries (const unsigned int line,
- const std::vector<std::pair<unsigned int,double> > &col_val_pairs);
++ void add_entries (const types::global_dof_index line,
++ const std::vector<std::pair<types::global_dof_index,double> > &col_val_pairs);
+
+ /**
+ * Set an inhomogeneity for the
+ * constraint line <i>i</i>, according
+ * to the discussion in the general
+ * class description.
+ *
+ * @note the line needs to be added with
+ * one of the add_line() calls first.
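+ *
+ * A short sketch that builds the single
+ * inhomogeneous constraint
+ * <i>x</i><sub>3</sub> = <i>x</i><sub>1</sub>/2 + <i>x</i><sub>2</sub>/2 + 1
+ * (the indices are purely illustrative):
+ * @code
+ * ConstraintMatrix constraints;
+ * constraints.add_line (3);
+ * constraints.add_entry (3, 1, 0.5);
+ * constraints.add_entry (3, 2, 0.5);
+ * constraints.set_inhomogeneity (3, 1.0);
+ * constraints.close ();
+ * @endcode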
+ */
- void set_inhomogeneity (const unsigned int line,
++ void set_inhomogeneity (const types::global_dof_index line,
+ const double value);
+
+ /**
+ * Close the filling of entries. Since
+ * the lines of a matrix of this type
+ * are usually filled in an arbitrary
+ * order and since we do not want to
+ * use associative containers to store
+ * the lines, we need to sort the lines
+ * and within the lines the columns
+ * before usage of the matrix. This is
+ * done through this function.
+ *
+ * Also, zero entries are discarded,
+ * since they are not needed.
+ *
+ * After closing, no more entries are
+ * accepted. If the object was already
+ * closed, then this function returns
+ * immediately.
+ *
+ * This function also resolves chains
+ * of constraints. For example, degree
+ * of freedom 13 may be constrained to
+ * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>7</sub>/2 while degree of
+ * freedom 7 is itself constrained as
+ * <i>u</i><sub>7</sub>=<i>u</i><sub>2</sub>/2+<i>u</i><sub>4</sub>/2. Then, the
+ * resolution will be that
+ * <i>u</i><sub>13</sub>=<i>u</i><sub>3</sub>/2+<i>u</i><sub>2</sub>/4+<i>u</i><sub>4</sub>/4. Note,
+ * however, that cycles in this graph
+ * of constraints are not allowed,
+ * i.e. for example <i>u</i><sub>4</sub> may not be
+ * constrained, directly or indirectly,
+ * to <i>u</i><sub>13</sub> again.
+ */
+ void close ();
+
+ /**
+ * Merge the constraints represented by
+ * the object given as argument into
+ * the constraints represented by this
+ * object. Both objects may or may not
+ * be closed (by having their function
+ * close() called before). If this
+ * object was closed before, then it
+ * will be closed afterwards as
+ * well. Note, however, that if the
+ * other argument is closed, then
+ * merging may be significantly faster.
+ *
+ * Using the default value of the second
+ * argument, the constraints in each of
+ * the two objects (the old one
+ * represented by this object and the
+ * argument) may not refer to the same
+ * degree of freedom, i.e. a degree of
+ * freedom that is constrained in one
+ * object may not be constrained in the
+ * second. If this is nevertheless the
+ * case, an exception is thrown. However,
+ * this behavior can be changed by
+ * providing a different value for the
+ * second argument.
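+ *
+ * A usage sketch (the two object names
+ * are purely illustrative):
+ * @code
+ * ConstraintMatrix boundary_constraints, hanging_node_constraints;
+ * // ...fill both objects...
+ *
+ * // keep the boundary values whenever both objects constrain
+ * // the same degree of freedom:
+ * boundary_constraints.merge (hanging_node_constraints,
+ * ConstraintMatrix::left_object_wins);
+ * boundary_constraints.close ();
+ * @endcode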
+ */
+ void merge (const ConstraintMatrix &other_constraints,
+ const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed);
+
+ /**
+ * Shift all entries of this matrix
+ * down @p offset rows and over @p
+ * offset columns.
+ *
+ * This function is useful if you are
+ * building block matrices, where all
+ * blocks are built by the same
+ * DoFHandler object, i.e. the matrix
+ * size is larger than the number of
+ * degrees of freedom. Since several
+ * matrix rows and columns correspond
+ * to the same degrees of freedom,
+ * you'd generate several constraint
+ * objects, then shift them, and
+ * finally merge() them together
+ * again.
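+ *
+ * A sketch for a system made of two
+ * identical blocks (the DoFHandler and
+ * the two constraint objects are assumed
+ * to exist):
+ * @code
+ * const types::global_dof_index n = dof_handler.n_dofs ();
+ * // constraints_block_1 was built for dof indices [0,n); move it
+ * // to the rows and columns of the second block, i.e. [n,2n):
+ * constraints_block_1.shift (n);
+ * constraints_block_0.merge (constraints_block_1);
+ * constraints_block_0.close ();
+ * @endcode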
+ */
- void shift (const unsigned int offset);
++ void shift (const types::global_dof_index offset);
+
+ /**
+ * Clear all entries of this
+ * matrix. Reset the flag determining
+ * whether new entries are accepted or
+ * not.
+ *
+ * This function may be called also on
+ * objects which are empty or already
+ * cleared.
+ */
+ void clear ();
+
+ /**
+ * @}
+ */
+
+
+ /**
+ * @name Querying constraints
+ * @{
+ */
+
+ /**
+ * Return number of constraints stored in
+ * this matrix.
+ */
+ unsigned int n_constraints () const;
+
+ /**
+ * Return whether the degree of freedom
+ * with number @p index is a
+ * constrained one.
+ *
+ * Note that if close() was called
+ * before, then this function is
+ * significantly faster, since then the
+ * constrained degrees of freedom are
+ * sorted and we can do a binary
+ * search, while before close() was
+ * called, we have to perform a linear
+ * search through all entries.
+ */
- bool is_constrained (const unsigned int index) const;
++ bool is_constrained (const types::global_dof_index index) const;
+
+ /**
+ * Return whether the dof is
+ * constrained, and whether it is
+ * constrained to only one other degree
+ * of freedom with weight one. The
+ * function therefore returns whether
+ * the degree of freedom would simply
+ * be eliminated in favor of exactly
+ * one other degree of freedom.
+ *
+ * The function returns @p false if
+ * either the degree of freedom is not
+ * constrained at all, or if it is
+ * constrained to more than one other
+ * degree of freedom, or if it is
+ * constrained to only one degree of
+ * freedom but with a weight different
+ * from one.
+ */
- bool is_identity_constrained (const unsigned int index) const;
++ bool is_identity_constrained (const types::global_dof_index index) const;
+
+ /**
+ * Return the maximum number of other
+ * dofs that one dof is constrained
+ * to. For example, in 2d a hanging
+ * node is constrained only to its two
+ * neighbors, so the returned value
+ * would be 2. However, for higher
+ * order elements and/or higher
+ * dimensions, or other types of
+ * constraints, this number is no longer
+ * obvious.
+ *
+ * The name indicates that within the
+ * system matrix, references to a
+ * constrained node are indirected to
+ * the nodes it is constrained to.
+ */
+ unsigned int max_constraint_indirections () const;
+
+ /**
+ * Returns <tt>true</tt> in case the
+ * dof is constrained and a non-trivial
+ * inhomogeneous value is set for the
+ * dof.
+ */
- bool is_inhomogeneously_constrained (const unsigned int index) const;
++ bool is_inhomogeneously_constrained (const types::global_dof_index index) const;
+
+ /**
+ * Returns <tt>false</tt> if all
+ * constraints in the ConstraintMatrix
+ * are homogeneous ones, and
+ * <tt>true</tt> if there is at least
+ * one inhomogeneity.
+ */
+ bool has_inhomogeneities () const;
+
+ /**
+ * Returns a pointer to the vector of
+ * entries if a line is constrained, and a
+ * zero pointer in case the dof is not
+ * constrained.
+ */
- const std::vector<std::pair<unsigned int,double> > *
- get_constraint_entries (const unsigned int line) const;
++ const std::vector<std::pair<types::global_dof_index,double> > *
++ get_constraint_entries (const types::global_dof_index line) const;
+
+ /**
+ * Returns the value of the inhomogeneity
+ * stored in the constrained dof @p
+ * line. Unconstrained dofs also return a
+ * zero value.
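+ *
+ * A query sketch that prints the
+ * constraint on a given DoF index
+ * <code>i</code> (both <code>i</code>
+ * and the <code>constraints</code>
+ * object are assumed to exist):
+ * @code
+ * if (constraints.is_constrained (i))
+ * {
+ * const std::vector<std::pair<types::global_dof_index,double> >
+ * *entries = constraints.get_constraint_entries (i);
+ * for (unsigned int q=0; q<entries->size(); ++q)
+ * std::cout << "x_" << i << " -> x_" << (*entries)[q].first
+ * << " * " << (*entries)[q].second << std::endl;
+ * std::cout << "inhomogeneity: "
+ * << constraints.get_inhomogeneity (i) << std::endl;
+ * }
+ * @endcode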
+ */
- double get_inhomogeneity (const unsigned int line) const;
++ double get_inhomogeneity (const types::global_dof_index line) const;
+
+ /**
+ * Print the constraint lines. Mainly
+ * for debugging purposes.
+ *
+ * This function writes out all entries
+ * in the constraint matrix lines with
+ * their value in the form <tt>row col
+ * : value</tt>. Unconstrained lines
+ * containing only one identity entry
+ * are not stored in this object and
+ * are not printed.
+ */
+ void print (std::ostream &) const;
+
+ /**
+ * Write the graph of constraints in
+ * 'dot' format. 'dot' is a program
+ * that can take a list of nodes and
+ * produce a graphical representation
+ * of the graph of constrained degrees
+ * of freedom and the degrees of
+ * freedom they are constrained to.
+ *
+ * The output of this function can be
+ * used as input to the 'dot' program
+ * that can convert the graph into a
+ * graphical representation in
+ * postscript, png, xfig, and a number
+ * of other formats.
+ *
+ * This function exists mostly for
+ * debugging purposes.
+ */
+ void write_dot (std::ostream &) const;
+
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Eliminating constraints from linear systems after their creation
+ * @{
+ */
+
+ /**
+ * Condense a given sparsity
+ * pattern. This function assumes the
+ * uncondensed matrix struct to be
+ * compressed and the one to be filled
+ * to be empty. The condensed structure
+ * is compressed afterwards.
+ *
+ * The constraint matrix object must be
+ * closed to call this function.
+ *
+ * @note The hanging nodes are
+ * completely eliminated from the
+ * linear system referring to
+ * <tt>condensed</tt>. Therefore, the
+ * dimension of <tt>condensed</tt> is
+ * the dimension of
+ * <tt>uncondensed</tt> minus the
+ * number of constrained degrees of
+ * freedom.
+ */
+ void condense (const SparsityPattern &uncondensed,
+ SparsityPattern &condensed) const;
+
+
+ /**
+ * This function does much the same as
+ * the above one, except that it
+ * condenses the matrix struct
+ * 'in-place'. It does not remove
+ * nonzero entries from the matrix but
+ * adds those needed for the process of
+ * distribution of the constrained
+ * degrees of freedom.
+ *
+ * Since this function adds new nonzero
+ * entries to the sparsity pattern, the
+ * argument must not be
+ * compressed. However the constraint
+ * matrix must be closed. The matrix
+ * struct is compressed at the end of
+ * the function.
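+ *
+ * A sketch of the classical setup
+ * sequence that uses this function
+ * (<code>dof_handler</code> and the
+ * closed <code>constraints</code> object
+ * are assumed to exist):
+ * @code
+ * SparsityPattern sparsity_pattern;
+ * sparsity_pattern.reinit (dof_handler.n_dofs (),
+ * dof_handler.n_dofs (),
+ * dof_handler.max_couplings_between_dofs ());
+ * DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
+ * constraints.condense (sparsity_pattern);
+ * sparsity_pattern.compress ();
+ * @endcode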
+ */
+ void condense (SparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square block sparsity
+ * patterns.
+ */
+ void condense (BlockSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ *
+ * Given the data structure used by
+ * CompressedSparsityPattern, this
+ * function becomes quadratic in the
+ * number of degrees of freedom for
+ * large problems and can dominate
+ * setting up linear systems when
+ * several hundred thousand or millions
+ * of unknowns are involved and for
+ * problems with many nonzero elements
+ * per row (for example for
+ * vector-valued problems or hp finite
+ * elements). In this case, it is
+ * advisable to use the
+ * CompressedSetSparsityPattern class
+ * instead, see for example @ref
+ * step_27 "step-27", or to use the
+ * CompressedSimpleSparsityPattern
+ * class, see for example @ref step_31
+ * "step-31".
+ */
+ void condense (CompressedSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses compressed sparsity
+ * patterns, which are based on the
+ * std::set container.
+ */
+ void condense (CompressedSetSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses compressed sparsity
+ * patterns, which are based on the
+ * ''simple'' approach.
+ */
+ void condense (CompressedSimpleSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ *
+ * Given the data structure used by
+ * BlockCompressedSparsityPattern, this
+ * function becomes quadratic in the
+ * number of degrees of freedom for
+ * large problems and can dominate
+ * setting up linear systems when
+ * several hundred thousand or millions
+ * of unknowns are involved and for
+ * problems with many nonzero elements
+ * per row (for example for
+ * vector-valued problems or hp finite
+ * elements). In this case, it is
+ * advisable to use the
+ * BlockCompressedSetSparsityPattern
+ * class instead, see for example @ref
+ * step_27 "step-27" and @ref step_31
+ * "step-31".
+ */
+ void condense (BlockCompressedSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ */
+ void condense (BlockCompressedSetSparsityPattern &sparsity) const;
+
+ /**
+ * Same function as above, but
+ * condenses square compressed sparsity
+ * patterns.
+ */
+ void condense (BlockCompressedSimpleSparsityPattern &sparsity) const;
+
+
+ /**
+ * Condense a given matrix. The
+ * associated matrix struct should be
+ * condensed and compressed. It is the
+ * user's responsibility to guarantee
+ * that all entries in the @p condensed
+ * matrix be zero!
+ *
+ * The constraint matrix object must be
+ * closed to call this function.
+ */
+ template<typename number>
+ void condense (const SparseMatrix<number> &uncondensed,
+ SparseMatrix<number> &condensed) const;
+
+ /**
+ * This function does much the same as
+ * the above one, except that it
+ * condenses the matrix 'in-place'. See
+ * the general documentation of this
+ * class for more detailed information.
+ */
+ template<typename number>
+ void condense (SparseMatrix<number> &matrix) const;
+
+ /**
+ * Same function as above, but
+ * condenses square block sparse
+ * matrices.
+ */
+ template <typename number>
+ void condense (BlockSparseMatrix<number> &matrix) const;
+
+ /**
+ * Condense the given vector @p
+ * uncondensed into @p condensed. It is
+ * the user's responsibility to
+ * guarantee that all entries of @p
+ * condensed be zero. Note that this
+ * function does not take any
+ * inhomogeneity into account and
+ * throws an exception in case there
+ * are any inhomogeneities. Use
+ * the function using both a matrix and
+ * vector for that case.
+ *
+ * The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface.
+ */
+ template <class VectorType>
+ void condense (const VectorType &uncondensed,
+ VectorType &condensed) const;
+
+ /**
+ * Condense the given vector
+ * in-place. The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface. Note that this function
+ * does not take any inhomogeneity into
+ * account and throws an exception in
+ * case there are any
+ * inhomogeneities. Use the function
+ * using both a matrix and vector for
+ * that case.
+ */
+ template <class VectorType>
+ void condense (VectorType &vec) const;
+
+ /**
+ * Condense a given matrix and a given
+ * vector. The associated matrix struct
+ * should be condensed and
+ * compressed. It is the user's
+ * responsibility to guarantee that all
+ * entries in the @p condensed matrix
+ * and vector be zero! This function is
+ * the appropriate choice for applying
+ * inhomogeneous constraints.
+ *
+ * The constraint matrix object must be
+ * closed to call this function.
+ */
+ template<typename number, class VectorType>
+ void condense (const SparseMatrix<number> &uncondensed_matrix,
+ const VectorType &uncondensed_vector,
+ SparseMatrix<number> &condensed_matrix,
+ VectorType &condensed_vector) const;
+
+ /**
+ * This function does much the same as
+ * the above one, except that it
+ * condenses matrix and vector
+ * 'in-place'. See the general
+ * documentation of this class for more
+ * detailed information.
+ */
+ template<typename number, class VectorType>
+ void condense (SparseMatrix<number> &matrix,
+ VectorType &vector) const;
+
+ /**
+ * Same function as above, but
+ * condenses square block sparse
+ * matrices and vectors.
+ */
+ template <typename number, class BlockVectorType>
+ void condense (BlockSparseMatrix<number> &matrix,
+ BlockVectorType &vector) const;
+
+ /**
+ * Sets the values of all constrained
+ * DoFs in a vector to zero.
+ * The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a
+ * PETSc or Trilinos vector
+ * wrapper class, or any other
+ * type having the same
+ * interface.
+ */
+ template <class VectorType>
+ void set_zero (VectorType &vec) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Eliminating constraints from linear systems during their creation
+ * @{
+ */
+
+ /**
+ * This function takes a vector of
+ * local contributions (@p
+ * local_vector) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global vector. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as @p
+ * local_vector and @p
+ * local_dof_indices have the same
+ * number of elements, this function is
+ * happy with whatever it is
+ * given.
+ *
+ * In contrast to the similar function
+ * in the DoFAccessor class, this
+ * function also takes care of
+ * constraints, i.e. if one of the
+ * elements of @p local_dof_indices
+ * belongs to a constrained node, then
+ * rather than writing the
+ * corresponding element of @p
+ * local_vector into @p global_vector,
+ * the element is distributed to the
+ * entries in the global vector to
+ * which this particular degree of
+ * freedom is constrained.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to the
+ * global object, one saves the call to
+ * the condense function after the
+ * vectors and matrices are fully
+ * assembled. On the other hand, by
+ * consequence, the function does not
+ * only write into the entries enumerated
+ * by the @p local_dof_indices array, but
+ * also (possibly) others as necessary.
+ *
+ * Note that this function will apply all
+ * constraints as if they were
+ * homogeneous. For correctly setting
+ * inhomogeneous constraints, use the
+ * similar function with a matrix
+ * argument or the function with both
+ * matrix and vector arguments.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to make
+ * sure that only one process at a time
+ * calls this function.
+ */
+ template <class InVector, class OutVector>
+ void
+ distribute_local_to_global (const InVector &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ OutVector &global_vector) const;
+
+ /**
+ * This function takes a vector of
+ * local contributions (@p
+ * local_vector) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global vector. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as @p
+ * local_vector and @p
+ * local_dof_indices have the same
+ * number of elements, this function is
+ * happy with whatever it is
+ * given.
+ *
+ * In contrast to the similar function in
+ * the DoFAccessor class, this function
+ * also takes care of constraints,
+ * i.e. if one of the elements of @p
+ * local_dof_indices belongs to a
+ * constrained node, then rather than
+ * writing the corresponding element of
+ * @p local_vector into @p global_vector,
+ * the element is distributed to the
+ * entries in the global vector to which
+ * this particular degree of freedom is
+ * constrained.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to the
+ * global object, one saves the call to
+ * the condense function after the
+ * vectors and matrices are fully
+ * assembled. On the other hand, by
+ * consequence, the function does not
+ * only write into the entries enumerated
+ * by the @p local_dof_indices array, but
+ * also (possibly) others as
+ * necessary. This includes writing into
+ * diagonal elements of the matrix if the
+ * corresponding degree of freedom is
+ * constrained.
+ *
+ * The fourth argument
+ * <tt>local_matrix</tt> is intended to
+ * be used in case one wants to apply
+ * inhomogeneous constraints on the
+ * vector only. Such a situation could be
+ * where one wants to assemble a right
+ * hand side vector on a problem with
+ * inhomogeneous constraints, but the
+ * global matrix has been assembled
+ * previously. A typical example of this
+ * is a time stepping algorithm where the
+ * stiffness matrix is assembled once,
+ * and the right hand side updated every
+ * time step. Note, however, that the
+ * entries in the columns of the local
+ * matrix have to be exactly the same as
+ * those that have been written into the
+ * global matrix. Otherwise, this
+ * function will not be able to correctly
+ * handle inhomogeneities.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to make
+ * sure that only one process at a time
+ * calls this function.
+ */
+ template <typename VectorType>
+ void
+ distribute_local_to_global (const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ VectorType &global_vector,
+ const FullMatrix<double> &local_matrix) const;
+
+ /**
+ * Enter a single value into a
+ * result vector, obeying constraints.
+ */
+ template <class VectorType>
+ void
+ distribute_local_to_global (const unsigned int index,
+ const double value,
+ VectorType &global_vector) const;
+
+ /**
+ * This function takes a pointer to a
+ * vector of local contributions (@p
+ * local_vector) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global vector. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as the
+ * entries in @p local_dof_indices
+ * indicate reasonable global vector
+ * entries, this function is happy with
+ * whatever it is given.
+ *
+ * If one of the elements of @p
+ * local_dof_indices belongs to a
+ * constrained node, then rather than
+ * writing the corresponding element of
+ * @p local_vector into @p
+ * global_vector, the element is
+ * distributed to the entries in the
+ * global vector to which this
+ * particular degree of freedom is
+ * constrained.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to
+ * the global object, one saves the
+ * call to the condense function after
+ * the vectors and matrices are fully
+ * assembled. Note that this function
+ * completely ignores inhomogeneous
+ * constraints.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to
+ * make sure that only one process at a
+ * time calls this function.
+ */
+ template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+ class VectorType>
+ void
+ distribute_local_to_global (ForwardIteratorVec local_vector_begin,
+ ForwardIteratorVec local_vector_end,
+ ForwardIteratorInd local_indices_begin,
+ VectorType &global_vector) const;
+
+ /**
+ * This function takes a matrix of
+ * local contributions (@p
+ * local_matrix) corresponding to the
+ * degrees of freedom indices given in
+ * @p local_dof_indices and distributes
+ * them to the global matrix. In most
+ * cases, these local contributions
+ * will be the result of an integration
+ * over a cell or face of a
+ * cell. However, as long as @p
+ * local_matrix and @p
+ * local_dof_indices have the same
+ * number of elements, this function is
+ * happy with whatever it is given.
+ *
+ * In contrast to the similar function
+ * in the DoFAccessor class, this
+ * function also takes care of
+ * constraints, i.e. if one of the
+ * elements of @p local_dof_indices
+ * belongs to a constrained node, then
+ * rather than writing the
+ * corresponding element of @p
+ * local_matrix into @p global_matrix,
+ * the element is distributed to the
+ * entries in the global matrix to
+ * which this particular degree of
+ * freedom is constrained.
+ *
+ * With this scheme, we never write
+ * into rows or columns of constrained
+ * degrees of freedom. In order to make
+ * sure that the resulting matrix can
+ * still be inverted, we need to do
+ * something with the diagonal elements
+ * corresponding to constrained
+ * nodes. Thus, if a degree of freedom
+ * in @p local_dof_indices is
+ * constrained, we distribute the
+ * corresponding entries in the matrix,
+ * but also add the absolute value of
+ * the diagonal entry of the local
+ * matrix to the corresponding entry in
+ * the global matrix. Since the exact
+ * value of the diagonal element is not
+ * important (the value of the
+ * respective degree of freedom will be
+ * overwritten by the distribute() call
+ * later on anyway), this guarantees
+ * that the diagonal entry is always
+ * non-zero, positive, and of the same
+ * order of magnitude as the other
+ * entries of the matrix.
+ *
+ * Thus, by using this function to
+ * distribute local contributions to
+ * the global object, one saves the
+ * call to the condense function after
+ * the vectors and matrices are fully
+ * assembled.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to
+ * make sure that only one process at a
+ * time calls this function.
+ */
+ template <typename MatrixType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * Does the same as the function
+ * above but can treat
+ * non-quadratic matrices.
+ */
+ template <typename MatrixType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
++ const std::vector<types::global_dof_index> &row_indices,
++ const std::vector<types::global_dof_index> &col_indices,
+ MatrixType &global_matrix) const;
+
+ /**
+ * This function simultaneously
+ * writes elements into matrix
+ * and vector, according to the
+ * constraints specified by the
+ * calling ConstraintMatrix. This
+ * function can correctly handle
+ * inhomogeneous constraints as
+ * well. For the parameter
+ * use_inhomogeneities_for_rhs
+ * see the documentation in the @ref
+ * constraints module.
+ *
+ * @note This function is not
+ * thread-safe, so you will need to
+ * make sure that only one process at a
+ * time calls this function.
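+ *
+ * A sketch of the typical call inside
+ * the assembly loop over cells
+ * (<code>cell_matrix</code>,
+ * <code>cell_rhs</code> and
+ * <code>local_dof_indices</code> are
+ * assumed to be set up as usual):
+ * @code
+ * cell->get_dof_indices (local_dof_indices);
+ * constraints.distribute_local_to_global (cell_matrix,
+ * cell_rhs,
+ * local_dof_indices,
+ * system_matrix,
+ * system_rhs);
+ * @endcode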
+ */
+ template <typename MatrixType, typename VectorType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs = false) const;
+
+ /**
+ * Do a similar operation as the
+ * distribute_local_to_global() function
+ * that distributes writing entries into
+ * a matrix for constrained degrees of
+ * freedom, except that here we don't
+ * write into a matrix but only allocate
+ * sparsity pattern entries.
+ *
+ * As explained in the
+ * @ref hp_paper "hp paper"
+ * and in step-27,
+ * first allocating a sparsity pattern
+ * and later coming back and allocating
+ * additional entries for those matrix
+ * entries that will be written to due to
+ * the elimination of constrained degrees
+ * of freedom (using
+ * ConstraintMatrix::condense() ), can be
+ * a very expensive procedure. It is
+ * cheaper to allocate these entries
+ * right away without having to do a
+ * second pass over the sparsity pattern
+ * object. This function does exactly
+ * that.
+ *
+ * Because the function only allocates
+ * entries in a sparsity pattern, all it
+ * needs to know are the degrees of
+ * freedom that couple to each
+ * other. Unlike the previous function,
+ * no actual values are written, so the
+ * second input argument is not necessary
+ * here.
+ *
+ * The third argument to this function,
+ * @p keep_constrained_entries, determines
+ * whether the function shall allocate
+ * entries in the sparsity pattern at
+ * all for entries that will later be
+ * set to zero upon condensation of the
+ * matrix. These entries are necessary
+ * if the matrix is built
+ * unconstrained, and only later
+ * condensed. They are not necessary if
+ * the matrix is built using the
+ * distribute_local_to_global()
+ * function of this class which
+ * distributes entries right away when
+ * copying a local matrix into a global
+ * object. The default of this argument
+ * is true, meaning to allocate the few
+ * entries that may later be set to
+ * zero.
+ *
+ * By default, the function adds
+ * entries for all pairs of indices
+ * given in the first argument to the
+ * sparsity pattern (unless
+ * keep_constrained_entries is
+ * false). However, sometimes one would
+ * like to only add a subset of all of
+ * these pairs. In that case, the last
+ * argument can be used which specifies
+ * a boolean mask which of the pairs of
+ * indices should be considered. If the
+ * mask is false for a pair of indices,
+ * then no entry will be added to the
+ * sparsity pattern for this pair,
+ * irrespective of whether one or both
+ * of the indices correspond to
+ * constrained degrees of freedom.
+ *
+ * This function is not typically called
+ * from user code, but is used in the
+ * DoFTools::make_sparsity_pattern()
+ * function when passed a constraint
+ * matrix object.
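+ *
+ * A sketch of that typical indirect use
+ * (the <code>dof_handler</code>,
+ * <code>constraints</code> and
+ * <code>sparsity_pattern</code> objects
+ * are assumed to exist):
+ * @code
+ * CompressedSimpleSparsityPattern csp (dof_handler.n_dofs (),
+ * dof_handler.n_dofs ());
+ * DoFTools::make_sparsity_pattern (dof_handler,
+ * csp,
+ * constraints,
+ * false); // keep_constrained_dofs
+ * sparsity_pattern.copy_from (csp);
+ * @endcode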
+ */
+ template <typename SparsityType>
+ void
- add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
++ add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries = true,
+ const Table<2,bool> &dof_mask = default_empty_table) const;
+
+ /**
+ * Similar to the previous function,
+ * but for non-quadratic sparsity
+ * patterns.
+ */
+ template <typename SparsityType>
+ void
- add_entries_local_to_global (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
++ add_entries_local_to_global (const std::vector<types::global_dof_index> &row_indices,
++ const std::vector<types::global_dof_index> &col_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries = true,
+ const Table<2,bool> &dof_mask = default_empty_table) const;
+
+ /**
+ * This function imports values from a
+ * global vector (@p global_vector) by
+ * applying the constraints to a vector
+ * of local values, expressed in
+ * iterator format. In most cases, the
+ * local values will be identified by
+ * the local dof values on a
+ * cell. However, as long as the
+ * entries in @p local_dof_indices
+ * indicate reasonable global vector
+ * entries, this function is happy with
+ * whatever it is given.
+ *
+ * If one of the elements of @p
+ * local_dof_indices belongs to a
+ * constrained node, then rather than
+ * writing the corresponding element of
+ * @p global_vector into @p
+ * local_vector, the constraints are
+ * resolved as the respective
+ * distribute function does, i.e., the
+ * local entry is constructed from the
+ * global entries to which this
+ * particular degree of freedom is
+ * constrained.
+ *
+ * In contrast to the similar function
+ * get_dof_values in the DoFAccessor
+ * class, this function does not need
+ * the constrained values to be
+ * correctly set (i.e., distribute to
+ * be called).
+ */
+ template <typename ForwardIteratorVec, typename ForwardIteratorInd,
+ class VectorType>
+ void
- get_dof_values (const VectorType &global_vector,
++ get_dof_values (const VectorType &global_vector,
+ ForwardIteratorInd local_indices_begin,
+ ForwardIteratorVec local_vector_begin,
+ ForwardIteratorVec local_vector_end) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Dealing with constraints after solving a linear system
+ * @{
+ */
+
+ /**
+ * Re-distribute the elements of the
+ * vector @p condensed to @p
+ * uncondensed. It is the user's
+ * responsibility to guarantee that all
+ * entries of @p uncondensed be zero!
+ *
+ * This function undoes the action of
+ * @p condense in some sense, but it should
+ * be noted that it is not the inverse
+ * of @p condense.
+ *
+ * The @p VectorType may be a
+ * Vector<float>, Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface.
+ */
+ template <class VectorType>
+ void distribute (const VectorType &condensed,
+ VectorType &uncondensed) const;
+
+ /**
+ * Re-distribute the elements of the
+ * vector in-place. The @p VectorType
+ * may be a Vector<float>,
+ * Vector<double>,
+ * BlockVector<tt><...></tt>, a PETSc
+ * or Trilinos vector wrapper class, or
+ * any other type having the same
+ * interface.
+ *
+ * Note that if called with a
+ * TrilinosWrappers::MPI::Vector it may
+ * not contain ghost elements.
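+ *
+ * A sketch of the usual step after
+ * solving the condensed linear system
+ * (the matrix and vectors are assumed to
+ * exist):
+ * @code
+ * SolverControl solver_control (1000, 1e-12);
+ * SolverCG<> solver (solver_control);
+ * solver.solve (system_matrix, solution, system_rhs,
+ * PreconditionIdentity ());
+ * constraints.distribute (solution);
+ * @endcode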
+ */
+ template <class VectorType>
+ void distribute (VectorType &vec) const;
+
+ /**
+ * @}
+ */
+
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcMatrixIsClosed);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0 (ExcMatrixNotClosed);
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcLineInexistant,
- unsigned int,
++ types::global_dof_index,
+ << "The specified line " << arg1
+ << " does not exist.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException4 (ExcEntryAlreadyExists,
+ int, int, double, double,
+ << "The entry for the indices " << arg1 << " and "
+ << arg2 << " already exists, but the values "
+ << arg3 << " (old) and " << arg4 << " (new) differ "
+ << "by " << (arg4-arg3) << ".");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException2 (ExcDoFConstrainedToConstrainedDoF,
+ int, int,
+ << "You tried to constrain DoF " << arg1
+ << " to DoF " << arg2
+ << ", but that one is also constrained. This is not allowed!");
+ /**
+ * Exception.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcDoFIsConstrainedFromBothObjects,
+ int,
+ << "Degree of freedom " << arg1
+ << " is constrained from both objects in a merge operation.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcDoFIsConstrainedToConstrainedDoF,
+ int,
+ << "In the given argument a degree of freedom is constrained "
+ << "to another DoF with number " << arg1
+ << ", which however is constrained by this object. This is not"
+ << " allowed.");
+ /**
+ * Exception
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1 (ExcRowNotStoredHere,
+ int,
+ << "The index set given to this constraint matrix indicates that "
+ << "constraints for degree of freedom " << arg1
+ << " should not be stored by this object, but a constraint "
+ << "is being added.");
+
+ private:
+
+ /**
+ * This class represents one line of a
+ * constraint matrix.
+ */
+ struct ConstraintLine
+ {
+ /**
+ * A data type in which we store the list
+ * of entries that make up the homogeneous
+ * part of a constraint.
+ */
- typedef std::vector<std::pair<unsigned int,double> > Entries;
++ typedef std::vector<std::pair<types::global_dof_index,double> > Entries;
+
+ /**
+ * Number of this line. Since only
+ * very few lines are stored, we
+ * can not assume a specific order
+ * and have to store the line
+ * number explicitly.
+ */
- unsigned int line;
++ types::global_dof_index line;
+
+ /**
+ * Row numbers and values of the
+ * entries in this line.
+ *
+ * For the reason why we use a
+ * vector instead of a map and the
+ * consequences thereof, the same
+ * applies as what is said for
+ * ConstraintMatrix::lines.
+ */
+ Entries entries;
+
+ /**
+ * Value of the inhomogeneity.
+ */
+ double inhomogeneity;
+
+ /**
+ * This operator is a bit weird and
+ * unintuitive: it compares the
+ * line numbers of two lines. We
+ * need this to sort the lines; in
+ * fact we could do this using a
+ * comparison predicate. However,
+ * this way, it is easier, albeit
+ * unintuitive since two lines
+ * really have no god-given order
+ * relation.
+ */
+ bool operator < (const ConstraintLine &) const;
+
+ /**
+ * This operator is likewise weird:
+ * it checks whether the line
+ * indices of the two operands are
+ * equal, irrespective of the fact
+ * that the contents of the line
+ * may be different.
+ */
+ bool operator == (const ConstraintLine &) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes) of
+ * this object.
+ */
std::size_t memory_consumption () const;
-
- /**
- * @}
- */
-
- /**
- * @name Eliminating constraints from linear systems after their creation
- * @{
- */
-
- /**
- * Condense a given sparsity
- * pattern. This function assumes the
- * uncondensed matrix struct to be
- * compressed and the one to be filled
- * to be empty. The condensed structure
- * is compressed afterwards.
- *
- * The constraint matrix object must be
- * closed to call this function.
- *
- * @note The hanging nodes are
- * completely eliminated from the
- * linear system referring to
- * <tt>condensed</tt>. Therefore, the
- * dimension of <tt>condensed</tt> is
- * the dimension of
- * <tt>uncondensed</tt> minus the
- * number of constrained degrees of
- * freedom.
- */
- void condense (const SparsityPattern &uncondensed,
- SparsityPattern &condensed) const;
-
-
- /**
- * This function does much the same as
- * the above one, except that it
- * condenses the matrix struct
- * 'in-place'. It does not remove
- * nonzero entries from the matrix but
- * adds those needed for the process of
- * distribution of the constrained
- * degrees of freedom.
- *
- * Since this function adds new nonzero
- * entries to the sparsity pattern, the
- * argument must not be
- * compressed. However the constraint
- * matrix must be closed. The matrix
- * struct is compressed at the end of
- * the function.
- */
- void condense (SparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square block sparsity
- * patterns.
- */
- void condense (BlockSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- *
- * Given the data structure used by
- * CompressedSparsityPattern, this
- * function becomes quadratic in the
- * number of degrees of freedom for
- * large problems and can dominate
- * setting up linear systems when
- * several hundred thousand or millions
- * of unknowns are involved and for
- * problems with many nonzero elements
- * per row (for example for
- * vector-valued problems or hp finite
- * elements). In this case, it is
- * advisable to use the
- * CompressedSetSparsityPattern class
- * instead, see for example @ref
- * step_27 "step-27", or to use the
- * CompressedSimpleSparsityPattern
- * class, see for example @ref step_31
- * "step-31".
- */
- void condense (CompressedSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses compressed sparsity
- * patterns, which are based on the
- * std::set container.
- */
- void condense (CompressedSetSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses compressed sparsity
- * patterns, which are based on the
- * ''simple'' aproach.
- */
- void condense (CompressedSimpleSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- *
- * Given the data structure used by
- * BlockCompressedSparsityPattern, this
- * function becomes quadratic in the
- * number of degrees of freedom for
- * large problems and can dominate
- * setting up linear systems when
- * several hundred thousand or millions
- * of unknowns are involved and for
- * problems with many nonzero elements
- * per row (for example for
- * vector-valued problems or hp finite
- * elements). In this case, it is
- * advisable to use the
- * BlockCompressedSetSparsityPattern
- * class instead, see for example @ref
- * step_27 "step-27" and @ref step_31
- * "step-31".
- */
- void condense (BlockCompressedSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- */
- void condense (BlockCompressedSetSparsityPattern &sparsity) const;
-
- /**
- * Same function as above, but
- * condenses square compressed sparsity
- * patterns.
- */
- void condense (BlockCompressedSimpleSparsityPattern &sparsity) const;
-
-
- /**
- * Condense a given matrix. The
- * associated matrix struct should be
- * condensed and compressed. It is the
- * user's responsibility to guarantee
- * that all entries in the @p condensed
- * matrix be zero!
- *
- * The constraint matrix object must be
- * closed to call this function.
- */
- template<typename number>
- void condense (const SparseMatrix<number> &uncondensed,
- SparseMatrix<number> &condensed) const;
-
- /**
- * This function does much the same as
- * the above one, except that it
- * condenses the matrix 'in-place'. See
- * the general documentation of this
- * class for more detailed information.
- */
- template<typename number>
- void condense (SparseMatrix<number> &matrix) const;
-
- /**
- * Same function as above, but
- * condenses square block sparse
- * matrices.
- */
- template <typename number>
- void condense (BlockSparseMatrix<number> &matrix) const;
-
- /**
- * Condense the given vector @p
- * uncondensed into @p condensed. It is
- * the user's responsibility to
- * guarantee that all entries of @p
- * condensed be zero. Note that this
- * function does not take any
- * inhomogeneity into account and
- * throws an exception in case there
- * are any inhomogeneities. Use
- * the function using both a matrix and
- * vector for that case.
- *
- * The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface.
- */
- template <class VectorType>
- void condense (const VectorType &uncondensed,
- VectorType &condensed) const;
-
- /**
- * Condense the given vector
- * in-place. The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface. Note that this function
- * does not take any inhomogeneity into
- * account and throws an exception in
- * case there are any
- * inhomogeneities. Use the function
- * using both a matrix and vector for
- * that case.
- */
- template <class VectorType>
- void condense (VectorType &vec) const;
-
- /**
- * Condense a given matrix and a given
- * vector. The associated matrix struct
- * should be condensed and
- * compressed. It is the user's
- * responsibility to guarantee that all
- * entries in the @p condensed matrix
- * and vector be zero! This function is
- * the appropriate choice for applying
- * inhomogeneous constraints.
- *
- * The constraint matrix object must be
- * closed to call this function.
- */
- template<typename number, class VectorType>
- void condense (const SparseMatrix<number> &uncondensed_matrix,
- const VectorType &uncondensed_vector,
- SparseMatrix<number> &condensed_matrix,
- VectorType &condensed_vector) const;
-
- /**
- * This function does much the same as
- * the above one, except that it
- * condenses matrix and vector
- * 'in-place'. See the general
- * documentation of this class for more
- * detailed information.
- */
- template<typename number, class VectorType>
- void condense (SparseMatrix<number> &matrix,
- VectorType &vector) const;
-
- /**
- * Same function as above, but
- * condenses square block sparse
- * matrices and vectors.
- */
- template <typename number, class BlockVectorType>
- void condense (BlockSparseMatrix<number> &matrix,
- BlockVectorType &vector) const;
-
- /**
- * Sets the values of all constrained
- * DoFs in a vector to zero.
- * The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a
- * PETSc or Trilinos vector
- * wrapper class, or any other
- * type having the same
- * interface.
- */
- template <class VectorType>
- void set_zero (VectorType &vec) const;
-
- /**
- * @}
- */
-
- /**
- * @name Eliminating constraints from linear systems during their creation
- * @{
- */
-
- /**
- * This function takes a vector of
- * local contributions (@p
- * local_vector) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global vector. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as @p
- * local_vector and @p
- * local_dof_indices have the same
- * number of elements, this function is
- * happy with whatever it is
- * given.
- *
- * In contrast to the similar function
- * in the DoFAccessor class, this
- * function also takes care of
- * constraints, i.e. if one of the
- * elements of @p local_dof_indices
- * belongs to a constrained node, then
- * rather than writing the
- * corresponding element of @p
- * local_vector into @p global_vector,
- * the element is distributed to the
- * entries in the global vector to
- * which this particular degree of
- * freedom is constrained.
- *
- * Thus, by using this function to
- * distribute local contributions to the
- * global object, one saves the call to
- * the condense function after the
- * vectors and matrices are fully
- * assembled. On the other hand, by
- * consequence, the function does not
- * only write into the entries enumerated
- * by the @p local_dof_indices array, but
- * also (possibly) others as necessary.
- *
- * Note that this function will apply all
- * constraints as if they were
- * homogeneous. For correctly setting
- * inhomogeneous constraints, use the
- * similar function with a matrix
- * argument or the function with both
- * matrix and vector arguments.
- *
- * @note This function is not
- * thread-safe, so you will need to make
- * sure that only one process at a time
- * calls this function.
- */
- template <class InVector, class OutVector>
- void
- distribute_local_to_global (const InVector &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- OutVector &global_vector) const;
-
- /**
- * This function takes a vector of
- * local contributions (@p
- * local_vector) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global vector. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as @p
- * local_vector and @p
- * local_dof_indices have the same
- * number of elements, this function is
- * happy with whatever it is
- * given.
- *
- * In contrast to the similar function in
- * the DoFAccessor class, this function
- * also takes care of constraints,
- * i.e. if one of the elements of @p
- * local_dof_indices belongs to a
- * constrained node, then rather than
- * writing the corresponding element of
- * @p local_vector into @p global_vector,
- * the element is distributed to the
- * entries in the global vector to which
- * this particular degree of freedom is
- * constrained.
- *
- * Thus, by using this function to
- * distribute local contributions to the
- * global object, one saves the call to
- * the condense function after the
- * vectors and matrices are fully
- * assembled. On the other hand, by
- * consequence, the function does not
- * only write into the entries enumerated
- * by the @p local_dof_indices array, but
- * also (possibly) others as
- * necessary. This includes writing into
- * diagonal elements of the matrix if the
- * corresponding degree of freedom is
- * constrained.
- *
- * The fourth argument
- * <tt>local_matrix</tt> is intended to
- * be used in case one wants to apply
- * inhomogeneous constraints on the
- * vector only. Such a situation could be
- * where one wants to assemble of a right
- * hand side vector on a problem with
- * inhomogeneous constraints, but the
- * global matrix has been assembled
- * previously. A typical example of this
- * is a time stepping algorithm where the
- * stiffness matrix is assembled once,
- * and the right hand side updated every
- * time step. Note that, however, the
- * entries in the columns of the local
- * matrix have to be exactly the same as
- * those that have been written into the
- * global matrix. Otherwise, this
- * function will not be able to correctly
- * handle inhomogeneities.
- *
- * @note This function is not
- * thread-safe, so you will need to make
- * sure that only one process at a time
- * calls this function.
- */
- template <typename VectorType>
- void
- distribute_local_to_global (const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- VectorType &global_vector,
- const FullMatrix<double> &local_matrix) const;
-
- /**
- * Enter a single value into a
- * result vector, obeying constraints.
- */
- template <class VectorType>
- void
- distribute_local_to_global (const unsigned int index,
- const double value,
- VectorType &global_vector) const;
-
- /**
- * This function takes a pointer to a
- * vector of local contributions (@p
- * local_vector) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global vector. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as the
- * entries in @p local_dof_indices
- * indicate reasonable global vector
- * entries, this function is happy with
- * whatever it is given.
- *
- * If one of the elements of @p
- * local_dof_indices belongs to a
- * constrained node, then rather than
- * writing the corresponding element of
- * @p local_vector into @p
- * global_vector, the element is
- * distributed to the entries in the
- * global vector to which this
- * particular degree of freedom is
- * constrained.
- *
- * Thus, by using this function to
- * distribute local contributions to
- * the global object, one saves the
- * call to the condense function after
- * the vectors and matrices are fully
- * assembled. Note that this function
- * completely ignores inhomogeneous
- * constraints.
- *
- * @note This function is not
- * thread-safe, so you will need to
- * make sure that only one process at a
- * time calls this function.
- */
- template <typename ForwardIteratorVec, typename ForwardIteratorInd,
- class VectorType>
- void
- distribute_local_to_global (ForwardIteratorVec local_vector_begin,
- ForwardIteratorVec local_vector_end,
- ForwardIteratorInd local_indices_begin,
- VectorType &global_vector) const;
-
- /**
- * This function takes a matrix of
- * local contributions (@p
- * local_matrix) corresponding to the
- * degrees of freedom indices given in
- * @p local_dof_indices and distributes
- * them to the global matrix. In most
- * cases, these local contributions
- * will be the result of an integration
- * over a cell or face of a
- * cell. However, as long as @p
- * local_matrix and @p
- * local_dof_indices have the same
- * number of elements, this function is
- * happy with whatever it is given.
- *
- * In contrast to the similar function
- * in the DoFAccessor class, this
- * function also takes care of
- * constraints, i.e. if one of the
- * elements of @p local_dof_indices
- * belongs to a constrained node, then
- * rather than writing the
- * corresponding element of @p
- * local_matrix into @p global_matrix,
- * the element is distributed to the
- * entries in the global matrix to
- * which this particular degree of
- * freedom is constrained.
- *
- * With this scheme, we never write
- * into rows or columns of constrained
- * degrees of freedom. In order to make
- * sure that the resulting matrix can
- * still be inverted, we need to do
- * something with the diagonal elements
- * corresponding to constrained
- * nodes. Thus, if a degree of freedom
- * in @p local_dof_indices is
- * constrained, we distribute the
- * corresponding entries in the matrix,
- * but also add the absolute value of
- * the diagonal entry of the local
- * matrix to the corresponding entry in
- * the global matrix. Since the exact
- * value of the diagonal element is not
- * important (the value of the
- * respective degree of freedom will be
- * overwritten by the distribute() call
- * later on anyway), this guarantees
- * that the diagonal entry is always
- * non-zero, positive, and of the same
- * order of magnitude as the other
- * entries of the matrix.
- *
- * Thus, by using this function to
- * distribute local contributions to
- * the global object, one saves the
- * call to the condense function after
- * the vectors and matrices are fully
- * assembled.
- *
- * @note This function is not
- * thread-safe, so you will need to
- * make sure that only one process at a
- * time calls this function.
- */
- template <typename MatrixType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix) const;
-
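// Sketch of how this overload is typically called from an assembly loop
// (cell_matrix, local_dof_indices, system_matrix and constraints are
// hypothetical names). Constrained rows and columns are eliminated on the
// fly, so no later condense() call on the matrix is needed.
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global (cell_matrix,
                                        local_dof_indices,
                                        system_matrix);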
- /**
- * Does the same as the function
- * above but can also handle
- * non-quadratic matrices.
- */
- template <typename MatrixType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices,
- MatrixType &global_matrix) const;
-
- /**
- * This function simultaneously
- * writes elements into matrix
- * and vector, according to the
- * constraints specified by the
- * calling ConstraintMatrix. This
- * function can correctly handle
- * inhomogeneous constraints as
- * well. For the parameter
- * use_inhomogeneities_for_rhs
- * see the documentation in the @ref
- * constraints module.
- *
- * @note This function is not
- * thread-safe, so you will need to
- * make sure that only one process at a
- * time calls this function.
- */
- template <typename MatrixType, typename VectorType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs = false) const;
-
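// Sketch of the combined matrix/vector call (hypothetical names): this is
// the variant usually used when matrix and right hand side are assembled
// in the same loop, and it also resolves inhomogeneous constraints.
cell->get_dof_indices (local_dof_indices);
constraints.distribute_local_to_global (cell_matrix,
                                        cell_rhs,
                                        local_dof_indices,
                                        system_matrix,
                                        system_rhs);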
- /**
- * Do a similar operation as the
- * distribute_local_to_global() function
- * that distributes writing entries into
- * a matrix for constrained degrees of
- * freedom, except that here we don't
- * write into a matrix but only allocate
- * sparsity pattern entries.
- *
- * As explained in the
- * @ref hp_paper "hp paper"
- * and in step-27,
- * first allocating a sparsity pattern
- * and later coming back and allocating
- * additional entries for those matrix
- * entries that will be written to due to
- * the elimination of constrained degrees
- * of freedom (using
- * ConstraintMatrix::condense() ), can be
- * a very expensive procedure. It is
- * cheaper to allocate these entries
- * right away without having to do a
- * second pass over the sparsity pattern
- * object. This function does exactly
- * that.
- *
- * Because the function only allocates
- * entries in a sparsity pattern, all it
- * needs to know are the degrees of
- * freedom that couple to each
- * other. Unlike the previous function,
- * no actual values are written, so the
- * second input argument is not necessary
- * here.
- *
- * The third argument to this function,
- * keep_constrained_entries determines
- * whether the function shall allocate
- * entries in the sparsity pattern at
- * all for entries that will later be
- * set to zero upon condensation of the
- * matrix. These entries are necessary
- * if the matrix is built
- * unconstrained, and only later
- * condensed. They are not necessary if
- * the matrix is built using the
- * distribute_local_to_global()
- * function of this class which
- * distributes entries right away when
- * copying a local matrix into a global
- * object. The default of this argument
- * is true, meaning to allocate the few
- * entries that may later be set to
- * zero.
- *
- * By default, the function adds
- * entries for all pairs of indices
- * given in the first argument to the
- * sparsity pattern (unless
- * keep_constrained_entries is
- * false). However, sometimes one would
- * like to only add a subset of all of
- * these pairs. In that case, the last
- * argument can be used which specifies
- * a boolean mask which of the pairs of
- * indices should be considered. If the
- * mask is false for a pair of indices,
- * then no entry will be added to the
- * sparsity pattern for this pair,
- * irrespective of whether one or both
- * of the indices correspond to
- * constrained degrees of freedom.
- *
- * This function is not typically called
- * from user code, but is used in the
- * DoFTools::make_sparsity_pattern()
- * function when passed a constraint
- * matrix object.
- */
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries = true,
- const Table<2,bool> &dof_mask = default_empty_table) const;
-
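// User code usually reaches this function through
// DoFTools::make_sparsity_pattern() rather than calling it directly. A
// hedged sketch (csp, dof_handler and constraints are hypothetical names);
// passing false as the last argument skips the entries that would only be
// needed for a later condense() call.
CompressedSparsityPattern csp (dof_handler.n_dofs());
DoFTools::make_sparsity_pattern (dof_handler, csp,
                                 constraints,
                                 /*keep_constrained_dofs=*/ false);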
- /**
- * Similar to the other function,
- * but for non-quadratic sparsity
- * patterns.
- */
-
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &row_indices,
- const std::vector<types::global_dof_index> &col_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries = true,
- const Table<2,bool> &dof_mask = default_empty_table) const;
-
- /**
- * This function imports values from a
- * global vector (@p global_vector) by
- * applying the constraints to a vector
- * of local values, expressed in
- * iterator format. In most cases, the
- * local values will be identified by
- * the local dof values on a
- * cell. However, as long as the
- * entries in @p local_dof_indices
- * indicate reasonable global vector
- * entries, this function is happy with
- * whatever it is given.
- *
- * If one of the elements of @p
- * local_dof_indices belongs to a
- * constrained node, then rather than
- * writing the corresponding element of
- * @p global_vector into @p
- * local_vector, the constraints are
- * resolved as the respective
- * distribute function does, i.e., the
- * local entry is constructed from the
- * global entries to which this
- * particular degree of freedom is
- * constrained.
- *
- * In contrast to the similar function
- * get_dof_values in the DoFAccessor
- * class, this function does not need
- * the constrained values to be
- * correctly set (i.e., distribute() need
- * not have been called beforehand).
- */
- template <typename ForwardIteratorVec, typename ForwardIteratorInd,
- class VectorType>
- void
- get_dof_values (const VectorType &global_vector,
- ForwardIteratorInd local_indices_begin,
- ForwardIteratorVec local_vector_begin,
- ForwardIteratorVec local_vector_end) const;
-
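// Sketch: read the values of all degrees of freedom on a cell from a
// global solution vector while resolving constraints on the fly
// (fe, cell, local_dof_indices, solution and constraints are hypothetical).
std::vector<double> local_values (fe.dofs_per_cell);
cell->get_dof_indices (local_dof_indices);
constraints.get_dof_values (solution,
                            local_dof_indices.begin(),
                            local_values.begin(),
                            local_values.end());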
- /**
- * @}
- */
-
- /**
- * @name Dealing with constraints after solving a linear system
- * @{
- */
-
- /**
- * Re-distribute the elements of the
- * vector @p condensed to @p
- * uncondensed. It is the user's
- * responsibility to guarantee that all
- * entries of @p uncondensed be zero!
- *
- * This function undoes the action of
- * @p condense in a sense, but it should
- * be noted that it is not the inverse
- * of @p condense.
- *
- * The @p VectorType may be a
- * Vector<float>, Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface.
- */
- template <class VectorType>
- void distribute (const VectorType &condensed,
- VectorType &uncondensed) const;
-
- /**
- * Re-distribute the elements of the
- * vector in-place. The @p VectorType
- * may be a Vector<float>,
- * Vector<double>,
- * BlockVector<tt><...></tt>, a PETSc
- * or Trilinos vector wrapper class, or
- * any other type having the same
- * interface.
- *
- * Note that if called with a
- * TrilinosWrappers::MPI::Vector it may
- * not contain ghost elements.
- */
- template <class VectorType>
- void distribute (VectorType &vec) const;
-
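// Typical post-solve sketch (solver, system_matrix, solution, system_rhs
// and preconditioner are hypothetical names): after the linear solve, the
// constrained entries of the solution are set from the degrees of freedom
// they are constrained to.
solver.solve (system_matrix, solution, system_rhs, preconditioner);
constraints.distribute (solution);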
- /**
- * @}
- */
-
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcMatrixIsClosed);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException0 (ExcMatrixNotClosed);
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcLineInexistant,
- types::global_dof_index,
- << "The specified line " << arg1
- << " does not exist.");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException4 (ExcEntryAlreadyExists,
- int, int, double, double,
- << "The entry for the indices " << arg1 << " and "
- << arg2 << " already exists, but the values "
- << arg3 << " (old) and " << arg4 << " (new) differ "
- << "by " << (arg4-arg3) << ".");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException2 (ExcDoFConstrainedToConstrainedDoF,
- int, int,
- << "You tried to constrain DoF " << arg1
- << " to DoF " << arg2
- << ", but that one is also constrained. This is not allowed!");
- /**
- * Exception.
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcDoFIsConstrainedFromBothObjects,
- int,
- << "Degree of freedom " << arg1
- << " is constrained from both object in a merge operation.");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcDoFIsConstrainedToConstrainedDoF,
- int,
- << "In the given argument a degree of freedom is constrained "
- << "to another DoF with number " << arg1
- << ", which however is constrained by this object. This is not"
- << " allowed.");
- /**
- * Exception
- *
- * @ingroup Exceptions
- */
- DeclException1 (ExcRowNotStoredHere,
- int,
- << "The index set given to this constraint matrix indicates "
- << "constraints for degree of freedom " << arg1
- << " should not be stored by this object, but a constraint "
- << "is being added.");
-
- private:
-
- /**
- * This class represents one line of a
- * constraint matrix.
- */
- struct ConstraintLine
- {
- /**
- * A data type in which we store the list
- * of entries that make up the homogenous
- * part of a constraint.
- */
- typedef std::vector<std::pair<types::global_dof_index,double> > Entries;
-
- /**
- * Number of this line. Since only
- * very few lines are stored, we
- * can not assume a specific order
- * and have to store the line
- * number explicitly.
- */
- types::global_dof_index line;
-
- /**
- * Row numbers and values of the
- * entries in this line.
- *
- * For the reason why we use a
- * vector instead of a map and the
- * consequences thereof, the same
- * applies as what is said for
- * ConstraintMatrix::lines.
- */
- Entries entries;
-
- /**
- * Value of the inhomogeneity.
- */
- double inhomogeneity;
-
- /**
- * This operator is a bit weird and
- * unintuitive: it compares the
- * line numbers of two lines. We
- * need this to sort the lines; in
- * fact we could do this using a
- * comparison predicate. However,
- * this way, it is easier, albeit
- * unintuitive since two lines
- * really have no god-given order
- * relation.
- */
- bool operator < (const ConstraintLine &) const;
-
- /**
- * This operator is likewise weird:
- * it checks whether the line
- * indices of the two operands are
- * equal, irrespective of the fact
- * that the contents of the line
- * may be different.
- */
- bool operator == (const ConstraintLine &) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes) of
- * this object.
- */
- std::size_t memory_consumption () const;
- };
-
- /**
- * Store the lines of the matrix.
- * Entries are usually appended in an
- * arbitrary order and insertion into a
- * vector is done best at the end, so
- * the order is unspecified after all
- * entries are inserted. Sorting of the
- * entries takes place when calling the
- * <tt>close()</tt> function.
- *
- * We could, instead of using a vector,
- * use an associative array, like a map
- * to store the lines. This, however,
- * would mean a much more fragmented
- * heap since it allocates many small
- * objects, and would additionally make
- * usage of this matrix much slower.
- */
- std::vector<ConstraintLine> lines;
-
- /**
- * A list of unsigned integers that
- * contains the position of the
- * ConstraintLine of a constrained degree
- * of freedom, or
- * numbers::invalid_unsigned_int if the
- * degree of freedom is not
- * constrained. Checking for
- * numbers::invalid_unsigned_int
- * thus tells whether there is a
- * constraint line for a given
- * degree of freedom index. Note that
- * this class has no notion of how many
- * degrees of freedom there really are,
- * so if we check whether there is a
- * constraint line for a given degree of
- * freedom, then this vector may actually
- * be shorter than the index of the DoF
- * we check for.
- *
- * This field exists since when adding a
- * new constraint line we have to figure
- * out whether it already
- * exists. Previously, we would simply
- * walk the unsorted list of constraint
- * lines until we either hit the end or
- * found it. This algorithm is O(N) if N
- * is the number of constraints, which
- * makes it O(N^2) when inserting all
- * constraints. For large problems with
- * many constraints, this could easily
- * take 5-10 per cent of the total run
- * time. With this field, we can save
- * this time since we find any constraint
- * in O(1) time or learn that a
- * certain degree of freedom is not
- * constrained.
- *
- * To make things worse, traversing the
- * list of existing constraints requires
- * reads from many different places in
- * memory. Thus, in large 3d
- * applications, the add_line() function
- * showed up very prominently in the
- * overall compute time, mainly because
- * it generated a lot of cache
- * misses. This should also be fixed by
- * using the O(1) algorithm to access the
- * fields of this array.
- *
- * The field is useful in a number of
- * other contexts as well, e.g. when one
- * needs random access to the constraints
- * as in all the functions that apply
- * constraints on the fly while adding cell
- * contributions to vectors and
- * matrices.
- */
- std::vector<types::global_dof_index> lines_cache;
-
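// Simplified sketch of the O(1) lookup this cache makes possible: deciding
// whether a degree of freedom is constrained becomes an array access into
// lines_cache instead of a search through the unsorted list of lines.
// (Illustration only, not a member declared by this class.)
bool is_constrained_sketch (const types::global_dof_index index) const
{
  const unsigned int line_index = calculate_line_index (index);
  return (line_index < lines_cache.size() &&
          lines_cache[line_index] != numbers::invalid_unsigned_int);
}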
- /**
- * This IndexSet is used to limit the
- * lines saved in the ConstraintMatrix
- * to a subset. This is necessary,
- * because the lines_cache vector would
- * become too big in a distributed
- * calculation.
- */
- IndexSet local_lines;
-
- /**
- * Store whether the arrays are sorted.
- * If so, no new entries can be added.
- */
- bool sorted;
-
- /**
- * Internal function to calculate the
- * index of line @p line in the vector
- * lines_cache using local_lines.
- */
- unsigned int calculate_line_index (const types::global_dof_index line) const;
-
- /**
- * Return @p true if the weight of an
- * entry (the second element of the
- * pair) equals zero. This function is
- * used to delete entries with zero
- * weight.
- */
- static bool check_zero_weight (const std::pair<types::global_dof_index, double> &p);
-
- /**
- * Dummy table that serves as default
- * argument for function
- * <tt>add_entries_local_to_global()</tt>.
- */
- static const Table<2,bool> default_empty_table;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * standard (non-block) matrices.
- */
- template <typename MatrixType, typename VectorType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
- internal::bool2type<false>) const;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * block matrices.
- */
- template <typename MatrixType, typename VectorType>
- void
- distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- MatrixType &global_matrix,
- VectorType &global_vector,
- bool use_inhomogeneities_for_rhs,
- internal::bool2type<true>) const;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * standard (non-block) sparsity types.
- */
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
- internal::bool2type<false>) const;
-
- /**
- * This function actually implements
- * the local_to_global function for
- * block sparsity types.
- */
- template <typename SparsityType>
- void
- add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
- SparsityType &sparsity_pattern,
- const bool keep_constrained_entries,
- const Table<2,bool> &dof_mask,
- internal::bool2type<true>) const;
-
- /**
- * Internal helper function for
- * distribute_local_to_global function.
- *
- * Creates a list of affected global rows
- * for distribution, including the local
- * rows where the entries come from. The
- * list is sorted according to the global
- * row indices.
- */
- void
- make_sorted_row_list (const std::vector<types::global_dof_index> &local_dof_indices,
- internals::GlobalRowsFromLocal &global_rows) const;
-
- /**
- * Internal helper function for
- * add_entries_local_to_global function.
- *
- * Creates a list of affected rows for
- * distribution without any additional
- * information, otherwise similar to the
- * other make_sorted_row_list()
- * function.
- */
- void
- make_sorted_row_list (const std::vector<types::global_dof_index> &local_dof_indices,
- std::vector<types::global_dof_index> &active_dofs) const;
-
- /**
- * Internal helper function for
- * distribute_local_to_global function.
- */
- double
- resolve_vector_entry (const types::global_dof_index i,
- const internals::GlobalRowsFromLocal &global_rows,
- const Vector<double> &local_vector,
- const std::vector<types::global_dof_index> &local_dof_indices,
- const FullMatrix<double> &local_matrix) const;
+ };
+
+ /**
+ * Store the lines of the matrix.
+ * Entries are usually appended in an
+ * arbitrary order and insertion into a
+ * vector is done best at the end, so
+ * the order is unspecified after all
+ * entries are inserted. Sorting of the
+ * entries takes place when calling the
+ * <tt>close()</tt> function.
+ *
+ * We could, instead of using a vector,
+ * use an associative array, like a map
+ * to store the lines. This, however,
+ * would mean a much more fragmented
+ * heap since it allocates many small
+ * objects, and would additionally make
+ * usage of this matrix much slower.
+ */
+ std::vector<ConstraintLine> lines;
+
+ /**
+ * A list of indices that
+ * contains the position of the
+ * ConstraintLine of a constrained degree
+ * of freedom, or
+ * numbers::invalid_unsigned_int if the
+ * degree of freedom is not
+ * constrained. Checking for
+ * numbers::invalid_unsigned_int
+ * thus tells whether there is a
+ * constraint line for a given
+ * degree of freedom index. Note that
+ * this class has no notion of how many
+ * degrees of freedom there really are,
+ * so if we check whether there is a
+ * constraint line for a given degree of
+ * freedom, then this vector may actually
+ * be shorter than the index of the DoF
+ * we check for.
+ *
+ * This field exists since when adding a
+ * new constraint line we have to figure
+ * out whether it already
+ * exists. Previously, we would simply
+ * walk the unsorted list of constraint
+ * lines until we either hit the end or
+ * found it. This algorithm is O(N) if N
+ * is the number of constraints, which
+ * makes it O(N^2) when inserting all
+ * constraints. For large problems with
+ * many constraints, this could easily
+ * take 5-10 per cent of the total run
+ * time. With this field, we can save
+ * this time since we find any constraint
+ * in O(1) time or learn that a
+ * certain degree of freedom is not
+ * constrained.
+ *
+ * To make things worse, traversing the
+ * list of existing constraints requires
+ * reads from many different places in
+ * memory. Thus, in large 3d
+ * applications, the add_line() function
+ * showed up very prominently in the
+ * overall compute time, mainly because
+ * it generated a lot of cache
+ * misses. This should also be fixed by
+ * using the O(1) algorithm to access the
+ * fields of this array.
+ *
+ * The field is useful in a number of
+ * other contexts as well, e.g. when one
+ * needs random access to the constraints
+ * as in all the functions that apply
+ * constraints on the fly while adding cell
+ * contributions to vectors and
+ * matrices.
+ */
- std::vector<unsigned int> lines_cache;
++ std::vector<types::global_dof_index> lines_cache;
+
+ /**
+ * This IndexSet is used to limit the
+ * lines saved in the ConstraintMatrix
+ * to a subset. This is necessary,
+ * because the lines_cache vector would
+ * become too big in a distributed
+ * calculation.
+ */
+ IndexSet local_lines;
+
+ /**
+ * Store whether the arrays are sorted.
+ * If so, no new entries can be added.
+ */
+ bool sorted;
+
+ /**
+ * Internal function to calculate the
+ * index of line @p line in the vector
+ * lines_cache using local_lines.
+ */
- unsigned int calculate_line_index (const unsigned int line) const;
++ unsigned int calculate_line_index (const types::global_dof_index line) const;
+
+ /**
+ * Return @p true if the weight of an
+ * entry (the second element of the
+ * pair) equals zero. This function is
+ * used to delete entries with zero
+ * weight.
+ */
- static bool check_zero_weight (const std::pair<unsigned int, double> &p);
++ static bool check_zero_weight (const std::pair<types::global_dof_index, double> &p);
+
+ /**
+ * Dummy table that serves as default
+ * argument for function
+ * <tt>add_entries_local_to_global()</tt>.
+ */
+ static const Table<2,bool> default_empty_table;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * standard (non-block) matrices.
+ */
+ template <typename MatrixType, typename VectorType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
+ internal::bool2type<false>) const;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * block matrices.
+ */
+ template <typename MatrixType, typename VectorType>
+ void
+ distribute_local_to_global (const FullMatrix<double> &local_matrix,
+ const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ MatrixType &global_matrix,
+ VectorType &global_vector,
+ bool use_inhomogeneities_for_rhs,
+ internal::bool2type<true>) const;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * standard (non-block) sparsity types.
+ */
+ template <typename SparsityType>
+ void
- add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
++ add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
+ internal::bool2type<false>) const;
+
+ /**
+ * This function actually implements
+ * the local_to_global function for
+ * block sparsity types.
+ */
+ template <typename SparsityType>
+ void
- add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
++ add_entries_local_to_global (const std::vector<types::global_dof_index> &local_dof_indices,
+ SparsityType &sparsity_pattern,
+ const bool keep_constrained_entries,
+ const Table<2,bool> &dof_mask,
+ internal::bool2type<true>) const;
+
+ /**
+ * Internal helper function for
+ * distribute_local_to_global function.
+ *
+ * Creates a list of affected global rows
+ * for distribution, including the local
+ * rows where the entries come from. The
+ * list is sorted according to the global
+ * row indices.
+ */
+ void
- make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
- internals::GlobalRowsFromLocal &global_rows) const;
++ make_sorted_row_list (const std::vector<types::global_dof_index> &local_dof_indices,
++ internals::GlobalRowsFromLocal &global_rows) const;
+
+ /**
+ * Internal helper function for
+ * add_entries_local_to_global function.
+ *
+ * Creates a list of affected rows for
+ * distribution without any additional
+ * information, otherwise similar to the
+ * other make_sorted_row_list()
+ * function.
+ */
+ void
- make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
- std::vector<unsigned int> &active_dofs) const;
++ make_sorted_row_list (const std::vector<types::global_dof_index> &local_dof_indices,
++ std::vector<types::global_dof_index> &active_dofs) const;
+
+ /**
+ * Internal helper function for
+ * distribute_local_to_global function.
+ */
+ double
- resolve_vector_entry (const unsigned int i,
++ resolve_vector_entry (const types::global_dof_index i,
+ const internals::GlobalRowsFromLocal &global_rows,
+ const Vector<double> &local_vector,
- const std::vector<unsigned int> &local_dof_indices,
++ const std::vector<types::global_dof_index> &local_dof_indices,
+ const FullMatrix<double> &local_matrix) const;
};
inline
bool
-ConstraintMatrix::is_inhomogeneously_constrained (const unsigned int index) const
+ConstraintMatrix::is_inhomogeneously_constrained (const types::global_dof_index index) const
{
- // check whether the entry is
- // constrained. could use is_constrained, but
- // that means computing the line index twice
+ // check whether the entry is
+ // constrained. could use is_constrained, but
+ // that means computing the line index twice
const unsigned int line_index = calculate_line_index(index);
if (line_index >= lines_cache.size() ||
lines_cache[line_index] == numbers::invalid_unsigned_int)
inline
- const std::vector<std::pair<types::global_dof_index,double> >*
-const std::vector<std::pair<unsigned int,double> > *
-ConstraintMatrix::get_constraint_entries (unsigned int line) const
++const std::vector<std::pair<types::global_dof_index,double> > *
+ConstraintMatrix::get_constraint_entries (const types::global_dof_index line) const
{
- // check whether the entry is
- // constrained. could use is_constrained, but
- // that means computing the line index twice
+ // check whether the entry is
+ // constrained. could use is_constrained, but
+ // that means computing the line index twice
const unsigned int line_index = calculate_line_index(line);
if (line_index >= lines_cache.size() ||
lines_cache[line_index] == numbers::invalid_unsigned_int)
inline
double
-ConstraintMatrix::get_inhomogeneity (const unsigned int line) const
+ConstraintMatrix::get_inhomogeneity (const types::global_dof_index line) const
{
- // check whether the entry is
- // constrained. could use is_constrained, but
- // that means computing the line index twice
+ // check whether the entry is
+ // constrained. could use is_constrained, but
+ // that means computing the line index twice
const unsigned int line_index = calculate_line_index(line);
if (line_index >= lines_cache.size() ||
lines_cache[line_index] == numbers::invalid_unsigned_int)
inline unsigned int
-ConstraintMatrix::calculate_line_index (const unsigned int line) const
+ConstraintMatrix::calculate_line_index (const types::global_dof_index line) const
{
- //IndexSet is unused (serial case)
+ //IndexSet is unused (serial case)
if (!local_lines.size())
return line;
template <typename ForwardIteratorVec, typename ForwardIteratorInd,
- class VectorType>
+ class VectorType>
inline
-void ConstraintMatrix::get_dof_values (const VectorType &global_vector,
+void ConstraintMatrix::get_dof_values (const VectorType &global_vector,
ForwardIteratorInd local_indices_begin,
ForwardIteratorVec local_vector_begin,
ForwardIteratorVec local_vector_end) const
void
ConstraintMatrix::
distribute_local_to_global (const FullMatrix<double> &local_matrix,
- const std::vector<unsigned int> &local_dof_indices,
+ const std::vector<types::global_dof_index> &local_dof_indices,
MatrixType &global_matrix) const
{
- // create a dummy and hand on to the
- // function actually implementing this
- // feature in the cm.templates.h file.
+ // create a dummy and hand on to the
+ // function actually implementing this
+ // feature in the cm.templates.h file.
Vector<double> dummy(0);
distribute_local_to_global (local_matrix, dummy, local_dof_indices,
global_matrix, dummy, false,
for (unsigned int row=0; row<n_rows; ++row)
{
if (distribute[row] == numbers::invalid_unsigned_int)
- // regular line. loop over cols
+ // regular line. loop over cols
{
for (typename SparseMatrix<number>::iterator
- entry = uncondensed.begin(row);
+ entry = uncondensed.begin(row);
entry != uncondensed.end(row); ++entry)
{
- const unsigned int column = entry->column();
+ const types::global_dof_index column = entry->column();
- // end of row reached?
- // this should not
- // happen, since we only
- // operate on compressed
- // matrices!
+ // end of row reached?
+ // this should not
+ // happen, since we only
+ // operate on compressed
+ // matrices!
Assert (column != SparsityPattern::invalid_entry,
ExcMatrixNotClosed());
}
}
else
- // row must be distributed
+ // row must be distributed
{
for (typename SparseMatrix<number>::iterator
- entry = uncondensed.begin(row);
+ entry = uncondensed.begin(row);
entry != uncondensed.end(row); ++entry)
{
- const unsigned int column = entry->column();
+ const types::global_dof_index column = entry->column();
- // end of row reached?
- // this should not
- // happen, since we only
- // operate on compressed
- // matrices!
+ // end of row reached?
+ // this should not
+ // happen, since we only
+ // operate on compressed
+ // matrices!
Assert (column != SparsityPattern::invalid_entry,
ExcMatrixNotClosed());
const unsigned int column_start,
const unsigned int column_end,
const FullMatrix<double> &local_matrix,
- unsigned int * &col_ptr,
- number * &val_ptr)
- unsigned int *&col_ptr,
- number *&val_ptr)
++ unsigned int *&col_ptr,
++ number *&val_ptr)
{
if (column_end == column_start)
return;
template <typename Number>
class Vector : public Subscriptor
{
- public:
- /**
- * Declare standard types used in all
- * containers. These types parallel those in
- * the <tt>C++</tt> standard libraries
- * <tt>vector<...></tt> class.
- */
- typedef Number value_type;
- typedef value_type *pointer;
- typedef const value_type *const_pointer;
- typedef value_type *iterator;
- typedef const value_type *const_iterator;
- typedef value_type &reference;
- typedef const value_type &const_reference;
- typedef size_t size_type;
- typedef typename numbers::NumberTraits<Number>::real_type real_type;
-
- /**
- * @name 1: Basic Object-handling
- */
- //@{
- /**
- * Empty constructor.
- */
- Vector ();
-
- /**
- * Copy constructor. Uses the parallel
- * partitioning of @p in_vector.
- */
- Vector (const Vector<Number> &in_vector);
-
- /**
- * Constructs a parallel vector of the given
- * global size without any actual parallel
- * distribution.
- */
- Vector (const unsigned int size);
-
- /**
- * Constructs a parallel vector. The local
- * range is specified by @p locally_owned_set
- * (note that this must be a contiguous
- * interval, multiple intervals are not
- * possible). The IndexSet @p ghost_indices
- * specifies ghost indices, i.e., indices
- * which one might need to read data from or
- * accumulate data from. It is allowed that
- * the set of ghost indices also contains the
- * local range, but it does not need to.
- *
- * This function involves global
- * communication, so it should only be called
- * once for a given layout. Use the
- * constructor with Vector<Number> argument to
- * create additional vectors with the same
- * parallel layout.
- */
- Vector (const IndexSet &local_range,
- const IndexSet &ghost_indices,
- const MPI_Comm communicator);
-
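// Hedged construction sketch: owned_range and ghost_set are IndexSets
// describing the contiguous locally owned range and the additional ghost
// indices needed on this process (in a real program they would typically
// be obtained from the DoFHandler); all index variables are hypothetical.
IndexSet owned_range (n_global_dofs);
owned_range.add_range (my_first_dof, my_one_past_last_dof);
IndexSet ghost_set (n_global_dofs);
ghost_set.add_indices (needed_ghost_dofs.begin(), needed_ghost_dofs.end());
Vector<double> v (owned_range, ghost_set, MPI_COMM_WORLD);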
- /**
- * Create the vector based on the parallel
- * partitioning described in @p
- * partitioner. The input argument is a shared
- * pointer, which stores the partitioner data
- * only once and shares it between several
- * vectors with the same layout.
- */
- Vector (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
-
- /**
- * Destructor.
- */
- ~Vector ();
-
- /**
- * Sets the global size of the vector to @p
- * size without any actual parallel
- * distribution.
- */
- void reinit (const unsigned int size,
- const bool fast = false);
-
- /**
- * Uses the parallel layout of the input
- * vector @p in_vector and allocates memory
- * for this vector. Recommended initialization
- * function when several vectors with the same
- * layout should be created.
- *
- * If the flag @p fast is set to false, the
- * memory will be initialized with zero,
- * otherwise the memory will be untouched (and
- * the user must make sure to fill it with
- * reasonable data before using it).
- */
- template <typename Number2>
- void reinit(const Vector<Number2> &in_vector,
- const bool fast = false);
-
- /**
- * Initialize the vector. The local range is
- * specified by @p locally_owned_set (note
- * that this must be a contiguous interval,
- * multiple intervals are not possible). The
- * IndexSet @p ghost_indices specifies ghost
- * indices, i.e., indices which one might need
- * to read data from or accumulate data
- * from. It is allowed that the set of ghost
- * indices also contains the local range, but
- * it does not need to.
- *
- * This function involves global
- * communication, so it should only be called
- * once for a given layout. Use the @p reinit
- * function with Vector<Number> argument to
- * create additional vectors with the same
- * parallel layout.
- */
- void reinit (const IndexSet &local_range,
- const IndexSet &ghost_indices,
- const MPI_Comm communicator);
-
- /**
- * Initialize the vector according to the parallel
- * partitioning described in @p
- * partitioner. The input argument is a shared
- * pointer, which stores the partitioner data
- * only once and shares it between several
- * vectors with the same layout.
- */
- void reinit (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * @p v. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * This function is analogous to
- * the @p swap function of all C++
- * standard containers. Also,
- * there is a global function
- * <tt>swap(u,v)</tt> that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- *
- * This function is virtual in
- * order to allow for derived
- * classes to handle memory
- * separately.
- */
- void swap (Vector<Number> &v);
-
- /**
- * Assigns the vector to the parallel
- * partitioning of the input vector @p
- * in_vector, and copies all the data.
- */
- Vector<Number> &
- operator = (const Vector<Number> &in_vector);
-
- /**
- * Assigns the vector to the parallel
- * partitioning of the input vector @p
- * in_vector, and copies all the data.
- */
- template <typename Number2>
- Vector<Number> &
- operator = (const Vector<Number2> &in_vector);
-
- /**
- * This method copies the local range from
- * another vector with the same local range,
- * but possibly different layout of ghost
- * indices.
- */
- void copy_from (const Vector<Number> &in_vector,
- const bool call_update_ghost_values = false);
-
- /**
- * Sets all elements of the vector to the
- * scalar @p s. If the scalar is zero, also
- * ghost elements are set to zero, otherwise
- * they remain unchanged.
- */
- Vector<Number>& operator = (const Number s);
-
- /**
- * This function copies the data that has
- * accumulated in the data buffer for ghost
- * indices to the owning processor.
- *
- * For the meaning of this argument,
- * see the entry on @ref
- * GlossCompress "Compressing
- * distributed vectors and matrices"
- * in the glossary.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
-
- /**
- * Fills the data field for ghost indices with
- * the values stored in the respective
- * positions of the owning processor. This
- * function is needed before reading from
- * ghosts. The function is @p const even
- * though ghost data is changed. This is
- * needed to allow functions with a @p const
- * vector to perform the data exchange without
- * creating temporaries.
- */
- void update_ghost_values () const;
-
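// Sketch of the usual exchange pattern (v and ghost_index hypothetical):
// after adding local contributions to entries owned by other processors,
// compress() accumulates them on the owner; before reading ghost entries,
// update_ghost_values() fetches the current values from the owners.
v.compress (dealii::VectorOperation::add);
v.update_ghost_values ();
const double ghosted_value = v (ghost_index);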
- /**
- * Initiates communication for the @p
- * compress() function with non-blocking
- * communication. This function does not wait
- * for the transfer to finish, in order to
- * allow for other computations during the
- * time it takes until all data arrives.
- *
- * Before the data is actually exchanged, the
- * function must be followed by a call to @p
- * compress_finish().
- *
- * In case this function is called for more
- * than one vector before @p
- * compress_finish() is invoked, it is
- * mandatory to specify a unique
- * communication channel to each such call, in
- * order to avoid several messages with the
- * same ID that will corrupt this operation.
- */
- void compress_start (const unsigned int communication_channel = 0);
-
- /**
- * For all requests that have been initiated
- * in compress_start, wait for the
- * communication to finish. Once it is
- * finished, add or set the data (depending on
- * whether @p add_ghost_data is @p true or @p
- * false) to the respective positions in the
- * owning processor, and clear the contents in
- * the ghost data fields. The meaning of
- * this argument is the same as in compress().
- *
- * Must follow a call to the @p compress_start
- * function.
- */
- void compress_finish (const bool add_ghost_data = true);
-
-
- /**
- * Initiates communication for the @p
- * update_ghost_values() function with non-blocking
- * communication. This function does not wait
- * for the transfer to finish, in order to
- * allow for other computations during the
- * time it takes until all data arrives.
- *
- * Before the data is actually exchanged, the
- * function must be followed by a call to @p
- * update_ghost_values_finish().
- *
- * In case this function is called for more
- * than one vector before @p
- * update_ghost_values_finish() is invoked, it is
- * mandatory to specify a unique communication
- * channel to each such call, in order to
- * avoid several messages with the same ID
- * that will corrupt this operation.
- */
- void update_ghost_values_start (const unsigned int communication_channel = 0) const;
-
-
- /**
- * For all requests that have been started in
- * update_ghost_values_start, wait for the communication
- * to finish.
- *
- * Must follow a call to the @p
- * update_ghost_values_start function before reading
- * data from ghost indices.
- */
- void update_ghost_values_finish () const;
-
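// Sketch of overlapping communication with computation: start the ghost
// update, perform local work that does not touch ghost entries, then
// finish the transfer before reading them (do_local_work is a hypothetical
// helper).
v.update_ghost_values_start ();
do_local_work ();
v.update_ghost_values_finish ();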
- /**
- * This method zeros the entries on ghost
- * dofs, but does not touch locally owned
- * DoFs.
- */
- void zero_out_ghosts ();
-
- /**
- * Return whether the vector contains only
- * elements with value zero. This function
- * is mainly for internal consistency
- * checks and should seldom be used outside
- * of debug mode since it is quite
- * expensive.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries are
- * zero or positive. This function is
- * used, for example, to check whether
- * refinement indicators are really all
- * positive (or zero).
- *
- * The function obviously only makes
- * sense if the template argument of this
- * class is a real type. If it is a
- * complex type, then an exception is
- * thrown.
- */
- bool is_non_negative () const;
-
- /**
- * Checks for equality of the two vectors.
- */
- template <typename Number2>
- bool operator == (const Vector<Number2> &v) const;
-
- /**
- * Checks for inequality of the two vectors.
- */
- template <typename Number2>
- bool operator != (const Vector<Number2> &v) const;
-
- /**
- * Perform the inner product of two vectors.
- */
- template <typename Number2>
- Number operator * (const Vector<Number2> &V) const;
-
- /**
- * Computes the square of the l<sub>2</sub>
- * norm of the vector (i.e., the sum of the
- * squares of all entries among all
- * processors).
- */
- real_type norm_sqr () const;
-
- /**
- * Computes the mean value of all the entries
- * in the vector.
- */
- Number mean_value () const;
-
- /**
- * Returns the l<sub>1</sub> norm of the
- * vector (i.e., the sum of the absolute
- * values of all entries among all
- * processors).
- */
- real_type l1_norm () const;
-
- /**
- * Returns the l<sub>2</sub> norm of the
- * vector (i.e., square root of the sum of the
- * square of all entries among all
- * processors).
- */
- real_type l2_norm () const;
-
- /**
- * Returns the l<sub>p</sub> norm with real @p
- * p of the vector (i.e., the pth root of sum
- * of the pth power of all entries among all
- * processors).
- */
- real_type lp_norm (const real_type p) const;
-
- /**
- * Returns the maximum norm of the vector
- * (i.e., maximum absolute value among all
- * entries among all processors).
- */
- real_type linfty_norm () const;
-
- /**
- * Returns the global size of the vector,
- * equal to the sum of the number of locally
- * owned indices among all the processors.
- */
- types::global_dof_index size () const;
-
- /**
- * Returns the local size of the vector, i.e.,
- * the number of indices owned locally.
- */
- unsigned int local_size() const;
-
- /**
- * Returns the half-open interval that
- * specifies the locally owned range of the
- * vector. Note that <code>local_size() ==
- * local_range().second -
- * local_range().first</code>.
- */
- std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
-
- /**
- * Returns true if the given global index is
- * in the local range of this processor.
- */
- bool in_local_range (const types::global_dof_index global_index) const;
-
- /**
- * Returns the number of ghost elements
- * present on the vector.
- */
- unsigned int n_ghost_entries () const;
-
- /**
- * Returns whether the given global index is a
- * ghost index on the present
- * processor. Returns false for indices that
- * are owned locally and for indices not
- * present at all.
- */
- bool is_ghost_entry (const types::global_dof_index global_index) const;
-
- /**
- * Make the @p Vector class a bit like
- * the <tt>vector<></tt> class of the C++
- * standard library by returning
- * iterators to the start and end of the
- * locally owned elements of this vector.
- */
- iterator begin ();
-
- /**
- * Return constant iterator to the start of
- * the vector.
- */
- const_iterator begin () const;
-
- /**
- * Return an iterator pointing to the
- * element past the end of the array of
- * locally owned entries.
- */
- iterator end ();
-
- /**
- * Return a constant iterator pointing to
- * the element past the end of the array
- * of the locally owned entries.
- */
- const_iterator end () const;
- //@}
-
-
- /**
- * @name 2: Data-Access
- */
- //@{
-
- /**
- * Read access to the data in the
- * position corresponding to @p
- * global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- */
- Number operator () (const types::global_dof_index global_index) const;
-
- /**
- * Read and write access to the data
- * in the position corresponding to
- * @p global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- */
- Number& operator () (const types::global_dof_index global_index);
-
- /**
- * Read access to the data in the
- * position corresponding to @p
- * global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- *
- * This function does the same thing
- * as operator().
- */
- Number operator [] (const types::global_dof_index global_index) const;
-
- /**
- * Read and write access to the data
- * in the position corresponding to
- * @p global_index. The index must be
- * either in the local range of the
- * vector or be specified as a ghost
- * index at construction.
- *
- * This function does the same thing
- * as operator().
- */
- Number& operator [] (const types::global_dof_index global_index);
-
- /**
- * Read access to the data field specified by
- * @p local_index. Locally owned indices can
- * be accessed with indices
- * <code>[0,local_size)</code>, and ghost
- * indices with indices
- * <code>[local_size,local_size+
- * n_ghost_entries]</code>.
- */
- Number local_element (const unsigned int local_index) const;
-
- /**
- * Read and write access to the data field
- * specified by @p local_index. Locally owned
- * indices can be accessed with indices
- * <code>[0,local_size)</code>, and ghost
- * indices with indices
- * <code>[local_size,local_size+n_ghosts]</code>.
- */
- Number& local_element (const unsigned int local_index);
- //@}
-
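// Sketch relating the two access schemes above (v is a hypothetical,
// initialized vector): an owned entry can be read either through its
// global index or through its position inside the locally owned range.
const types::global_dof_index first_owned = v.local_range ().first;
const double a = v (first_owned);
const double b = v.local_element (0);   // same entry, local indexing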
-
- /**
- * @name 3: Modification of vectors
- */
- //@{
-
- /**
- * Add the given vector to the present
- * one.
- */
- Vector<Number> & operator += (const Vector<Number> &V);
-
- /**
- * Subtract the given vector from the
- * present one.
- */
- Vector<Number> & operator -= (const Vector<Number> &V);
-
- /**
- * A collective add operation:
- * This function adds a whole
- * set of values stored in @p
- * values to the vector
- * components specified by @p
- * indices.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const std::vector<OtherNumber> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const ::dealii::Vector<OtherNumber> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const OtherNumber *values);
-
- /**
- * Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
- */
- void add (const Number s);
-
- /**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const Vector<Number> &V);
-
- /**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this += a*V</tt>.
- */
- void add (const Number a, const Vector<Number> &V);
-
- /**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this += a*V+b*W</tt>.
- */
- void add (const Number a, const Vector<Number> &V,
- const Number b, const Vector<Number> &W);
-
- /**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this)+V</tt>.
- */
- void sadd (const Number s,
- const Vector<Number> &V);
-
- /**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this)+a*V</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V);
-
- /**
- * Scaling and multiple addition.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W);
-
- /**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W,
- const Number c,
- const Vector<Number> &X);
-
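// Sketch of the compound updates declared above (w, x, y, s, a, b are
// hypothetical): both alternatives leave w equal to s*w + a*x + b*y.
w.sadd (s, a, x, b, y);
// equivalently:
w *= s;
w.add (a, x, b, y);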
- /**
- * Scale each element of the
- * vector by the given factor.
- *
- * This function is deprecated
- * and will be removed in a
- * future version. Use
- * <tt>operator *=</tt> and
- * <tt>operator /=</tt> instead.
- */
- void scale (const Number factor);
-
-
- /**
- * Scale each element of the
- * vector by a constant
- * value.
- */
- Vector<Number> & operator *= (const Number factor);
-
- /**
- * Scale each element of the
- * vector by the inverse of the
- * given value.
- */
- Vector<Number> & operator /= (const Number factor);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- void scale (const Vector<Number> &scaling_factors);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- template <typename Number2>
- void scale (const Vector<Number2> &scaling_factors);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- void equ (const Number a, const Vector<Number>& u);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- template <typename Number2>
- void equ (const Number a, const Vector<Number2>& u);
-
- /**
- * Assignment <tt>*this = a*u + b*v</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v);
-
- /**
- * Assignment <tt>*this = a*u + b*v + b*w</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v,
- const Number c, const Vector<Number>& w);
-
- /**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
- *
- * This vector is appropriately
- * scaled to hold the result.
- *
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
- */
- void ratio (const Vector<Number> &a,
- const Vector<Number> &b);
- //@}
-
-
- /**
- * @name 4: Mixed stuff
- */
- //@{
- /**
- * Checks whether the given
- * partitioner is compatible with the
- * partitioner used for this
- * vector. Two partitioners are
- * compatible if they have the same
- * local size and the same ghost
- * indices. They do not necessarily
- * need to be the same data
- * field. This is a local operation
- * only, i.e., if only some
- * processors decide that the
- * partitioning is not compatible,
- * only these processors will return
- * @p false, whereas the other
- * processors will return @p true.
- */
- bool
- partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const;
-
-
- /**
- * Prints the vector to the output stream @p
- * out.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Returns the memory consumption of this
- * class in bytes.
- */
- std::size_t memory_consumption () const;
- //@}
-
- private:
- /**
- * Shared pointer to store the parallel
- * partitioning information. This information
- * can be shared between several vectors that
- * have the same partitioning.
- */
- std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
-
- /**
- * The size that is currently allocated in the
- * val array.
- */
- unsigned int allocated_size;
-
- /**
- * Pointer to the array of
- * local elements of this vector.
- */
- Number *val;
-
- /**
- * Temporary storage that holds the data that
- * is sent to this processor in @p compress()
- * or sent from this processor in @p
- * update_ghost_values.
- */
- mutable Number *import_data;
-
- /**
- * Provide this class with all functionality
- * of ::dealii::Vector by creating a
- * VectorView object.
- */
- VectorView<Number> vector_view;
+ public:
+ /**
+ * Declare standard types used in all
+ * containers. These types parallel those in
+ * the <tt>C++</tt> standard libraries
+ * <tt>vector<...></tt> class.
+ */
+ typedef Number value_type;
+ typedef value_type *pointer;
+ typedef const value_type *const_pointer;
+ typedef value_type *iterator;
+ typedef const value_type *const_iterator;
+ typedef value_type &reference;
+ typedef const value_type &const_reference;
+ typedef size_t size_type;
+ typedef typename numbers::NumberTraits<Number>::real_type real_type;
+
+ /**
+ * @name 1: Basic Object-handling
+ */
+ //@{
+ /**
+ * Empty constructor.
+ */
+ Vector ();
+
+ /**
+ * Copy constructor. Uses the parallel
+ * partitioning of @p in_vector.
+ */
+ Vector (const Vector<Number> &in_vector);
+
+ /**
+ * Constructs a parallel vector of the given
+ * global size without any actual parallel
+ * distribution.
+ */
+ Vector (const unsigned int size);
+
+ /**
+ * Constructs a parallel vector. The local
+ * range is specified by @p locally_owned_set
+ * (note that this must be a contiguous
+ * interval, multiple intervals are not
+ * possible). The IndexSet @p ghost_indices
+ * specifies ghost indices, i.e., indices
+ * which one might need to read data from or
+ * accumulate data from. It is allowed that
+ * the set of ghost indices also contains the
+ * local range, but it does not need to.
+ *
+ * This function involves global
+ * communication, so it should only be called
+ * once for a given layout. Use the
+ * constructor with Vector<Number> argument to
+ * create additional vectors with the same
+ * parallel layout.
+ */
+ Vector (const IndexSet &local_range,
+ const IndexSet &ghost_indices,
+ const MPI_Comm communicator);
+
+ /**
+ * Create the vector based on the parallel
+ * partitioning described in @p
+ * partitioner. The input argument is a shared
+ * pointer, which stores the partitioner data
+ * only once and shares it between several
+ * vectors with the same layout.
+ */
+ Vector (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+ /**
+ * Destructor.
+ */
+ ~Vector ();
+
+ /**
+ * Sets the global size of the vector to @p
+ * size without any actual parallel
+ * distribution.
+ */
+ void reinit (const unsigned int size,
+ const bool fast = false);
+
+ /**
+ * Uses the parallel layout of the input
+ * vector @p in_vector and allocates memory
+ * for this vector. Recommended initialization
+ * function when several vectors with the same
+ * layout should be created.
+ *
+ * If the flag @p fast is set to false, the
+ * memory will be initialized with zero,
+ * otherwise the memory will be untouched (and
+ * the user must make sure to fill it with
+ * reasonable data before using it).
+ */
+ template <typename Number2>
+ void reinit(const Vector<Number2> &in_vector,
+ const bool fast = false);
+
+ /**
+ * Initialize the vector. The local range is
+ * specified by @p locally_owned_set (note
+ * that this must be a contiguous interval,
+ * multiple intervals are not possible). The
+ * IndexSet @p ghost_indices specifies ghost
+ * indices, i.e., indices which one might need
+ * to read data from or accumulate data
+ * from. It is allowed that the set of ghost
+ * indices also contains the local range, but
+ * it does not need to.
+ *
+ * This function involves global
+ * communication, so it should only be called
+ * once for a given layout. Use the @p reinit
+ * function with Vector<Number> argument to
+ * create additional vectors with the same
+ * parallel layout.
+ */
+ void reinit (const IndexSet &local_range,
+ const IndexSet &ghost_indices,
+ const MPI_Comm communicator);
+
+ /**
+ * Initialize the vector according to the parallel
+ * partitioning described in @p
+ * partitioner. The input argument is a shared
+ * pointer, which store the partitioner data
+ * only once and share it between several
+ * vectors with the same layout.
+ */
+ void reinit (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * @p v. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * This function is analogous to
+ * the @p swap function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * <tt>swap(u,v)</tt> that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ *
+ * This function is virtual in
+ * order to allow for derived
+ * classes to handle memory
+ * separately.
+ */
+ void swap (Vector<Number> &v);
+
+ /**
+ * Assigns the vector to the parallel
+ * partitioning of the input vector @p
+ * in_vector, and copies all the data.
+ */
+ Vector<Number> &
+ operator = (const Vector<Number> &in_vector);
+
+ /**
+ * Assigns the vector to the parallel
+ * partitioning of the input vector @p
+ * in_vector, and copies all the data.
+ */
+ template <typename Number2>
+ Vector<Number> &
+ operator = (const Vector<Number2> &in_vector);
+
+ /**
+ * This method copies the local range from
+ * another vector with the same local range,
+ * but possibly different layout of ghost
+ * indices.
+ */
+ void copy_from (const Vector<Number> &in_vector,
+ const bool call_update_ghost_values = false);
+
+ /**
+ * Sets all elements of the vector to the
+ * scalar @p s. If the scalar is zero, the
+ * ghost elements are also set to zero;
+ * otherwise they remain unchanged.
+ */
+ Vector<Number> &operator = (const Number s);
+
+ /**
+ * This function copies the data that has
+ * accumulated in the data buffer for ghost
+ * indices to the owning processor.
+ *
+ * For the meaning of this argument,
+ * see the entry on @ref
+ * GlossCompress "Compressing
+ * distributed vectors and matrices"
+ * in the glossary.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
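A short sketch of the intended use for the vector v from the earlier sketch: values written into ghost positions are shipped to their owners by compress(). The index and value chosen below are illustrative.

    // Accumulate into an entry this process does not own (index 9 was
    // declared as a ghost index at construction) ...
    v (9) += 1.0;
    // ... then add the buffered ghost contribution to the owning process.
    v.compress (dealii::VectorOperation::add);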
+
+
+ /**
+ * Fills the data field for ghost indices with
+ * the values stored in the respective
+ * positions of the owning processor. This
+ * function is needed before reading from
+ * ghosts. The function is @p const even
+ * though ghost data is changed. This is
+ * needed to allow functions with a @p const
+ * vector to perform the data exchange without
+ * creating temporaries.
+ */
+ void update_ghost_values () const;
+
+ /**
+ * Initiates communication for the @p
+ * compress() function with non-blocking
+ * communication. This function does not wait
+ * for the transfer to finish, in order to
+ * allow for other computations during the
+ * time it takes until all data arrives.
+ *
+ * Before the data is actually exchanged, the
+ * function must be followed by a call to @p
+ * compress_finish().
+ *
+ * In case this function is called for more
+ * than one vector before @p
+ * compress_finish() is invoked, it is
+ * mandatory to specify a unique
+ * communication channel to each such call, in
+ * order to avoid several messages with the
+ * same ID that will corrupt this operation.
+ */
+ void compress_start (const unsigned int communication_channel = 0);
+
+ /**
+ * For all requests that have been initiated
+ * in compress_start, wait for the
+ * communication to finish. Once it is
+ * finished, add or set the data (depending on
+ * whether @p add_ghost_data is @p true or @p
+ * false) to the respective positions in the
+ * owning processor, and clear the contents in
+ * the ghost data fields. The meaning of
+ * this argument is the same as in compress().
+ *
+ * Must follow a call to the @p compress_start
+ * function.
+ */
+ void compress_finish (const bool add_ghost_data = true);
+
+
+ /**
+ * Initiates communication for the @p
+ * update_ghost_values() function with non-blocking
+ * communication. This function does not wait
+ * for the transfer to finish, in order to
+ * allow for other computations during the
+ * time it takes until all data arrives.
+ *
+ * Before the data is actually exchanged, the
+ * function must be followed by a call to @p
+ * update_ghost_values_finish().
+ *
+ * In case this function is called for more
+ * than one vector before @p
+ * update_ghost_values_finish() is invoked, it is
+ * mandatory to specify a unique communication
+ * channel to each such call, in order to
+ * avoid several messages with the same ID
+ * that will corrupt this operation.
+ */
+ void update_ghost_values_start (const unsigned int communication_channel = 0) const;
+
+
+ /**
+ * For all requests that have been started in
+ * update_ghost_values_start, wait for the communication
+ * to finish.
+ *
+ * Must follow a call to the @p
+ * update_ghost_values_start function before reading
+ * data from ghost indices.
+ */
+ void update_ghost_values_finish () const;
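The split start()/finish() interface is meant to overlap communication with local work; a sketch for the vector v from above, where the local-work routine is a placeholder:

    v.update_ghost_values_start (/*communication_channel=*/ 0);
    do_local_work_without_ghosts ();    // placeholder for computations that need no ghosts
    v.update_ghost_values_finish ();
    const double g = v (9);             // the ghost value is now valid for reading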
+
+ /**
+ * This method zeros the entries on ghost
+ * dofs, but does not touch locally owned
+ * DoFs.
+ */
+ void zero_out_ghosts ();
+
+ /**
+ * Return whether the vector contains only
+ * elements with value zero. This function
+ * is mainly for internal consistency
+ * checks and should seldom be used when
+ * not in debug mode since it takes quite
+ * some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries are
+ * zero or positive. This function is
+ * used, for example, to check whether
+ * refinement indicators are really all
+ * positive (or zero).
+ *
+ * The function obviously only makes
+ * sense if the template argument of this
+ * class is a real type. If it is a
+ * complex type, then an exception is
+ * thrown.
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Checks for equality of the two vectors.
+ */
+ template <typename Number2>
+ bool operator == (const Vector<Number2> &v) const;
+
+ /**
+ * Checks for inequality of the two vectors.
+ */
+ template <typename Number2>
+ bool operator != (const Vector<Number2> &v) const;
+
+ /**
+ * Perform the inner product of two vectors.
+ */
+ template <typename Number2>
+ Number operator * (const Vector<Number2> &V) const;
+
+ /**
+ * Computes the square of the l<sub>2</sub>
+ * norm of the vector (i.e., the sum of the
+ * squares of all entries among all
+ * processors).
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Computes the mean value of all the entries
+ * in the vector.
+ */
+ Number mean_value () const;
+
+ /**
+ * Returns the l<sub>1</sub> norm of the
+ * vector (i.e., the sum of the absolute
+ * values of all entries among all
+ * processors).
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Returns the l<sub>2</sub> norm of the
+ * vector (i.e., square root of the sum of the
+ * square of all entries among all
+ * processors).
+ */
+ real_type l2_norm () const;
+
+ /**
+ * Returns the l<sub>p</sub> norm with real @p
+ * p of the vector (i.e., the pth root of the
+ * sum of the pth powers of all entries among all
+ * processors).
+ */
+ real_type lp_norm (const real_type p) const;
+
+ /**
+ * Returns the maximum norm of the vector
+ * (i.e., maximum absolute value among all
+ * entries among all processors).
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Returns the global size of the vector,
+ * equal to the sum of the number of locally
+ * owned indices among all the processors.
+ */
+ types::global_dof_index size () const;
+
+ /**
+ * Returns the local size of the vector, i.e.,
+ * the number of indices owned locally.
+ */
+ unsigned int local_size() const;
+
+ /**
+ * Returns the half-open interval that
+ * specifies the locally owned range of the
+ * vector. Note that <code>local_size() ==
+ * local_range().second -
+ * local_range().first</code>.
+ */
+ std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
+
+ /**
+ * Returns true if the given global index is
+ * in the local range of this processor.
+ */
+ bool in_local_range (const types::global_dof_index global_index) const;
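A sketch of how these size queries fit together for the vector v from above:

    // The globally numbered entries owned by this process.
    const std::pair<types::global_dof_index, types::global_dof_index>
      range = v.local_range ();
    for (types::global_dof_index i = range.first; i < range.second; ++i)
      v (i) = 0.;                       // every such i satisfies v.in_local_range(i)
    // By definition, v.local_size() == range.second - range.first.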
+
+ /**
+ * Returns the number of ghost elements
+ * present on the vector.
+ */
+ unsigned int n_ghost_entries () const;
+
+ /**
+ * Returns whether the given global index is a
+ * ghost index on the present
+ * processor. Returns false for indices that
+ * are owned locally and for indices not
+ * present at all.
+ */
+ bool is_ghost_entry (const types::global_dof_index global_index) const;
+
+ /**
+ * Make the @p Vector class a bit like
+ * the <tt>vector<></tt> class of the C++
+ * standard library by returning
+ * iterators to the start and end of the
+ * locally owned elements of this vector.
+ */
+ iterator begin ();
+
+ /**
+ * Return constant iterator to the start of
+ * the vector.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Return an iterator pointing to the
+ * element past the end of the array of
+ * locally owned entries.
+ */
+ iterator end ();
+
+ /**
+ * Return a constant iterator pointing to
+ * the element past the end of the array
+ * of the locally owned entries.
+ */
+ const_iterator end () const;
+ //@}
+
+
+ /**
+ * @name 2: Data-Access
+ */
+ //@{
+
+ /**
+ * Read access to the data in the
+ * position corresponding to @p
+ * global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ */
+ Number operator () (const types::global_dof_index global_index) const;
+
+ /**
+ * Read and write access to the data
+ * in the position corresponding to
+ * @p global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ */
+ Number &operator () (const types::global_dof_index global_index);
+
+ /**
+ * Read access to the data in the
+ * position corresponding to @p
+ * global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ *
+ * This function does the same thing
+ * as operator().
+ */
+ Number operator [] (const types::global_dof_index global_index) const;
+
+ /**
+ * Read and write access to the data
+ * in the position corresponding to
+ * @p global_index. The index must be
+ * either in the local range of the
+ * vector or be specified as a ghost
+ * index at construction.
+ *
+ * This function does the same thing
+ * as operator().
+ */
+ Number &operator [] (const types::global_dof_index global_index);
+
+ /**
+ * Read access to the data field specified by
+ * @p local_index. Locally owned indices can
+ * be accessed with indices
+ * <code>[0,local_size)</code>, and ghost
+ * indices with indices
+ * <code>[local_size,local_size+
+ * n_ghost_entries)</code>.
+ */
+ Number local_element (const unsigned int local_index) const;
+
+ /**
+ * Read and write access to the data field
+ * specified by @p local_index. Locally owned
+ * indices can be accessed with indices
+ * <code>[0,local_size)</code>, and ghost
+ * indices with indices
+ * <code>[local_size,local_size+n_ghost_entries)</code>.
+ */
+ Number &local_element (const unsigned int local_index);
+ //@}
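The local numbering places the locally owned entries first and the ghost entries directly behind them; a sketch of both kinds of access for the vector v from above:

    // Write all locally owned entries ...
    for (unsigned int i = 0; i < v.local_size (); ++i)
      v.local_element (i) = 1.;

    // ... and read the ghost entries that follow them in the local numbering.
    double ghost_sum = 0.;
    for (unsigned int i = 0; i < v.n_ghost_entries (); ++i)
      ghost_sum += v.local_element (v.local_size () + i);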
+
+
+ /**
+ * @name 3: Modification of vectors
+ */
+ //@{
+
+ /**
+ * Add the given vector to the present
+ * one.
+ */
+ Vector<Number> &operator += (const Vector<Number> &V);
+
+ /**
+ * Subtract the given vector from the
+ * present one.
+ */
+ Vector<Number> &operator -= (const Vector<Number> &V);
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
+ const std::vector<OtherNumber> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<OtherNumber> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ template <typename OtherNumber>
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
+ const OtherNumber *values);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is a
+ * scalar and not a vector.
+ */
+ void add (const Number s);
+
+ /**
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const Vector<Number> &V);
+
+ /**
+ * Simple addition of a multiple of a
+ * vector, i.e. <tt>*this += a*V</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V);
+
+ /**
+ * Multiple addition of scaled vectors,
+ * i.e. <tt>*this += a*V+b*W</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V,
+ const Number b, const Vector<Number> &W);
+
+ /**
+ * Scaling and simple vector addition,
+ * i.e.
+ * <tt>*this = s*(*this)+V</tt>.
+ */
+ void sadd (const Number s,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and simple addition, i.e.
+ * <tt>*this = s*(*this)+a*V</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W,
+ const Number c,
+ const Vector<Number> &X);
+
+ /**
+ * Scale each element of the
+ * vector by the given factor.
+ *
+ * This function is deprecated
+ * and will be removed in a
+ * future version. Use
+ * <tt>operator *=</tt> and
+ * <tt>operator /=</tt> instead.
+ */
+ void scale (const Number factor);
+
+
+ /**
+ * Scale each element of the
+ * vector by a constant
+ * value.
+ */
+ Vector<Number> &operator *= (const Number factor);
+
+ /**
+ * Scale each element of the
+ * vector by the inverse of the
+ * given value.
+ */
+ Vector<Number> &operator /= (const Number factor);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const Vector<Number> &scaling_factors);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ template <typename Number2>
+ void scale (const Vector<Number2> &scaling_factors);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ template <typename Number2>
+ void equ (const Number a, const Vector<Number2> &u);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v + c*w</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v,
+ const Number c, const Vector<Number> &w);
+
+ /**
+ * Compute the elementwise ratio of the
+ * two given vectors, that is let
+ * <tt>this[i] = a[i]/b[i]</tt>. This is
+ * useful for example if you want to
+ * compute the cellwise ratio of true to
+ * estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const Vector<Number> &a,
+ const Vector<Number> &b);
+ //@}
+
+
+ /**
+ * @name 4: Mixed stuff
+ */
+ //@{
+ /**
+ * Checks whether the given
+ * partitioner is compatible with the
+ * partitioner used for this
+ * vector. Two partitioners are
+ * compatible if they have the same
+ * local size and the same ghost
+ * indices. They do not necessarily
+ * need to be the same data
+ * field. This is a local operation
+ * only, i.e., if only some
+ * processors decide that the
+ * partitioning is not compatible,
+ * only these processors will return
+ * @p false, whereas the other
+ * processors will return @p true.
+ */
+ bool
+ partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const;
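A sketch of the check, assuming that a Utilities::MPI::Partitioner can be constructed from the same index sets used for the vector above (that constructor signature is an assumption, not part of this excerpt):

    Utilities::MPI::Partitioner reference_layout (locally_owned,
                                                  ghost_indices,
                                                  MPI_COMM_WORLD);
    if (v.partitioners_are_compatible (reference_layout))
      {
        // v can safely be combined entry-wise with vectors built on this layout.
      }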
+
+
+ /**
+ * Prints the vector to the output stream @p
+ * out.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Returns the memory consumption of this
+ * class in bytes.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
+ private:
+ /**
+ * Shared pointer to store the parallel
+ * partitioning information. This information
+ * can be shared between several vectors that
+ * have the same partitioning.
+ */
+ std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+ /**
+ * The size that is currently allocated in the
+ * val array.
+ */
+ unsigned int allocated_size;
+
+ /**
+ * Pointer to the array of
+ * local elements of this vector.
+ */
+ Number *val;
+
+ /**
+ * Temporary storage that holds the data that
+ * is sent to this processor in @p compress()
+ * or sent from this processor in @p
+ * update_ghost_values.
+ */
+ mutable Number *import_data;
+
+ /**
+ * Provide this class with all functionality
+ * of ::dealii::Vector by creating a
+ * VectorView object.
+ */
+ VectorView<Number> vector_view;
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- /**
- * A vector that collects all requests from @p
- * compress() operations. This class uses
- * persistent MPI communicators, i.e., the
- * communication channels are stored during
- * successive calls to a given function. This
- * reduces the overhead involved with setting
- * up the MPI machinery, but it does not
- * remove the need for a receive operation to
- * be posted before the data can actually be
- * sent.
- */
- std::vector<MPI_Request> compress_requests;
-
- /**
- * A vector that collects all requests from @p
- * update_ghost_values() operations. This class uses
- * persistent MPI communicators.
- */
- mutable std::vector<MPI_Request> update_ghost_values_requests;
+ /**
+ * A vector that collects all requests from @p
+ * compress() operations. This class uses
+ * persistent MPI communicators, i.e., the
+ * communication channels are stored during
+ * successive calls to a given function. This
+ * reduces the overhead involved with setting
+ * up the MPI machinery, but it does not
+ * remove the need for a receive operation to
+ * be posted before the data can actually be
+ * sent.
+ */
+ std::vector<MPI_Request> compress_requests;
+
+ /**
+ * A vector that collects all requests from @p
+ * update_ghost_values() operations. This class uses
+ * persistent MPI communicators.
+ */
+ mutable std::vector<MPI_Request> update_ghost_values_requests;
#endif
- /**
- * A lock that makes sure that
- * the @p compress and @p
- * update_ghost_values functions
- * give reasonable results also
- * when used with several
- * threads.
- */
- mutable Threads::ThreadMutex mutex;
-
- /**
- * A helper function that clears the
- * compress_requests and update_ghost_values_requests
- * field. Used in reinit functions.
- */
- void clear_mpi_requests ();
-
- /**
- * A helper function that is used to resize
- * the val array.
- */
- void resize_val (const unsigned int new_allocated_size);
-
- /*
- * Make all other vector types
- * friends.
- */
- template <typename Number2> friend class Vector;
+ /**
+ * A lock that makes sure that
+ * the @p compress and @p
+ * update_ghost_values functions
+ * give reasonable results also
+ * when used with several
+ * threads.
+ */
+ mutable Threads::ThreadMutex mutex;
+
+ /**
+ * A helper function that clears the
+ * compress_requests and update_ghost_values_requests
+ * field. Used in reinit functions.
+ */
+ void clear_mpi_requests ();
+
+ /**
+ * A helper function that is used to resize
+ * the val array.
+ */
+ void resize_val (const unsigned int new_allocated_size);
+
+ /*
+ * Make all other vector types
+ * friends.
+ */
+ template <typename Number2> friend class Vector;
};
- /*@}*/
+ /*@}*/
- /*----------------------- Inline functions ----------------------------------*/
+ /*----------------------- Inline functions ----------------------------------*/
#ifndef DOXYGEN
namespace PETScWrappers
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
- * class implements the functions that are specific to the PETSc SparseMatrix
- * base objects for a blocked sparse matrix, and leaves the actual work
- * relaying most of the calls to the individual blocks to the functions
- * implemented in the base class. See there also for a description of when
- * this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
- * not have external objects for the sparsity patterns. Thus, one does not
- * determine the size of the individual blocks of a block matrix of this type
- * by attaching a block sparsity pattern, but by calling reinit() to set the
- * number of blocks and then by setting the size of each block separately. In
- * order to fix the data structures of the block matrix, it is then necessary
- * to let it know that we have changed the sizes of the underlying
- * matrices. For this, one has to call the collect_sizes() function, for much
- * the same reason as is documented with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
+ * class implements the functions that are specific to the PETSc SparseMatrix
+ * base objects for a blocked sparse matrix, and leaves the actual work
+ * relaying most of the calls to the individual blocks to the functions
+ * implemented in the base class. See there also for a description of when
+ * this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
+ * not have external objects for the sparsity patterns. Thus, one does not
+ * determine the size of the individual blocks of a block matrix of this type
+ * by attaching a block sparsity pattern, but by calling reinit() to set the
+ * number of blocks and then by setting the size of each block separately. In
+ * order to fix the data structures of the block matrix, it is then necessary
+ * to let it know that we have changed the sizes of the underlying
+ * matrices. For this, one has to call the collect_sizes() function, for much
+ * the same reason as is documented with the BlockSparsityPattern class.
+ *
+ * @ingroup Matrix1
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockSparseMatrix : public BlockMatrixBase<PETScWrappers::SparseMatrix>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix ();
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the block
- * matrices need to be the same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * unitialized ones, i.e. ones
- * for which also the sizes are
- * not yet set. You have to do
- * that by calling the @p reinit
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the @p SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- */
- void collect_sizes ();
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void Tvmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * Make the clear() function in the
- * base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
-
- /** @addtogroup Exceptions
- * @{
- */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- ///@}
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix ();
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the block
+ * matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this usually does not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keeps the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+ * uninitialized ones, i.e. ones
+ * for which also the sizes are
+ * not yet set. You have to do
+ * that by calling the @p reinit
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the @p SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ */
+ void collect_sizes ();
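A sketch of the workflow described in the class documentation above: fix the block structure, size each block individually, then let the block matrix update its cached sizes. The 2x2 structure and the reinit() arguments of the individual PETSc blocks are illustrative assumptions.

    PETScWrappers::BlockSparseMatrix matrix;
    matrix.reinit (2, 2);                       // 2x2 block structure, blocks still empty
    for (unsigned int r = 0; r < 2; ++r)
      for (unsigned int c = 0; c < 2; ++c)
        matrix.block (r, c).reinit (100, 100,   // block size and number of nonzeros
                                    5);         // per row are chosen by the user
    matrix.collect_sizes ();                    // must follow any change of block sizes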
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ void vmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void vmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void vmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void vmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ void Tvmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void Tvmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void Tvmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void Tvmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * Make the clear() function in the
+ * base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
+
+ /** @addtogroup Exceptions
+ * @{
+ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ ///@}
};
inline
void
- BlockSparseMatrix::Tvmult (BlockVector &dst,
+ BlockSparseMatrix::Tvmult (BlockVector &dst,
- const Vector &src) const
+ const Vector &src) const
{
BaseClass::Tvmult_block_nonblock (dst, src);
}
namespace PETScWrappers
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * An implementation of block vectors based on the vector class implemented in
- * PETScWrappers. While the base class provides for most of the interface,
- * this class handles the actual allocation of vectors and provides functions
- * that are specific to the underlying vector type.
- *
- * @ingroup Vectors
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * An implementation of block vectors based on the vector class implemented in
+ * PETScWrappers. While the base class provides for most of the interface,
+ * this class handles the actual allocation of vectors and provides functions
+ * that are specific to the underlying vector type.
+ *
+ * @ingroup Vectors
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor. There are three
- * ways to use this
- * constructor. First, without
- * any arguments, it generates
- * an object with no
- * blocks. Given one argument,
- * it initializes <tt>num_blocks</tt>
- * blocks, but these blocks have
- * size zero. The third variant
- * finally initializes all
- * blocks to the same size
- * <tt>block_size</tt>.
- *
- * Confer the other constructor
- * further down if you intend to
- * use blocks of different
- * sizes.
- */
- explicit BlockVector (const unsigned int num_blocks = 0,
- const unsigned int block_size = 0);
-
- /**
- * Copy-Constructor. Dimension set to
- * that of V, all components are copied
- * from V
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Copy-constructor: copy the values
- * from a PETSc wrapper parallel block
- * vector class.
- *
- *
- * Note that due to the communication
- * model of MPI, @em all processes have
- * to actually perform this operation,
- * even if they do not use the
- * result. It is not sufficient if only
- * one processor tries to copy the
- * elements from the other processors
- * over to its own process space.
- */
- explicit BlockVector (const MPI::BlockVector &v);
-
- /**
- * Constructor. Set the number of
- * blocks to <tt>n.size()</tt> and
- * initialize each block with
- * <tt>n[i]</tt> zero elements.
- */
- BlockVector (const std::vector<unsigned int> &n);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>n.size()</tt>. Initialize the
- * vector with the elements
- * pointed to by the range of
- * iterators given as second and
- * third argument. Apart from the
- * first argument, this
- * constructor is in complete
- * analogy to the respective
- * constructor of the
- * <tt>std::vector</tt> class, but the
- * first argument is needed in
- * order to know how to subdivide
- * the block vector into
- * different blocks.
- */
- template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Copy operator: fill all components of
- * the vector with the given scalar
- * value.
- */
- BlockVector & operator = (const value_type s);
-
- /**
- * Copy operator for arguments of the
- * same type.
- */
- BlockVector &
- operator= (const BlockVector &V);
-
- /**
- * Copy all the elements of the
- * parallel block vector @p v into this
- * local vector. Note that due to the
- * communication model of MPI, @em all
- * processes have to actually perform
- * this operation, even if they do not
- * use the result. It is not sufficient
- * if only one processor tries to copy
- * the elements from the other
- * processors over to its own process
- * space.
- */
- BlockVector &
- operator = (const MPI::BlockVector &v);
-
- /**
- * Reinitialize the BlockVector to
- * contain <tt>num_blocks</tt> blocks of
- * size <tt>block_size</tt> each.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const unsigned int num_blocks,
- const unsigned int block_size,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector such
- * that it contains
- * <tt>block_sizes.size()</tt>
- * blocks. Each block is reinitialized
- * to dimension
- * <tt>block_sizes[i]</tt>.
- *
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() of one of the
- * blocks, then subsequent
- * actions of this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast=false);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /** @addtogroup Exceptions
- * @{ */
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
- ///@}
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor. There are three
+ * ways to use this
+ * constructor. First, without
+ * any arguments, it generates
+ * an object with no
+ * blocks. Given one argument,
+ * it initializes <tt>num_blocks</tt>
+ * blocks, but these blocks have
+ * size zero. The third variant
+ * finally initializes all
+ * blocks to the same size
+ * <tt>block_size</tt>.
+ *
+ * Confer the other constructor
+ * further down if you intend to
+ * use blocks of different
+ * sizes.
+ */
+ explicit BlockVector (const unsigned int num_blocks = 0,
+ const unsigned int block_size = 0);
+
+ /**
+ * Copy constructor. The dimension is set to
+ * that of V; all components are copied
+ * from V.
+ */
+ BlockVector (const BlockVector &V);
+
+ /**
+ * Copy-constructor: copy the values
+ * from a PETSc wrapper parallel block
+ * vector class.
+ *
+ *
+ * Note that due to the communication
+ * model of MPI, @em all processes have
+ * to actually perform this operation,
+ * even if they do not use the
+ * result. It is not sufficient if only
+ * one processor tries to copy the
+ * elements from the other processors
+ * over to its own process space.
+ */
+ explicit BlockVector (const MPI::BlockVector &v);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to <tt>n.size()</tt> and
+ * initialize each block with
+ * <tt>n[i]</tt> zero elements.
+ */
+ BlockVector (const std::vector<unsigned int> &n);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>n.size()</tt>. Initialize the
+ * vector with the elements
+ * pointed to by the range of
+ * iterators given as second and
+ * third argument. Apart from the
+ * first argument, this
+ * constructor is in complete
+ * analogy to the respective
+ * constructor of the
+ * <tt>std::vector</tt> class, but the
+ * first argument is needed in
+ * order to know how to subdivide
+ * the block vector into
+ * different blocks.
+ */
+ template <typename InputIterator>
+ BlockVector (const std::vector<unsigned int> &n,
+ const InputIterator first,
+ const InputIterator end);
+
+ /**
+ * Destructor. Clears memory.
+ */
+ ~BlockVector ();
+
+ /**
+ * Copy operator: fill all components of
+ * the vector with the given scalar
+ * value.
+ */
+ BlockVector &operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of the
+ * same type.
+ */
+ BlockVector &
+ operator= (const BlockVector &V);
+
+ /**
+ * Copy all the elements of the
+ * parallel block vector @p v into this
+ * local vector. Note that due to the
+ * communication model of MPI, @em all
+ * processes have to actually perform
+ * this operation, even if they do not
+ * use the result. It is not sufficient
+ * if only one processor tries to copy
+ * the elements from the other
+ * processors over to its own process
+ * space.
+ */
+ BlockVector &
+ operator = (const MPI::BlockVector &v);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain <tt>num_blocks</tt> blocks of
+ * size <tt>block_size</tt> each.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const unsigned int num_blocks,
+ const unsigned int block_size,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector such
+ * that it contains
+ * <tt>block_sizes.size()</tt>
+ * blocks. Each block is reinitialized
+ * to dimension
+ * <tt>block_sizes[i]</tt>.
+ *
+ * If the number of blocks is the
+ * same as before this function
+ * was called, all vectors remain
+ * the same and reinit() is
+ * called for each vector.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const std::vector<unsigned int> &N,
+ const bool fast=false);
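A short sketch of creating a block vector with two blocks of different sizes; note that, as stated above, reinit() is called on the block vector itself rather than on its individual blocks. The sizes are illustrative.

    PETScWrappers::BlockVector bv;
    std::vector<unsigned int> block_sizes (2);
    block_sizes[0] = 10;
    block_sizes[1] = 25;
    bv.reinit (block_sizes, /*fast=*/ false);   // both blocks are filled with zeros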
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() of one of the
+ * blocks, then subsequent
+ * actions of this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast=false);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /** @addtogroup Exceptions
+ * @{ */
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+ ///@}
};
- /*@}*/
+ /*@}*/
- /*----------------------- Inline functions ----------------------------------*/
+ /*----------------------- Inline functions ----------------------------------*/
}
- /**
- * Base class for all matrix classes that are implemented on top of the PETSc
- * matrix types. Since in PETSc all matrix types (i.e. sequential and
- * parallel, sparse, blocked, etc.) are built by filling the contents of an
- * abstract object that is only referenced through a pointer of a type that is
- * independent of the actual matrix type, we can implement almost all
- * functionality of matrices in this base class. Derived classes will then only
- * have to provide the functionality to create one or the other kind of
- * matrix.
- *
- * The interface of this class is modeled after the existing
- * SparseMatrix class in deal.II. It has almost the same member
- * functions, and is often exchangable. However, since PETSc only supports a
- * single scalar type (either double, float, or a complex data type), it is
- * not templated, and only works with whatever your PETSc installation has
- * defined the data type PetscScalar to.
- *
- * Note that PETSc only guarantees that operations do what you expect if the
- * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called
- * after matrix assembly. Therefore, you need to call
- * SparseMatrix::compress() before you actually use the matrix. This also
- * calls @p MatCompress that compresses the storage format for sparse
- * matrices by discarding unused elements. PETSc allows to continue with
- * assembling the matrix after calls to these functions, but since there are
- * no more free entries available after that any more, it is better to only
- * call SparseMatrix::compress() once at the end of the assembly stage and
- * before the matrix is actively used.
- *
- * @ingroup PETScWrappers
- * @ingroup Matrix1
- * @author Wolfgang Bangerth, 2004
- */
+ /**
+ * Base class for all matrix classes that are implemented on top of the PETSc
+ * matrix types. Since in PETSc all matrix types (i.e. sequential and
+ * parallel, sparse, blocked, etc.) are built by filling the contents of an
+ * abstract object that is only referenced through a pointer of a type that is
+ * independent of the actual matrix type, we can implement almost all
+ * functionality of matrices in this base class. Derived classes will then only
+ * have to provide the functionality to create one or the other kind of
+ * matrix.
+ *
+ * The interface of this class is modeled after the existing
+ * SparseMatrix class in deal.II. It has almost the same member
+ * functions, and is often exchangeable. However, since PETSc only supports a
+ * single scalar type (either double, float, or a complex data type), it is
+ * not templated, and only works with whatever your PETSc installation has
+ * defined the data type PetscScalar to.
+ *
+ * Note that PETSc only guarantees that operations do what you expect if the
+ * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called
+ * after matrix assembly. Therefore, you need to call
+ * SparseMatrix::compress() before you actually use the matrix. This also
+ * calls @p MatCompress that compresses the storage format for sparse
+ * matrices by discarding unused elements. PETSc allows one to continue
+ * assembling the matrix after calls to these functions, but since no more
+ * free entries are available after that, it is better to only
+ * call SparseMatrix::compress() once at the end of the assembly stage and
+ * before the matrix is actively used.
+ *
+ * @ingroup PETScWrappers
+ * @ingroup Matrix1
+ * @author Wolfgang Bangerth, 2004
+ */
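The assembly convention described above, as a minimal sketch; the matrix object and its constructor arguments are placeholders, and compress() is the argument-free form referred to in the text.

    PETScWrappers::SparseMatrix A (3, 3, /*n_nonzero_per_row=*/ 3);  // assumed constructor
    A.set (0, 0,  2.0);
    A.set (0, 1, -1.0);
    A.set (1, 1,  2.0);
    A.compress ();            // MatAssemblyBegin/MatAssemblyEnd happen here
    // Only now read entries from A or use it in matrix-vector products.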
class MatrixBase : public Subscriptor
{
- public:
- /**
- * Declare a typedef for the iterator
- * class.
- */
- typedef MatrixIterators::const_iterator const_iterator;
-
- /**
- * Declare a typedef in analogy to all
- * the other container classes.
- */
- typedef PetscScalar value_type;
-
- /**
- * Default constructor.
- */
- MatrixBase ();
-
- /**
- * Destructor. Made virtual so that one
- * can use pointers to this class.
- */
- virtual ~MatrixBase ();
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keeps the sparsity pattern
- * previously used.
- */
- MatrixBase &
- operator = (const value_type d);
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor.
- */
- void clear ();
-
- /**
- * Set the element (<i>i,j</i>) to @p
- * value.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds a new entry to the
- * matrix if it didn't exist before,
- * very much in contrast to the
- * SparseMatrix class which throws an
- * error if the entry does not exist.
- * If <tt>value</tt> is not a finite
- * number an exception is thrown.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const PetscScalar value);
-
- /**
- * Set all elements given in a
- * FullMatrix<double> into the sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const PetscScalar *values,
- const bool elide_zero_values = false);
-
- /**
- * Add @p value to the element
- * (<i>i,j</i>).
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds a new entry to the
- * matrix if it didn't exist before,
- * very much in contrast to the
- * SparseMatrix class which throws an
- * error if the entry does not exist.
- * If <tt>value</tt> is not a finite
- * number an exception is thrown.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const PetscScalar value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<PetscScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * If the present object (from a
- * derived class of this one) happens
- * to be a sparse matrix, then this
- * function adds some new entries to
- * the matrix if they didn't exist
- * before, very much in contrast to
- * the SparseMatrix class which
- * throws an error if the entry does
- * not exist.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const PetscScalar *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Remove all elements from
- * this <tt>row</tt> by setting
- * them to zero. The function
- * does not modify the number
- * of allocated nonzero
- * entries, it only sets some
- * entries to zero. It may drop
- * them from the sparsity
- * pattern, though (but retains
- * the allocated memory in case
- * new entries are again added
- * later).
- *
- * This operation is used in
- * eliminating constraints (e.g. due to
- * hanging nodes) and makes sure that
- * we can write this modification to
- * the matrix without having to read
- * entries (such as the locations of
- * non-zero elements) from it --
- * without this operation, removing
- * constraints on parallel matrices is
- * a rather complicated procedure.
- *
- * The second parameter can be used to
- * set the diagonal entry of this row
- * to a value different from zero. The
- * default is to set it to zero.
- */
- void clear_row (const unsigned int row,
- const PetscScalar new_diag_value = 0);
-
- /**
- * Same as clear_row(), except that it
- * works on a number of rows at once.
- *
- * The second parameter can be used to
- * set the diagonal entries of all
- * cleared rows to something different
- * from zero. Note that all of these
- * diagonal entries get the same value
- * -- if you want different values for
- * the diagonal entries, you have to
- * set them by hand.
- */
- void clear_rows (const std::vector<unsigned int> &rows,
- const PetscScalar new_diag_value = 0);
-
- /**
- * PETSc matrices store their own
- * sparsity patterns. So, in analogy to
- * our own SparsityPattern class,
- * this function compresses the
- * sparsity pattern and allows the
- * resulting matrix to be used in all
- * other operations where before only
- * assembly functions were
- * allowed. This function must
- * therefore be called once you have
- * assembled the matrix.
- *
- * See @ref GlossCompress "Compressing distributed objects"
- * for more information.
- * more information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
- /**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you should
- * always take care where to call this
- * function. In contrast to the
- * respective function in the
- * @p MatrixBase class, we don't
- * throw an exception if the respective
- * entry doesn't exist in the sparsity
- * pattern of this class, since PETSc
- * does not transmit this information.
- *
- * This function is therefore exactly
- * equivalent to the <tt>el()</tt> function.
- */
- PetscScalar operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the value of the matrix entry
- * (<i>i,j</i>). If this entry does not
- * exist in the sparsity pattern, then
- * zero is returned. While this may be
- * convenient in some cases, note that
- * it is simple to write algorithms
- * that are slow compared to an optimal
- * solution, since the sparsity of the
- * matrix is not used.
- */
- PetscScalar el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic.
- *
- * Since we do not have direct access
- * to the underlying data structure,
- * this function is no faster than the
- * elementwise access using the el()
- * function. However, we provide this
- * function for compatibility with the
- * SparseMatrix class.
- */
- PetscScalar diag_element (const unsigned int i) const;
-
- /**
- * Return the number of rows in this
- * matrix.
- */
- unsigned int m () const;
-
- /**
- * Return the number of columns in this
- * matrix.
- */
- unsigned int n () const;
-
- /**
- * Return the local dimension of the
- * matrix, i.e. the number of rows
- * stored on the present MPI
- * process. For sequential matrices,
- * this number is the same as m(),
- * but for parallel matrices it may be
- * smaller.
- *
- * To figure out which elements
- * exactly are stored locally,
- * use local_range().
- */
- unsigned int local_size () const;
-
- /**
- * Return a pair of indices
- * indicating which rows of
- * this matrix are stored
- * locally. The first number is
- * the index of the first
- * row stored, the second
- * the index of the one past
- * the last one that is stored
- * locally. If this is a
- * sequential matrix, then the
- * result will be the pair
- * (0,m()), otherwise it will be
- * a pair (i,i+n), where
- * <tt>n=local_size()</tt>.
- */
- std::pair<unsigned int, unsigned int>
- local_range () const;
-
- /**
- * Return whether @p index is
- * in the local range or not,
- * see also local_range().
- */
- bool in_local_range (const unsigned int index) const;
-
- /**
- * Return a reference to the MPI
- * communicator object in use with this
- * matrix. This function has to be
- * implemented in derived classes.
- */
- virtual const MPI_Comm & get_mpi_communicator () const = 0;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const unsigned int row) const;
-
- /**
- * Return the l1-norm of the matrix, that is
- * $|M|_1=max_{all columns j}\sum_{all
- * rows i} |M_ij|$,
- * (max. sum of columns).
- * This is the
- * natural matrix norm that is compatible
- * to the l1-norm for vectors, i.e.
- * $|Mv|_1\leq |M|_1 |v|_1$.
- * (cf. Haemmerlin-Hoffmann:
- * Numerische Mathematik)
- */
- PetscReal l1_norm () const;
-
- /**
- * Return the linfty-norm of the
- * matrix, that is
- * $|M|_infty=max_{all rows i}\sum_{all
- * columns j} |M_ij|$,
- * (max. sum of rows).
- * This is the
- * natural matrix norm that is compatible
- * to the linfty-norm of vectors, i.e.
- * $|Mv|_infty \leq |M|_infty |v|_infty$.
- * (cf. Haemmerlin-Hoffmann:
- * Numerische Mathematik)
- */
- PetscReal linfty_norm () const;
-
- /**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
- * matrix.
- */
- PetscReal frobenius_norm () const;
-
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
- * i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to
- * be quadratic for this operation.
- *
- * The implementation of this function
- * is not as efficient as the one in
- * the @p MatrixBase class used in
- * deal.II (i.e. the original one, not
- * the PETSc wrapper class) since PETSc
- * doesn't support this operation and
- * needs a temporary vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix (of type
- * PETScWrappers::MPI::SparseMatrix),
- * then the given vector has to be
- * a distributed vector as
- * well. Conversely, if the matrix is
- * not distributed, then neither
- * may the vector be.
- */
- PetscScalar matrix_norm_square (const VectorBase &v) const;
-
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- *
- * The implementation of this function
- * is not as efficient as the one in
- * the @p MatrixBase class used in
- * deal.II (i.e. the original one, not
- * the PETSc wrapper class) since PETSc
- * doesn't support this operation and
- * needs a temporary vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix (of type
- * PETScWrappers::MPI::SparseMatrix),
- * then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- PetscScalar matrix_scalar_product (const VectorBase &u,
- const VectorBase &v) const;
+ public:
+ /**
+ * Declare a typedef for the iterator
+ * class.
+ */
+ typedef MatrixIterators::const_iterator const_iterator;
+
+ /**
+ * Declare a typedef in analogy to all
+ * the other container classes.
+ */
+ typedef PetscScalar value_type;
+
+ /**
+ * Default constructor.
+ */
+ MatrixBase ();
+
+ /**
+ * Destructor. Made virtual so that one
+ * can use pointers to this class.
+ */
+ virtual ~MatrixBase ();
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this does usually not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keeps the sparsity pattern
+ * previously used.
+ */
+ MatrixBase &
+ operator = (const value_type d);
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
+
+ /**
+ * Set the element (<i>i,j</i>) to @p
+ * value.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds a new entry to the
+ * matrix if it didn't exist before,
+ * very much in contrast to the
+ * SparseMatrix class which throws an
+ * error if the entry does not exist.
+ * If <tt>value</tt> is not a finite
+ * number an exception is thrown.
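+ *
+ * For illustration, writing individual entries might look like
+ * this, assuming <tt>matrix</tt> is an already initialized object
+ * of a class derived from this one (the call to compress() is
+ * discussed further down):
+ * @code
+ *   matrix.set (2, 3, 3.141);
+ *   matrix.compress (VectorOperation::insert);
+ * @endcode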
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const PetscScalar value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix<PetscScalar> into the sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ void set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
++ const std::vector<PetscScalar> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements to values
+ * given by <tt>values</tt> in a
+ * given row in columns given by
+ * col_indices into the sparse
+ * matrix.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
- const PetscScalar *values,
++ const PetscScalar *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add @p value to the element
+ * (<i>i,j</i>).
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds a new entry to the
+ * matrix if it didn't exist before,
+ * very much in contrast to the
+ * SparseMatrix class which throws an
+ * error if the entry does not exist.
+ * If <tt>value</tt> is not a finite
+ * number an exception is thrown.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const PetscScalar value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<PetscScalar> into the sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
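+ *
+ * For illustration, the usual assembly loop might look like the
+ * following sketch, assuming that <tt>matrix</tt>, <tt>fe</tt>,
+ * <tt>dof_handler</tt> and <tt>dim</tt> are set up as in the
+ * tutorial programs:
+ * @code
+ *   FullMatrix<PetscScalar>   cell_matrix (fe.dofs_per_cell,
+ *                                          fe.dofs_per_cell);
+ *   std::vector<unsigned int> local_dof_indices (fe.dofs_per_cell);
+ *
+ *   for (typename DoFHandler<dim>::active_cell_iterator
+ *          cell = dof_handler.begin_active();
+ *        cell != dof_handler.end(); ++cell)
+ *     {
+ *       cell_matrix = 0;
+ *       // ...integrate the local contributions into cell_matrix...
+ *       cell->get_dof_indices (local_dof_indices);
+ *       matrix.add (local_dof_indices, cell_matrix);
+ *     }
+ *   matrix.compress (VectorOperation::add);
+ * @endcode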
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ void add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<PetscScalar> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
- const std::vector<PetscScalar> &values,
++ const std::vector<PetscScalar> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * If the present object (from a
+ * derived class of this one) happens
+ * to be a sparse matrix, then this
+ * function adds some new entries to
+ * the matrix if they didn't exist
+ * before, very much in contrast to
+ * the SparseMatrix class which
+ * throws an error if the entry does
+ * not exist.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
- const PetscScalar *values,
++ const PetscScalar *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Remove all elements from
+ * this <tt>row</tt> by setting
+ * them to zero. The function
+ * does not modify the number
+ * of allocated nonzero
+ * entries, it only sets some
+ * entries to zero. It may drop
+ * them from the sparsity
+ * pattern, though (but retains
+ * the allocated memory in case
+ * new entries are again added
+ * later).
+ *
+ * This operation is used in
+ * eliminating constraints (e.g. due to
+ * hanging nodes) and makes sure that
+ * we can write this modification to
+ * the matrix without having to read
+ * entries (such as the locations of
+ * non-zero elements) from it --
+ * without this operation, removing
+ * constraints on parallel matrices is
+ * a rather complicated procedure.
+ *
+ * The second parameter can be used to
+ * set the diagonal entry of this row
+ * to a value different from zero. The
+ * default is to set it to zero.
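+ *
+ * For illustration, eliminating a single constrained degree of
+ * freedom with (hypothetical) global index <tt>constrained_dof</tt>
+ * while keeping a one on the diagonal might look like this:
+ * @code
+ *   matrix.clear_row (constrained_dof, 1.);
+ * @endcode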
+ */
+ void clear_row (const unsigned int row,
+ const PetscScalar new_diag_value = 0);
+
+ /**
+ * Same as clear_row(), except that it
+ * works on a number of rows at once.
+ *
+ * The second parameter can be used to
+ * set the diagonal entries of all
+ * cleared rows to something different
+ * from zero. Note that all of these
+ * diagonal entries get the same value
+ * -- if you want different values for
+ * the diagonal entries, you have to
+ * set them by hand.
+ */
+ void clear_rows (const std::vector<unsigned int> &rows,
+ const PetscScalar new_diag_value = 0);
+
+ /**
+ * PETSc matrices store their own
+ * sparsity patterns. So, in analogy to
+ * our own SparsityPattern class,
+ * this function compresses the
+ * sparsity pattern and allows the
+ * resulting matrix to be used in all
+ * other operations where before only
+ * assembly functions were
+ * allowed. This function must
+ * therefore be called once you have
+ * assembled the matrix.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
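+ *
+ * For illustration, the typical sequence of events might look like
+ * this, assuming <tt>matrix</tt> has been initialized and
+ * <tt>i</tt>, <tt>j</tt>, <tt>value</tt> are set appropriately:
+ * @code
+ *   matrix.add (i, j, value);                 // assembly, possibly many calls
+ *   matrix.compress (VectorOperation::add);   // finish the assembly phase
+ *   const PetscReal norm = matrix.l1_norm (); // only now allowed
+ * @endcode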
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+ /**
+ * Return the value of the entry
+ * (<i>i,j</i>). This may be an
+ * expensive operation and you should
+ * always take care where to call this
+ * function. In contrast to the
+ * respective function in the
+ * @p MatrixBase class, we don't
+ * throw an exception if the respective
+ * entry doesn't exist in the sparsity
+ * pattern of this class, since PETSc
+ * does not transmit this information.
+ *
+ * This function is therefore exactly
+ * equivalent to the <tt>el()</tt> function.
+ */
+ PetscScalar operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the value of the matrix entry
+ * (<i>i,j</i>). If this entry does not
+ * exist in the sparsity pattern, then
+ * zero is returned. While this may be
+ * convenient in some cases, note that
+ * it is simple to write algorithms
+ * that are slow compared to an optimal
+ * solution, since the sparsity of the
+ * matrix is not used.
+ */
+ PetscScalar el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic.
+ *
+ * Since we do not have direct access
+ * to the underlying data structure,
+ * this function is no faster than the
+ * elementwise access using the el()
+ * function. However, we provide this
+ * function for compatibility with the
+ * SparseMatrix class.
+ */
+ PetscScalar diag_element (const unsigned int i) const;
+
+ /**
+ * Return the number of rows in this
+ * matrix.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the number of columns in this
+ * matrix.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the local dimension of the
+ * matrix, i.e. the number of rows
+ * stored on the present MPI
+ * process. For sequential matrices,
+ * this number is the same as m(),
+ * but for parallel matrices it may be
+ * smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which rows of
+ * this matrix are stored
+ * locally. The first number is
+ * the index of the first
+ * row stored, the second
+ * the index of the one past
+ * the last one that is stored
+ * locally. If this is a
+ * sequential matrix, then the
+ * result will be the pair
+ * (0,m()), otherwise it will be
+ * a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
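+ *
+ * For illustration, working only on the locally stored rows of a
+ * (square) matrix <tt>matrix</tt> might look like this:
+ * @code
+ *   const std::pair<unsigned int,unsigned int>
+ *     range = matrix.local_range ();
+ *   for (unsigned int row=range.first; row<range.second; ++row)
+ *     matrix.set (row, row, 1.);
+ *   matrix.compress (VectorOperation::insert);
+ * @endcode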
+ */
+ std::pair<unsigned int, unsigned int>
+ local_range () const;
+
+ /**
+ * Return whether @p index is
+ * in the local range or not,
+ * see also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with this
+ * matrix. This function has to be
+ * implemented in derived classes.
+ */
+ virtual const MPI_Comm &get_mpi_communicator () const = 0;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Number of entries in a specific row.
+ */
+ unsigned int row_length (const unsigned int row) const;
+
+ /**
+ * Return the $l_1$-norm of the matrix, that is
+ * $|M|_1 = \max_j \sum_i |M_{ij}|$
+ * (maximum of the column sums). This is the
+ * natural matrix norm that is compatible
+ * with the $l_1$-norm for vectors, i.e.
+ * $|Mv|_1 \leq |M|_1 |v|_1$.
+ * (cf. Haemmerlin-Hoffmann:
+ * Numerische Mathematik)
+ */
+ PetscReal l1_norm () const;
+
+ /**
+ * Return the $l_\infty$-norm of the
+ * matrix, that is
+ * $|M|_\infty = \max_i \sum_j |M_{ij}|$
+ * (maximum of the row sums). This is the
+ * natural matrix norm that is compatible
+ * with the $l_\infty$-norm of vectors, i.e.
+ * $|Mv|_\infty \leq |M|_\infty |v|_\infty$.
+ * (cf. Haemmerlin-Hoffmann:
+ * Numerische Mathematik)
+ */
+ PetscReal linfty_norm () const;
+
+ /**
+ * Return the Frobenius norm of the
+ * matrix, i.e. the square root of the
+ * sum of squares of all entries in the
+ * matrix.
+ */
+ PetscReal frobenius_norm () const;
+
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix,
+ * i.e. $\left(v,Mv\right)$. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to
+ * be quadratic for this operation.
+ *
+ * The implementation of this function
+ * is not as efficient as the one in
+ * the @p MatrixBase class used in
+ * deal.II (i.e. the original one, not
+ * the PETSc wrapper class) since PETSc
+ * doesn't support this operation and
+ * needs a temporary vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix (of type
+ * PETScWrappers::MPI::SparseMatrix),
+ * then the given vector has to be
+ * a distributed vector as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither
+ * may the vector be.
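+ *
+ * For illustration, with <tt>mass_matrix</tt> an assembled mass
+ * matrix and <tt>v</tt> a compatible vector of nodal values, the
+ * square of the $L_2$ norm of the corresponding finite element
+ * function might be computed as
+ * @code
+ *   const PetscScalar l2_norm_square = mass_matrix.matrix_norm_square (v);
+ * @endcode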
+ */
+ PetscScalar matrix_norm_square (const VectorBase &v) const;
+
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ *
+ * The implementation of this function
+ * is not as efficient as the one in
+ * the @p MatrixBase class used in
+ * deal.II (i.e. the original one, not
+ * the PETSc wrapper class) since PETSc
+ * doesn't support this operation and
+ * needs a temporary vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix (of type
+ * PETScWrappers::MPI::SparseMatrix),
+ * then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ PetscScalar matrix_scalar_product (const VectorBase &u,
+ const VectorBase &v) const;
#if DEAL_II_PETSC_VERSION_GTE(3,1,0)
namespace PETScWrappers
{
- /**
- * Implementation of a parallel matrix class based on PETSc <tt>MatShell</tt> matrix-type.
- * This base class implements only the interface to the PETSc matrix object,
- * while all the functionality is contained in the matrix-vector
- * multiplication which must be reimplmented in derived classes.
- *
- * This interface is an addition to the dealii::MatrixFree class to realize
- * user-defined matrix-classes together with PETSc solvers and functionalities.
- * See also the documentation of dealii::MatrixFree class and step-37 and step-48.
- *
- * Similar to other matrix classes in namespaces PETScWrappers and PETScWrappers::MPI,
- * the MatrxiFree class provides the usual matrix-vector multiplication
- * <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
- * which is pure virtual and must be reimplemented in derived classes.
- * Besides the usual interface, this class has a matrix-vector multiplication
- * <tt>vmult(Vec &dst, const Vec &src)</tt>
- * taking PETSc Vec objects, which will be called by
- * <tt>matrix_free_mult(Mat A, Vec src, Vec dst)</tt>
- * registered as matrix-vector multiplication of this PETSc matrix object.
- * The default implementation of the vmult function in the base class translates
- * the given PETSc <tt>Vec*</tt> vectors into a deal.II vector, calls
- * the usual vmult function with the usual interface and converts
- * the result back to PETSc <tt>Vec*</tt>. This could be made much more efficient
- * in derived classes without allocating new memory.
- *
- * @ingroup PETScWrappers
- * @ingroup Matrix1
- * @author Wolfgang Bangerth, Martin Steigemann, 2012
- */
+ /**
+ * Implementation of a parallel matrix class based on PETSc <tt>MatShell</tt> matrix-type.
+ * This base class implements only the interface to the PETSc matrix object,
+ * while all the functionality is contained in the matrix-vector
+ * multiplication, which must be reimplemented in derived classes.
+ *
+ * This interface is an addition to the dealii::MatrixFree class to realize
+ * user-defined matrix classes together with PETSc solvers and functionality.
+ * See also the documentation of the dealii::MatrixFree class and of step-37 and step-48.
+ *
+ * Similar to other matrix classes in namespaces PETScWrappers and PETScWrappers::MPI,
+ * the MatrixFree class provides the usual matrix-vector multiplication
+ * <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
+ * which is pure virtual and must be reimplemented in derived classes.
+ * Besides the usual interface, this class has a matrix-vector multiplication
+ * <tt>vmult(Vec &dst, const Vec &src)</tt>
+ * taking PETSc Vec objects, which will be called by
+ * <tt>matrix_free_mult(Mat A, Vec src, Vec dst)</tt>
+ * registered as matrix-vector multiplication of this PETSc matrix object.
+ * The default implementation of the vmult function in the base class translates
+ * the given PETSc <tt>Vec*</tt> vectors into a deal.II vector, calls
+ * the usual vmult function with the usual interface and converts
+ * the result back to PETSc <tt>Vec*</tt>. This could be made much more efficient
+ * in derived classes without allocating new memory.
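+ *
+ * For illustration, and assuming the usual <tt>using namespace dealii</tt>,
+ * a derived class might look like the following sketch, where the class
+ * name and the bodies of the multiplication functions are of course only
+ * placeholders:
+ * @code
+ * class MyOperator : public PETScWrappers::MatrixFree
+ * {
+ *   public:
+ *     MyOperator (const MPI_Comm     &communicator,
+ *                 const unsigned int  m,
+ *                 const unsigned int  n,
+ *                 const unsigned int  local_rows,
+ *                 const unsigned int  local_columns)
+ *       : PETScWrappers::MatrixFree (communicator, m, n,
+ *                                    local_rows, local_columns)
+ *     {}
+ *
+ *     virtual void vmult (PETScWrappers::VectorBase       &dst,
+ *                         const PETScWrappers::VectorBase &src) const
+ *     {
+ *       // compute dst = M*src without ever storing M explicitly
+ *     }
+ *
+ *     virtual void Tvmult (PETScWrappers::VectorBase       &dst,
+ *                          const PETScWrappers::VectorBase &src) const
+ *     {
+ *       vmult (dst, src);       // assuming a symmetric operator
+ *     }
+ *
+ *     virtual void vmult_add (PETScWrappers::VectorBase       &dst,
+ *                             const PETScWrappers::VectorBase &src) const
+ *     {
+ *       // compute dst += M*src
+ *     }
+ *
+ *     virtual void Tvmult_add (PETScWrappers::VectorBase       &dst,
+ *                              const PETScWrappers::VectorBase &src) const
+ *     {
+ *       vmult_add (dst, src);   // assuming a symmetric operator
+ *     }
+ * };
+ * @endcode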
+ *
+ * @ingroup PETScWrappers
+ * @ingroup Matrix1
+ * @author Wolfgang Bangerth, Martin Steigemann, 2012
+ */
class MatrixFree : public MatrixBase
{
- public:
-
- /**
- * Default constructor. Create an
- * empty matrix object.
- */
- MatrixFree ();
-
- /**
- * Create a matrix object of
- * dimensions @p m times @p n
- * with communication happening
- * over the provided @p communicator.
- *
- * For the meaning of the @p local_rows
- * and @p local_columns parameters,
- * see the PETScWrappers::MPI::SparseMatrix
- * class documentation.
- *
- * As other PETSc matrices, also the
- * the matrix-free object needs to
- * have a size and to perform matrix
- * vector multiplications efficiently
- * in parallel also @p local_rows
- * and @p local_columns. But in contrast
- * to PETSc::SparseMatrix classes a
- * PETSc matrix-free object does not need
- * any estimation of non_zero entries
- * and has no option <tt>is_symmetric</tt>.
- */
- MatrixFree (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Create a matrix object of
- * dimensions @p m times @p n
- * with communication happening
- * over the provided @p communicator.
- *
- * As other PETSc matrices, also the
- * the matrix-free object needs to
- * have a size and to perform matrix
- * vector multiplications efficiently
- * in parallel also @p local_rows
- * and @p local_columns. But in contrast
- * to PETSc::SparseMatrix classes a
- * PETSc matrix-free object does not need
- * any estimation of non_zero entries
- * and has no option <tt>is_symmetric</tt>.
- */
- MatrixFree (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Constructor for the serial case:
- * Same function as
- * <tt>MatrixFree()</tt>, see above,
- * with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- MatrixFree (const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Constructor for the serial case:
- * Same function as
- * <tt>MatrixFree()</tt>, see above,
- * with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- MatrixFree (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Throw away the present matrix and
- * generate one that has the same
- * properties as if it were created by
- * the constructor of this class with
- * the same argument list as the
- * present function.
- */
- void reinit (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Throw away the present matrix and
- * generate one that has the same
- * properties as if it were created by
- * the constructor of this class with
- * the same argument list as the
- * present function.
- */
- void reinit (const MPI_Comm &communicator,
- const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Calls the @p reinit() function
- * above with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
-
- /**
- * Calls the @p reinit() function
- * above with <tt>communicator = MPI_COMM_WORLD</tt>.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &local_rows_per_process,
- const std::vector<unsigned int> &local_columns_per_process,
- const unsigned int this_process);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor.
- */
- void clear ();
-
- /**
- * Return a reference to the MPI
- * communicator object in use with
- * this matrix.
- */
- const MPI_Comm & get_mpi_communicator () const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix (of type
- * PETScWrappers::MPI::SparseMatrix),
- * then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void vmult (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * Matrix-vector multiplication: let
- * <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this matrix. This
- * function does the same as @p vmult()
- * but takes the transposed matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void vmult_add (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as @p vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that if the current object
- * represents a parallel distributed
- * matrix then both vectors have to be
- * distributed vectors as
- * well. Conversely, if the matrix is
- * not distributed, then neither of the
- * vectors may be.
- */
- virtual
- void Tvmult_add (VectorBase &dst,
- const VectorBase &src) const = 0;
-
- /**
- * The matrix-vector multiplication
- * called by @p matrix_free_mult().
- * This function can be reimplemented
- * in derived classes for efficiency. The default
- * implementation copies the given vectors
- * into PETScWrappers::*::Vector
- * and calls <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
- * which is purely virtual and must be reimplemented
- * in derived classes.
- */
- virtual
- void vmult (Vec &dst, const Vec &src) const;
-
- private:
-
- /**
- * Copy of the communicator object to
- * be used for this parallel matrix-free object.
- */
- MPI_Comm communicator;
-
- /**
- * Callback-function registered
- * as the matrix-vector multiplication
- * of this matrix-free object
- * called by PETSc routines.
- * This function must be static and
- * takes a PETSc matrix @p A,
- * and vectors @p src and @p dst,
- * where <i>dst = A*src</i>
- *
- * Source and destination must
- * not be the same vector.
- *
- * This function calls
- * <tt>vmult(Vec &dst, const Vec &src)</tt>
- * which should be reimplemented in
- * derived classes.
- */
- static int matrix_free_mult (Mat A, Vec src, Vec dst);
-
- /**
- * Do the actual work for the
- * respective @p reinit() function and
- * the matching constructor,
- * i.e. create a matrix object. Getting rid
- * of the previous matrix is left to
- * the caller.
- */
- void do_reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int local_rows,
- const unsigned int local_columns);
- };
+ public:
+
+ /**
+ * Default constructor. Create an
+ * empty matrix object.
+ */
+ MatrixFree ();
+
+ /**
+ * Create a matrix object of
+ * dimensions @p m times @p n
+ * with communication happening
+ * over the provided @p communicator.
+ *
+ * For the meaning of the @p local_rows
+ * and @p local_columns parameters,
+ * see the PETScWrappers::MPI::SparseMatrix
+ * class documentation.
+ *
+ * Like other PETSc matrices, the
+ * matrix-free object needs a size and,
+ * in order to perform matrix-vector
+ * multiplications efficiently in
+ * parallel, the @p local_rows and
+ * @p local_columns information. In
+ * contrast to the PETSc::SparseMatrix
+ * classes, however, a PETSc matrix-free
+ * object does not need an estimate of
+ * the number of nonzero entries and has
+ * no option <tt>is_symmetric</tt>.
+ */
+ MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Create a matrix object of
+ * dimensions @p m times @p n
+ * with communication happening
+ * over the provided @p communicator.
+ *
+ * Like other PETSc matrices, the
+ * matrix-free object needs a size and,
+ * in order to perform matrix-vector
+ * multiplications efficiently in
+ * parallel, the @p local_rows and
+ * @p local_columns information. In
+ * contrast to the PETSc::SparseMatrix
+ * classes, however, a PETSc matrix-free
+ * object does not need an estimate of
+ * the number of nonzero entries and has
+ * no option <tt>is_symmetric</tt>.
+ */
+ MatrixFree (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Constructor for the serial case:
+ * Same function as
+ * <tt>MatrixFree()</tt>, see above,
+ * with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Constructor for the serial case:
+ * Same function as
+ * <tt>MatrixFree()</tt>, see above,
+ * with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ MatrixFree (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Throw away the present matrix and
+ * generate one that has the same
+ * properties as if it were created by
+ * the constructor of this class with
+ * the same argument list as the
+ * present function.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Throw away the present matrix and
+ * generate one that has the same
+ * properties as if it were created by
+ * the constructor of this class with
+ * the same argument list as the
+ * present function.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Calls the @p reinit() function
+ * above with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+
+ /**
+ * Calls the @p reinit() function
+ * above with <tt>communicator = MPI_COMM_WORLD</tt>.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &local_rows_per_process,
+ const std::vector<unsigned int> &local_columns_per_process,
+ const unsigned int this_process);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this matrix.
+ */
+ const MPI_Comm &get_mpi_communicator () const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix (of type
+ * PETScWrappers::MPI::SparseMatrix),
+ * then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Matrix-vector multiplication: let
+ * <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this matrix. This
+ * function does the same as @p vmult()
+ * but takes the transposed matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void vmult_add (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as @p vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that if the current object
+ * represents a parallel distributed
+ * matrix then both vectors have to be
+ * distributed vectors as
+ * well. Conversely, if the matrix is
+ * not distributed, then neither of the
+ * vectors may be.
+ */
+ virtual
+ void Tvmult_add (VectorBase &dst,
+ const VectorBase &src) const = 0;
+
+ /**
+ * The matrix-vector multiplication
+ * called by @p matrix_free_mult().
+ * This function can be reimplemented
+ * in derived classes for efficiency. The default
+ * implementation copies the given vectors
+ * into PETScWrappers::*::Vector
+ * and calls <tt>vmult(VectorBase &dst, const VectorBase &src)</tt>
+ * which is purely virtual and must be reimplemented
+ * in derived classes.
+ */
+ virtual
- void vmult (Vec &dst, const Vec &src) const;
++ void vmult (Vec &dst, const Vec &src) const;
+
+ private:
+
+ /**
+ * Copy of the communicator object to
+ * be used for this parallel matrix-free object.
+ */
+ MPI_Comm communicator;
+
+ /**
+ * Callback-function registered
+ * as the matrix-vector multiplication
+ * of this matrix-free object
+ * called by PETSc routines.
+ * This function must be static and
+ * takes a PETSc matrix @p A,
+ * and vectors @p src and @p dst,
+ * where <i>dst = A*src</i>
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * This function calls
+ * <tt>vmult(Vec &dst, const Vec &src)</tt>
+ * which should be reimplemented in
+ * derived classes.
+ */
+ static int matrix_free_mult (Mat A, Vec src, Vec dst);
+
+ /**
+ * Do the actual work for the
+ * respective @p reinit() function and
+ * the matching constructor,
+ * i.e. create a matrix object. Getting rid
+ * of the previous matrix is left to
+ * the caller.
+ */
+ void do_reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int local_rows,
+ const unsigned int local_columns);
+ };
namespace MPI
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
- * class implements the functions that are specific to the PETSc SparseMatrix
- * base objects for a blocked sparse matrix, and leaves the actual work
- * relaying most of the calls to the individual blocks to the functions
- * implemented in the base class. See there also for a description of when
- * this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
- * not have external objects for the sparsity patterns. Thus, one does not
- * determine the size of the individual blocks of a block matrix of this type
- * by attaching a block sparsity pattern, but by calling reinit() to set the
- * number of blocks and then by setting the size of each block separately. In
- * order to fix the data structures of the block matrix, it is then necessary
- * to let it know that we have changed the sizes of the underlying
- * matrices. For this, one has to call the collect_sizes() function, for much
- * the same reason as is documented with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This
+ * class implements the functions that are specific to the PETSc SparseMatrix
+ * base objects for a blocked sparse matrix, and leaves the actual work of
+ * relaying most of the calls to the individual blocks to the functions
+ * implemented in the base class. See there also for a description of when
+ * this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do
+ * not have external objects for the sparsity patterns. Thus, one does not
+ * determine the size of the individual blocks of a block matrix of this type
+ * by attaching a block sparsity pattern, but by calling reinit() to set the
+ * number of blocks and then by setting the size of each block separately. In
+ * order to fix the data structures of the block matrix, it is then necessary
+ * to let it know that we have changed the sizes of the underlying
+ * matrices. For this, one has to call the collect_sizes() function, for much
+ * the same reason as is documented with the BlockSparsityPattern class.
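+ *
+ * For illustration, setting up a $2\times 2$ block system might look
+ * like the following sketch; how each individual block is
+ * reinit()ed depends on the application and is therefore only
+ * indicated by a comment:
+ * @code
+ *   PETScWrappers::MPI::BlockSparseMatrix system_matrix;
+ *   system_matrix.reinit (2, 2);
+ *   for (unsigned int r=0; r<2; ++r)
+ *     for (unsigned int c=0; c<2; ++c)
+ *       {
+ *         // system_matrix.block(r,c).reinit (...);
+ *       }
+ *   system_matrix.collect_sizes ();
+ * @endcode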
+ *
+ * @ingroup Matrix1
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix ();
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the
- * block matrices need to be the
- * same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * unitialized ones, i.e. ones
- * for which also the sizes are
- * not yet set. You have to do
- * that by calling the @p reinit
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void Tvmult (Vector &dst,
- const Vector &src) const;
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects.
- */
- void collect_sizes ();
-
- /**
- * Return a reference to the MPI
- * communicator object in use with
- * this matrix.
- */
- const MPI_Comm & get_mpi_communicator () const;
-
- /**
- * Make the clear() function in the
- * base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix ();
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the
+ * block matrices need to be the
+ * same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+ * uninitialized ones, i.e. ones
+ * for which also the sizes are
+ * not yet set. You have to do
+ * that by calling the @p reinit
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ void vmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void vmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void vmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void vmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ void Tvmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
- void Tvmult (BlockVector &dst,
++ void Tvmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void Tvmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void Tvmult (Vector &dst,
+ const Vector &src) const;
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects.
+ */
+ void collect_sizes ();
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this matrix.
+ */
+ const MPI_Comm &get_mpi_communicator () const;
+
+ /**
+ * Make the clear() function in the
+ * base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
};
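// --- Editorial usage sketch, not part of the original header -----------------
// The comments above describe a three-step workflow: reinit() sets only the
// block structure, each block must then be sized through its own reinit()
// call, and collect_sizes() must follow any change of the block sizes.  The
// enclosing namespace of BlockSparseMatrix and the per-block reinit()
// arguments are not part of this hunk and are therefore elided here.
//
//   BlockSparseMatrix system_matrix;
//   system_matrix.reinit (2, 2);                // block structure only
//   for (unsigned int r = 0; r < 2; ++r)
//     for (unsigned int c = 0; c < 2; ++c)
//       system_matrix.block(r, c).reinit (/* per-block size and sparsity */);
//   system_matrix.collect_sizes ();             // refresh the cached sizes
//
//   system_matrix = 0;                          // zero all entries, keep the
//                                               // sparsity pattern
//   system_matrix.vmult (dst, src);             // dst = M * src, where dst and
//                                               // src are block vectors of
//                                               // matching sizes declared
//                                               // elsewhere
// ------------------------------------------------------------------------------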
namespace MPI
{
- /*! @addtogroup PETScWrappers
- *@{
- */
-
- /**
- * An implementation of block vectors based on the parallel vector class
- * implemented in PETScWrappers. While the base class provides for most of the
- * interface, this class handles the actual allocation of vectors and provides
- * functions that are specific to the underlying vector type.
- *
- * The model of distribution of data is such that each of the blocks is
- * distributed across all MPI processes named in the MPI communicator. I.e. we
- * don't just distribute the whole vector, but each component. In the
- * constructors and reinit() functions, one therefore not only has to specify
- * the sizes of the individual blocks, but also the number of elements of each
- * of these blocks to be stored on the local process.
- *
- * @ingroup Vectors
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Wolfgang Bangerth, 2004
- */
+ /*! @addtogroup PETScWrappers
+ *@{
+ */
+
+ /**
+ * An implementation of block vectors based on the parallel vector class
+ * implemented in PETScWrappers. While the base class provides for most of the
+ * interface, this class handles the actual allocation of vectors and provides
+ * functions that are specific to the underlying vector type.
+ *
+ * The model of distribution of data is such that each of the blocks is
+ * distributed across all MPI processes named in the MPI communicator. I.e. we
+ * don't just distribute the whole vector, but each component. In the
+ * constructors and reinit() functions, one therefore not only has to specify
+ * the sizes of the individual blocks, but also the number of elements of each
+ * of these blocks to be stored on the local process.
+ *
+ * @ingroup Vectors
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Wolfgang Bangerth, 2004
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Default constructor. Generate an
- * empty vector without any blocks.
- */
- BlockVector ();
-
- /**
- * Constructor. Generate a block
- * vector with @p n_blocks blocks,
- * each of which is a parallel
- * vector across @p communicator
- * with @p block_size elements of
- * which @p local_size elements are
- * stored on the present process.
- */
- explicit BlockVector (const unsigned int n_blocks,
- const MPI_Comm &communicator,
- const unsigned int block_size,
- const unsigned int local_size);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the parallel vector
- * to those of the given argument and
- * copy the elements.
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>block_sizes.size()</tt> and
- * initialize each block with
- * <tt>block_sizes[i]</tt> zero
- * elements. The individual blocks
- * are distributed across the given
- * communicator, and each store
- * <tt>local_elements[i]</tt>
- * elements on the present process.
- */
- BlockVector (const std::vector<unsigned int> &block_sizes,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &local_elements);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Copy operator: fill all components
- * of the vector that are locally
- * stored with the given scalar value.
- */
- BlockVector & operator = (const value_type s);
-
- /**
- * Copy operator for arguments of the
- * same type.
- */
- BlockVector &
- operator= (const BlockVector &V);
-
- /**
- * Copy the given sequential
- * (non-distributed) block vector
- * into the present parallel block
- * vector. It is assumed that they
- * have the same size, and this
- * operation does not change the
- * partitioning of the parallel
- * vectors by which its elements are
- * distributed across several MPI
- * processes. What this operation
- * therefore does is to copy that
- * chunk of the given vector @p v
- * that corresponds to elements of
- * the target vector that are stored
- * locally, and copies them, for each
- * of the individual blocks of this
- * object. Elements that are not
- * stored locally are not touched.
- *
- * This being a parallel vector, you
- * must make sure that @em all
- * processes call this function at
- * the same time. It is not possible
- * to change the local part of a
- * parallel vector on only one
- * process, independent of what other
- * processes do, with this function.
- */
- BlockVector &
- operator = (const PETScWrappers::BlockVector &v);
-
- /**
- * Reinitialize the BlockVector to
- * contain @p n_blocks of size @p
- * block_size, each of which stores
- * @p local_size elements
- * locally. The @p communicator
- * argument denotes which MPI channel
- * each of these blocks shall
- * communicate.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const unsigned int n_blocks,
- const MPI_Comm &communicator,
- const unsigned int block_size,
- const unsigned int local_size,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector such
- * that it contains
- * <tt>block_sizes.size()</tt>
- * blocks. Each block is
- * reinitialized to dimension
- * <tt>block_sizes[i]</tt>. Each of
- * them stores
- * <tt>local_sizes[i]</tt> elements
- * on the present process.
- *
- * If the number of blocks is the
- * same as before this function
- * was called, all vectors remain
- * the same and reinit() is
- * called for each vector.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() of one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const std::vector<unsigned int> &block_sizes,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &local_sizes,
- const bool fast=false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast=false);
-
- /**
- * Return a reference to the MPI
- * communicator object in use with
- * this vector.
- */
- const MPI_Comm & get_mpi_communicator () const;
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
- /**
- * Exception
- */
- DeclException0 (ExcNonMatchingBlockVectors);
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Default constructor. Generate an
+ * empty vector without any blocks.
+ */
+ BlockVector ();
+
+ /**
+ * Constructor. Generate a block
+ * vector with @p n_blocks blocks,
+ * each of which is a parallel
+ * vector across @p communicator
+ * with @p block_size elements of
+ * which @p local_size elements are
+ * stored on the present process.
+ */
+ explicit BlockVector (const unsigned int n_blocks,
+ const MPI_Comm &communicator,
+ const unsigned int block_size,
+ const unsigned int local_size);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the parallel vector
+ * to those of the given argument and
+ * copy the elements.
+ */
- BlockVector (const BlockVector &V);
++ BlockVector (const BlockVector &V);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>block_sizes.size()</tt> and
+ * initialize each block with
+ * <tt>block_sizes[i]</tt> zero
+ * elements. The individual blocks
+ * are distributed across the given
+ * communicator, and each store
+ * <tt>local_elements[i]</tt>
+ * elements on the present process.
+ */
+ BlockVector (const std::vector<unsigned int> &block_sizes,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &local_elements);
+
+ /**
+ * Destructor. Clears memory
+ */
+ ~BlockVector ();
+
+ /**
+ * Copy operator: fill all components
+ * of the vector that are locally
+ * stored with the given scalar value.
+ */
+ BlockVector &operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of the
+ * same type.
+ */
+ BlockVector &
+ operator= (const BlockVector &V);
+
+ /**
+ * Copy the given sequential
+ * (non-distributed) block vector
+ * into the present parallel block
+ * vector. It is assumed that they
+ * have the same size, and this
+ * operation does not change the
+ * partitioning of the parallel
+ * vectors by which its elements are
+ * distributed across several MPI
+ * processes. What this operation
+ * therefore does is to copy, for each
+ * of the individual blocks of this
+ * object, the chunk of the given
+ * vector @p v that corresponds to
+ * elements of the target vector that
+ * are stored locally. Elements that are not
+ * stored locally are not touched.
+ *
+ * This being a parallel vector, you
+ * must make sure that @em all
+ * processes call this function at
+ * the same time. It is not possible
+ * to change the local part of a
+ * parallel vector on only one
+ * process, independent of what other
+ * processes do, with this function.
+ */
+ BlockVector &
+ operator = (const PETScWrappers::BlockVector &v);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain @p n_blocks of size @p
+ * block_size, each of which stores
+ * @p local_size elements
+ * locally. The @p communicator
+ * argument denotes the MPI channel
+ * over which each of these blocks
+ * shall communicate.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const unsigned int n_blocks,
+ const MPI_Comm &communicator,
+ const unsigned int block_size,
+ const unsigned int local_size,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector such
+ * that it contains
+ * <tt>block_sizes.size()</tt>
+ * blocks. Each block is
+ * reinitialized to dimension
+ * <tt>block_sizes[i]</tt>. Each of
+ * them stores
+ * <tt>local_sizes[i]</tt> elements
+ * on the present process.
+ *
+ * If the number of blocks is the
+ * same as before this function
+ * was called, all vectors remain
+ * the same and reinit() is
+ * called for each vector.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() of one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const std::vector<unsigned int> &block_sizes,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &local_sizes,
+ const bool fast=false);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast=false);
+
+ /**
+ * Return a reference to the MPI
+ * communicator object in use with
+ * this vector.
+ */
+ const MPI_Comm &get_mpi_communicator () const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNonMatchingBlockVectors);
};
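// --- Editorial usage sketch, not part of the original header -----------------
// Illustrates the constructors and reinit() variants declared above.  Assumes
// MPI and PETSc are already initialized, that this runs on a single process
// (so every element is stored locally), and that the class is reachable as
// PETScWrappers::MPI::BlockVector; neither assumption is part of this hunk.
//
//   const unsigned int n_blocks   = 2;
//   const unsigned int block_size = 100;
//   const unsigned int local_size = block_size;       // single-process case
//
//   PETScWrappers::MPI::BlockVector v (n_blocks, MPI_COMM_WORLD,
//                                      block_size, local_size);
//   v = 0.;                                           // zero the local entries
//
//   // Always resize through the block vector itself (never through an
//   // individual block), so that the cached block sizes stay consistent:
//   std::vector<unsigned int> block_sizes (3, 50);
//   std::vector<unsigned int> local_sizes (3, 50);
//   v.reinit (block_sizes, MPI_COMM_WORLD, local_sizes);
//   v.print (std::cout);                              // <iostream> assumed
// ------------------------------------------------------------------------------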
- /*@}*/
+ /*@}*/
- /*----------------------- Inline functions ----------------------------------*/
+ /*----------------------- Inline functions ----------------------------------*/
inline
class PreconditionerBase;
- /**
- * Base class for solver classes using the PETSc solvers. Since solvers in
- * PETSc are selected based on flags passed to a generic solver object,
- * basically all the actual solver calls happen in this class, and derived
- * classes simply set the right flags to select one solver or another, or to
- * set certain parameters for individual solvers.
- *
- * Optionally, the user can create a solver derived from the
- * SolverBase class and can set the default arguments necessary to
- * solve the linear system of equations with SolverControl. These
- * default options can be overridden by specifying command line
- * arguments of the form @p -ksp_*. For example,
- * @p -ksp_monitor_true_residual prints out true residual norm
- * (unpreconditioned) at each iteration and @p -ksp_view provides
- * information about the linear solver and the preconditioner used in
- * the current context. The type of the solver can also be changed
- * during runtime by specifying @p -ksp_type {richardson, cg, gmres,
- * fgmres, ..} to dynamically test the optimal solver along with a
- * suitable preconditioner set using @p -pc_type {jacobi, bjacobi,
- * ilu, lu, ..}. There are several other command line options
- * available to modify the behavior of the PETSc linear solver and can
- * be obtained from the <a
- * href="http://www.mcs.anl.gov/petsc">documentation and manual
- * pages</a>.
- *
- * @note Repeated calls to solve() on a solver object with a Preconditioner
- * must be used with care. The preconditioner is initialized in the first call
- * to solve() and subsequent calls reuse the solver and preconditioner
- * object. This is done for performance reasons. The solver and preconditioner
- * can be reset by calling reset().
- *
- * One of the gotchas of PETSc is that -- in particular in MPI mode -- it
- * often does not produce very helpful error messages. In order to save
- * other users some time in searching a hard to track down error, here is
- * one situation and the error message one gets there:
- * when you don't specify an MPI communicator to your solver's constructor. In
- * this case, you will get an error of the following form from each of your
- * parallel processes:
- * @verbatim
- * [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c
- * [1]PETSC ERROR: Arguments must have same communicators!
- * [1]PETSC ERROR: Different communicators in the two objects: Argument # 1 and 2!
- * [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c
- * @endverbatim
- *
- * This error, on which one can spend a very long time figuring out
- * what exactly goes wrong, results from not specifying an MPI
- * communicator. Note that the communicator @em must match that of the
- * matrix and all vectors in the linear system which we want to
- * solve. Aggravating the situation is the fact that the default
- * argument to the solver classes, @p PETSC_COMM_SELF, is the
- * appropriate argument for the sequential case (which is why it is
- * the default argument), so this error only shows up in parallel
- * mode.
- *
- * @ingroup PETScWrappers
- * @author Wolfgang Bangerth, 2004
- */
+ /**
+ * Base class for solver classes using the PETSc solvers. Since solvers in
+ * PETSc are selected based on flags passed to a generic solver object,
+ * basically all the actual solver calls happen in this class, and derived
+ * classes simply set the right flags to select one solver or another, or to
+ * set certain parameters for individual solvers.
+ *
+ * Optionally, the user can create a solver derived from the
+ * SolverBase class and can set the default arguments necessary to
+ * solve the linear system of equations with SolverControl. These
+ * default options can be overridden by specifying command line
+ * arguments of the form @p -ksp_*. For example,
+ * @p -ksp_monitor_true_residual prints out the true residual norm
+ * (unpreconditioned) at each iteration and @p -ksp_view provides
+ * information about the linear solver and the preconditioner used in
+ * the current context. The type of the solver can also be changed
+ * during runtime by specifying @p -ksp_type {richardson, cg, gmres,
+ * fgmres, ..} to dynamically test the optimal solver along with a
+ * suitable preconditioner set using @p -pc_type {jacobi, bjacobi,
+ * ilu, lu, ..}. There are several other command line options
+ * available to modify the behavior of the PETSc linear solver; they can
+ * be found in the <a
+ * href="http://www.mcs.anl.gov/petsc">documentation and manual
+ * pages</a>.
+ *
+ * @note Repeated calls to solve() on a solver object with a Preconditioner
+ * must be used with care. The preconditioner is initialized in the first call
+ * to solve() and subsequent calls reuse the solver and preconditioner
+ * object. This is done for performance reasons. The solver and preconditioner
+ * can be reset by calling reset().
+ *
+ * One of the gotchas of PETSc is that -- in particular in MPI mode -- it
+ * often does not produce very helpful error messages. In order to save
+ * other users some time in searching for a hard-to-track-down error, here is
+ * one situation and the error message one gets there:
+ * when you don't specify an MPI communicator to your solver's constructor. In
+ * this case, you will get an error of the following form from each of your
+ * parallel processes:
+ * @verbatim
+ * [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c
+ * [1]PETSC ERROR: Arguments must have same communicators!
+ * [1]PETSC ERROR: Different communicators in the two objects: Argument # 1 and 2!
+ * [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c
+ * @endverbatim
+ *
+ * This error, on which one can spend a very long time figuring out
+ * what exactly goes wrong, results from not specifying an MPI
+ * communicator. Note that the communicator @em must match that of the
+ * matrix and all vectors in the linear system which we want to
+ * solve. Aggravating the situation is the fact that the default
+ * argument to the solver classes, @p PETSC_COMM_SELF, is the
+ * appropriate argument for the sequential case (which is why it is
+ * the default argument), so this error only shows up in parallel
+ * mode.
+ *
+ * @ingroup PETScWrappers
+ * @author Wolfgang Bangerth, 2004
+ */
class SolverBase
{
- public:
- /**
- * Constructor. Takes the solver
- * control object and the MPI
- * communicator over which parallel
- * computations are to happen.
- *
- * Note that the communicator used here
- * must match the communicator used in
- * the system matrix, solution, and
- * right hand side object of the solve
- * to be done with this
- * solver. Otherwise, PETSc will
- * generate hard to track down errors,
- * see the documentation of the
- * SolverBase class.
- */
- SolverBase (SolverControl &cn,
- const MPI_Comm &mpi_communicator);
-
- /**
- * Destructor.
- */
- virtual ~SolverBase ();
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on the
- * information provided by derived
- * classes and the object passed as a
- * preconditioner, one of the linear
- * solvers and preconditioners of PETSc
- * is chosen. Repeated calls to
- * solve() do not reconstruct the
- * preconditioner for performance
- * reasons. See class Documentation.
- */
- void
- solve (const MatrixBase &A,
- VectorBase &x,
- const VectorBase &b,
- const PreconditionerBase &preconditioner);
-
-
- /**
- * Resets the contained preconditioner
- * and solver object. See class
- * description for more details.
- */
- virtual void reset();
-
-
- /**
- * Sets a prefix name for the solver
- * object. Useful when customizing the
- * PETSc KSP object with command-line
- * options.
- */
- void set_prefix(const std::string &prefix);
-
-
- /**
- * Access to object that controls
- * convergence.
- */
- SolverControl & control() const;
-
- /**
- * Exception
- */
- DeclException1 (ExcPETScError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a PETSc function");
-
- protected:
-
- /**
- * Reference to the object that
- * controls convergence of the
- * iterative solver. In fact, for these
- * PETSc wrappers, PETSc does so
- * itself, but we copy the data from
- * this object before starting the
- * solution process, and copy the data
- * back into it afterwards.
- */
- SolverControl &solver_control;
-
- /**
- * Copy of the MPI communicator object
- * to be used for the solver.
- */
- const MPI_Comm mpi_communicator;
-
- /**
- * Function that takes a Krylov
- * Subspace Solver context object, and
- * sets the type of solver that is
- * requested by the derived class.
- */
- virtual void set_solver_type (KSP &ksp) const = 0;
-
- /**
- * Solver prefix name to qualify options
- * specific to the PETSc KSP object in the
- * current context.
- * Note: A hyphen (-) must NOT be given
- * at the beginning of the prefix name.
- * The first character of all runtime
- * options is AUTOMATICALLY the hyphen.
- */
- std::string prefix_name;
-
- private:
- /**
- * A function that is used in PETSc as
- * a callback to check on
- * convergence. It takes the
- * information provided from PETSc and
- * checks it against deal.II's own
- * SolverControl objects to see if
- * convergence has been reached.
- */
- static
+ public:
+ /**
+ * Constructor. Takes the solver
+ * control object and the MPI
+ * communicator over which parallel
+ * computations are to happen.
+ *
+ * Note that the communicator used here
+ * must match the communicator used in
+ * the system matrix, solution, and
+ * right hand side object of the solve
+ * to be done with this
+ * solver. Otherwise, PETSc will
+ * generate hard to track down errors,
+ * see the documentation of the
+ * SolverBase class.
+ */
- SolverBase (SolverControl &cn,
++ SolverBase (SolverControl &cn,
+ const MPI_Comm &mpi_communicator);
+
+ /**
+ * Destructor.
+ */
+ virtual ~SolverBase ();
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Depending on the
+ * information provided by derived
+ * classes and the object passed as a
+ * preconditioner, one of the linear
+ * solvers and preconditioners of PETSc
+ * is chosen. Repeated calls to
+ * solve() do not reconstruct the
+ * preconditioner for performance
+ * reasons. See class Documentation.
+ */
+ void
+ solve (const MatrixBase &A,
+ VectorBase &x,
+ const VectorBase &b,
+ const PreconditionerBase &preconditioner);
+
+
+ /**
+ * Resets the contained preconditioner
+ * and solver object. See class
+ * description for more details.
+ */
+ virtual void reset();
+
+
+ /**
+ * Sets a prefix name for the solver
+ * object. Useful when customizing the
+ * PETSc KSP object with command-line
+ * options.
+ */
+ void set_prefix(const std::string &prefix);
+
+
+ /**
+ * Access to object that controls
+ * convergence.
+ */
+ SolverControl &control() const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcPETScError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a PETSc function");
+
+ protected:
+
+ /**
+ * Reference to the object that
+ * controls convergence of the
+ * iterative solver. In fact, for these
+ * PETSc wrappers, PETSc does so
+ * itself, but we copy the data from
+ * this object before starting the
+ * solution process, and copy the data
+ * back into it afterwards.
+ */
+ SolverControl &solver_control;
+
+ /**
+ * Copy of the MPI communicator object
+ * to be used for the solver.
+ */
+ const MPI_Comm mpi_communicator;
+
+ /**
+ * Function that takes a Krylov
+ * Subspace Solver context object, and
+ * sets the type of solver that is
+ * requested by the derived class.
+ */
+ virtual void set_solver_type (KSP &ksp) const = 0;
+
+ /**
+ * Solver prefix name to qualify options
+ * specific to the PETSc KSP object in the
+ * current context.
+ * Note: A hyphen (-) must NOT be given
+ * at the beginning of the prefix name.
+ * The first character of all runtime
+ * options is AUTOMATICALLY the hyphen.
+ */
+ std::string prefix_name;
+
+ private:
+ /**
+ * A function that is used in PETSc as
+ * a callback to check on
+ * convergence. It takes the
+ * information provided from PETSc and
+ * checks it against deal.II's own
+ * SolverControl objects to see if
+ * convergence has been reached.
+ */
+ static
#ifdef PETSC_USE_64BIT_INDICES
- PetscErrorCode
+ PetscErrorCode
#else
- int
+ int
#endif
- convergence_test (KSP ksp,
+ convergence_test (KSP ksp,
#ifdef PETSC_USE_64BIT_INDICES
- const PetscInt iteration,
+ const PetscInt iteration,
#else
- const int iteration,
+ const int iteration,
#endif
- const PetscReal residual_norm,
- KSPConvergedReason *reason,
- void *solver_control);
-
- /**
- * A structure that contains the PETSc
- * solver and preconditioner
- * objects. This object is preserved
- * between subsequent calls to the
- * solver if the same preconditioner is
- * used as in the previous solver
- * step. This may save some computation
- * time, if setting up a preconditioner
- * is expensive, such as in the case of
- * an ILU for example.
- *
- * The actual declaration of this class
- * is complicated by the fact that
- * PETSc changed its solver interface
- * completely and incompatibly between
- * versions 2.1.6 and 2.2.0 :-(
- *
- * Objects of this type are explicitly
- * created, but are destroyed when the
- * surrounding solver object goes out
- * of scope, or when we assign a new
- * value to the pointer to this
- * object. The respective *Destroy
- * functions are therefore written into
- * the destructor of this object, even
- * though the object does not have a
- * constructor.
- */
- struct SolverData
- {
- /**
- * Destructor
- */
- ~SolverData ();
-
- /**
- * Objects for Krylov subspace
- * solvers and preconditioners.
- */
- KSP ksp;
- PC pc;
- };
-
- /**
- * Pointer to an object that stores the
- * solver context. This is recreated in
- * the main solver routine if
- * necessary.
- */
- std_cxx1x::shared_ptr<SolverData> solver_data;
+ const PetscReal residual_norm,
+ KSPConvergedReason *reason,
+ void *solver_control);
+
+ /**
+ * A structure that contains the PETSc
+ * solver and preconditioner
+ * objects. This object is preserved
+ * between subsequent calls to the
+ * solver if the same preconditioner is
+ * used as in the previous solver
+ * step. This may save some computation
+ * time, if setting up a preconditioner
+ * is expensive, such as in the case of
+ * an ILU for example.
+ *
+ * The actual declaration of this class
+ * is complicated by the fact that
+ * PETSc changed its solver interface
+ * completely and incompatibly between
+ * versions 2.1.6 and 2.2.0 :-(
+ *
+ * Objects of this type are explicitly
+ * created, but are destroyed when the
+ * surrounding solver object goes out
+ * of scope, or when we assign a new
+ * value to the pointer to this
+ * object. The respective *Destroy
+ * functions are therefore written into
+ * the destructor of this object, even
+ * though the object does not have a
+ * constructor.
+ */
+ struct SolverData
+ {
+ /**
+ * Destructor
+ */
+ ~SolverData ();
+
+ /**
+ * Objects for Krylov subspace
+ * solvers and preconditioners.
+ */
+ KSP ksp;
+ PC pc;
+ };
+
+ /**
+ * Pointer to an object that stores the
+ * solver context. This is recreated in
+ * the main solver routine if
+ * necessary.
+ */
+ std_cxx1x::shared_ptr<SolverData> solver_data;
};
*/
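// --- Editorial usage sketch, not part of the original header -----------------
// Shows how the solve() interface documented above is typically driven through
// a class derived from SolverBase.  The derived solver and preconditioner
// names (PETScWrappers::SolverCG, PETScWrappers::PreconditionJacobi) and the
// matrix/vector objects are assumptions, not part of this hunk.  Note that
// mpi_communicator must be the same communicator used by the matrix and the
// vectors, as emphasized in the class documentation.
//
//   SolverControl solver_control (1000, 1e-10);
//   PETScWrappers::SolverCG solver (solver_control, mpi_communicator);
//   PETScWrappers::PreconditionJacobi preconditioner (system_matrix);
//
//   solver.set_prefix ("my_");   // no leading hyphen; options then read
//                                // -my_ksp_type, -my_pc_type, ... on the
//                                // command line
//   solver.solve (system_matrix, solution, right_hand_side, preconditioner);
// ------------------------------------------------------------------------------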
namespace PETScWrappers
{
- // forward declaration
+ // forward declaration
class VectorBase;
- /**
- * @cond internal
- */
+ /**
+ * @cond internal
+ */
- /**
- * A namespace for internal implementation details of the PETScWrapper
- * members.
- * @ingroup PETScWrappers
- */
+ /**
+ * A namespace for internal implementation details of the PETScWrapper
+ * members.
+ * @ingroup PETScWrappers
+ */
namespace internal
{
- /**
- * Since access to PETSc vectors only
- * goes through functions, rather than by
- * obtaining a reference to a vector
- * element, we need a wrapper class that
- * acts as if it was a reference, and
- * basically redirects all accesses (read
- * and write) to member functions of this
- * class.
- *
- * This class implements such a wrapper:
- * it is initialized with a vector and an
- * element within it, and has a
- * conversion operator to extract the
- * scalar value of this element. It also
- * has a variety of assignment operator
- * for writing to this one element.
- * @ingroup PETScWrappers
- */
+ /**
+ * Since access to PETSc vectors only
+ * goes through functions, rather than by
+ * obtaining a reference to a vector
+ * element, we need a wrapper class that
+ * acts as if it was a reference, and
+ * basically redirects all accesses (read
+ * and write) to member functions of this
+ * class.
+ *
+ * This class implements such a wrapper:
+ * it is initialized with a vector and an
+ * element within it, and has a
+ * conversion operator to extract the
+ * scalar value of this element. It also
+ * has a variety of assignment operators
+ * for writing to this one element.
+ * @ingroup PETScWrappers
+ */
class VectorReference
{
- private:
- /**
- * Constructor. It is made private so
- * as to only allow the actual vector
- * class to create it.
- */
- VectorReference (const VectorBase &vector,
- const unsigned int index);
-
- public:
- /**
- * This looks like a copy operator,
- * but does something different than
- * usual. In particular, it does not
- * copy the member variables of this
- * reference. Rather, it handles the
- * situation where we have two
- * vectors @p v and @p w, and assign
- * elements like in
- * <tt>v(i)=w(i)</tt>. Here, both
- * left and right hand side of the
- * assignment have data type
- * VectorReference, but what we
- * really mean is to assign the
- * vector elements represented by the
- * two references. This operator
- * implements this operation. Note
- * also that this allows us to make
- * the assignment operator const.
- */
- const VectorReference & operator = (const VectorReference &r) const;
-
- /**
- * The same function as above, but
- * for non-const reference
- * objects. The function is needed
- * since the compiler might otherwise
- * automatically generate a copy
- * operator for non-const objects.
- */
- VectorReference & operator = (const VectorReference &r);
-
- /**
- * Set the referenced element of the
- * vector to <tt>s</tt>.
- */
- const VectorReference & operator = (const PetscScalar &s) const;
-
- /**
- * Add <tt>s</tt> to the referenced
- * element of the vector.
- */
- const VectorReference & operator += (const PetscScalar &s) const;
-
- /**
- * Subtract <tt>s</tt> from the
- * referenced element of the vector.
- */
- const VectorReference & operator -= (const PetscScalar &s) const;
-
- /**
- * Multiply the referenced element of
- * the vector by <tt>s</tt>.
- */
- const VectorReference & operator *= (const PetscScalar &s) const;
-
- /**
- * Divide the referenced element of
- * the vector by <tt>s</tt>.
- */
- const VectorReference & operator /= (const PetscScalar &s) const;
-
- /**
- * Convert the reference to an actual
- * value, i.e. return the value of
- * the referenced element of the
- * vector.
- */
- operator PetscScalar () const;
-
- /**
- * Exception
- */
- DeclException1 (ExcPETScError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a PETSc function");
- /**
- * Exception
- */
- DeclException3 (ExcAccessToNonlocalElement,
- int, int, int,
- << "You tried to access element " << arg1
- << " of a distributed vector, but only elements "
- << arg2 << " through " << arg3
- << " are stored locally and can be accessed.");
- /**
- * Exception.
- */
- DeclException2 (ExcWrongMode,
- int, int,
- << "You tried to do a "
- << (arg1 == 1 ?
- "'set'" :
- (arg1 == 2 ?
- "'add'" : "???"))
- << " operation but the vector is currently in "
- << (arg2 == 1 ?
- "'set'" :
- (arg2 == 2 ?
- "'add'" : "???"))
- << " mode. You first have to call 'compress()'.");
-
- private:
- /**
- * Point to the vector we are
- * referencing.
- */
- const VectorBase &vector;
-
- /**
- * Index of the referenced element of
- * the vector.
- */
- const unsigned int index;
-
- /**
- * Make the vector class a friend, so
- * that it can create objects of the
- * present type.
- */
- friend class ::dealii::PETScWrappers::VectorBase;
+ private:
+ /**
+ * Constructor. It is made private so
+ * as to only allow the actual vector
+ * class to create it.
+ */
- VectorReference (const VectorBase &vector,
++ VectorReference (const VectorBase &vector,
+ const unsigned int index);
+
+ public:
+ /**
+ * This looks like a copy operator,
+ * but does something different than
+ * usual. In particular, it does not
+ * copy the member variables of this
+ * reference. Rather, it handles the
+ * situation where we have two
+ * vectors @p v and @p w, and assign
+ * elements like in
+ * <tt>v(i)=w(i)</tt>. Here, both
+ * left and right hand side of the
+ * assignment have data type
+ * VectorReference, but what we
+ * really mean is to assign the
+ * vector elements represented by the
+ * two references. This operator
+ * implements this operation. Note
+ * also that this allows us to make
+ * the assignment operator const.
+ */
+ const VectorReference &operator = (const VectorReference &r) const;
+
+ /**
+ * The same function as above, but
+ * for non-const reference
+ * objects. The function is needed
+ * since the compiler might otherwise
+ * automatically generate a copy
+ * operator for non-const objects.
+ */
+ VectorReference &operator = (const VectorReference &r);
+
+ /**
+ * Set the referenced element of the
+ * vector to <tt>s</tt>.
+ */
+ const VectorReference &operator = (const PetscScalar &s) const;
+
+ /**
+ * Add <tt>s</tt> to the referenced
+ * element of the vector.
+ */
+ const VectorReference &operator += (const PetscScalar &s) const;
+
+ /**
+ * Subtract <tt>s</tt> from the
+ * referenced element of the vector.
+ */
+ const VectorReference &operator -= (const PetscScalar &s) const;
+
+ /**
+ * Multiply the referenced element of
+ * the vector by <tt>s</tt>.
+ */
+ const VectorReference &operator *= (const PetscScalar &s) const;
+
+ /**
+ * Divide the referenced element of
+ * the vector by <tt>s</tt>.
+ */
+ const VectorReference &operator /= (const PetscScalar &s) const;
+
+ /**
+ * Convert the reference to an actual
+ * value, i.e. return the value of
+ * the referenced element of the
+ * vector.
+ */
+ operator PetscScalar () const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcPETScError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a PETSc function");
+ /**
+ * Exception
+ */
+ DeclException3 (ExcAccessToNonlocalElement,
+ int, int, int,
+ << "You tried to access element " << arg1
+ << " of a distributed vector, but only elements "
+ << arg2 << " through " << arg3
+ << " are stored locally and can be accessed.");
+ /**
+ * Exception.
+ */
+ DeclException2 (ExcWrongMode,
+ int, int,
+ << "You tried to do a "
+ << (arg1 == 1 ?
+ "'set'" :
+ (arg1 == 2 ?
+ "'add'" : "???"))
+ << " operation but the vector is currently in "
+ << (arg2 == 1 ?
+ "'set'" :
+ (arg2 == 2 ?
+ "'add'" : "???"))
+ << " mode. You first have to call 'compress()'.");
+
+ private:
+ /**
+ * Point to the vector we are
+ * referencing.
+ */
+ const VectorBase &vector;
+
+ /**
+ * Index of the referenced element of
+ * the vector.
+ */
+ const unsigned int index;
+
+ /**
+ * Make the vector class a friend, so
+ * that it can create objects of the
+ * present type.
+ */
+ friend class ::dealii::PETScWrappers::VectorBase;
};
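// --- Editorial usage sketch, not part of the original header -----------------
// VectorReference objects are not created directly; they are returned by the
// element access operators of the vector classes, which makes expressions
// like the following work.  The vector v (some object derived from
// VectorBase) is an assumption; remember that 'set' and 'add' accesses must
// be separated by a call to compress(), as the ExcWrongMode message above
// explains.
//
//   v(0) = 1.0;                             // 'set' access via VectorReference
//   v(1) = 2.0;
//   v.compress (VectorOperation::insert);
//
//   v(0) += 3.0;                            // 'add' access: a different mode
//   v.compress (VectorOperation::add);
//
//   const PetscScalar x = v(0);             // read via the conversion operator
// ------------------------------------------------------------------------------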
}
- /**
- * @endcond
- */
-
-
- /**
- * Base class for all vector classes that are implemented on top of the PETSc
- * vector types. Since in PETSc all vector types (i.e. sequential and parallel
- * ones) are built by filling the contents of an abstract object that is only
- * referenced through a pointer of a type that is independent of the actual
- * vector type, we can implement almost all functionality of vectors in this
- * base class. Derived classes will then only have to provide the
- * functionality to create one or the other kind of vector.
- *
- * The interface of this class is modeled after the existing Vector
- * class in deal.II. It has almost the same member functions, and is often
- * exchangable. However, since PETSc only supports a single scalar type
- * (either double, float, or a complex data type), it is not templated, and
- * only works with whatever your PETSc installation has defined the data type
- * @p PetscScalar to.
- *
- * Note that PETSc only guarantees that operations do what you expect if the
- * functions @p VecAssemblyBegin and @p VecAssemblyEnd have been called
- * after vector assembly. Therefore, you need to call Vector::compress()
- * before you actually use the vector.
- *
- * @ingroup PETScWrappers
- * @author Wolfgang Bangerth, 2004
- */
+ /**
+ * @endcond
+ */
+
+
+ /**
+ * Base class for all vector classes that are implemented on top of the PETSc
+ * vector types. Since in PETSc all vector types (i.e. sequential and parallel
+ * ones) are built by filling the contents of an abstract object that is only
+ * referenced through a pointer of a type that is independent of the actual
+ * vector type, we can implement almost all functionality of vectors in this
+ * base class. Derived classes will then only have to provide the
+ * functionality to create one or the other kind of vector.
+ *
+ * The interface of this class is modeled after the existing Vector
+ * class in deal.II. It has almost the same member functions, and is often
+ * exchangeable. However, since PETSc only supports a single scalar type
+ * (either double, float, or a complex data type), it is not templated, and
+ * only works with whatever your PETSc installation has defined the data type
+ * @p PetscScalar to.
+ *
+ * Note that PETSc only guarantees that operations do what you expect if the
+ * functions @p VecAssemblyBegin and @p VecAssemblyEnd have been called
+ * after vector assembly. Therefore, you need to call Vector::compress()
+ * before you actually use the vector.
+ *
+ * @ingroup PETScWrappers
+ * @author Wolfgang Bangerth, 2004
+ */
class VectorBase : public Subscriptor
{
- public:
- /**
- * Declare some of the standard types
- * used in all containers. These types
- * parallel those in the <tt>C++</tt>
- * standard libraries <tt>vector<...></tt>
- * class.
- */
- typedef PetscScalar value_type;
- typedef PetscReal real_type;
- typedef std::size_t size_type;
- typedef internal::VectorReference reference;
- typedef const internal::VectorReference const_reference;
-
- /**
- * Default constructor. It doesn't do
- * anything, derived classes will have
- * to initialize the data.
- */
- VectorBase ();
-
- /**
- * Copy constructor. Sets the dimension
- * to that of the given vector, and
- * copies all elements.
- */
- VectorBase (const VectorBase &v);
-
- /**
- * Initialize a Vector from a PETSc Vec
- * object. Note that we do not copy the
- * vector and we do not attain
- * ownership, so we do not destroy the
- * PETSc object in the destructor.
- */
- explicit VectorBase (const Vec & v);
-
- /**
- * Destructor
- */
- virtual ~VectorBase ();
-
- /**
- * Compress the underlying
- * representation of the PETSc object,
- * i.e. flush the buffers of the vector
- * object if it has any. This function
- * is necessary after writing into a
- * vector element-by-element and before
- * anything else can be done on it.
- *
- * See @ref GlossCompress "Compressing distributed objects"
- * for more information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Set all components of the vector to
- * the given number @p s. Simply pass
- * this down to the individual block
- * objects, but we still need to declare
- * this function to make the example
- * given in the discussion about making
- * the constructor explicit work.
- *
- *
- * Since the semantics of assigning a
- * scalar to a vector are not
- * immediately clear, this operator
- * should really only be used if you
- * want to set the entire vector to
- * zero. This allows the intuitive
- * notation <tt>v=0</tt>. Assigning
- * other values is deprecated and may
- * be disallowed in the future.
- */
- VectorBase & operator = (const PetscScalar s);
-
- /**
- * Test for equality. This function
- * assumes that the present vector and
- * the one to compare with have the same
- * size already, since comparing vectors
- * of different sizes makes not much
- * sense anyway.
- */
- bool operator == (const VectorBase &v) const;
-
- /**
- * Test for inequality. This function
- * assumes that the present vector and
- * the one to compare with have the same
- * size already, since comparing vectors
- * of different sizes makes not much
- * sense anyway.
- */
- bool operator != (const VectorBase &v) const;
-
- /**
- * Return the global dimension of the
- * vector.
- */
- unsigned int size () const;
-
- /**
- * Return the local dimension of the
- * vector, i.e. the number of elements
- * stored on the present MPI
- * process. For sequential vectors,
- * this number is the same as size(),
- * but for parallel vectors it may be
- * smaller.
- *
- * To figure out which elements
- * exactly are stored locally,
- * use local_range().
- */
- unsigned int local_size () const;
-
- /**
- * Return a pair of indices
- * indicating which elements of
- * this vector are stored
- * locally. The first number is
- * the index of the first
- * element stored, the second
- * the index of the one past
- * the last one that is stored
- * locally. If this is a
- * sequential vector, then the
- * result will be the pair
- * (0,N), otherwise it will be
- * a pair (i,i+n), where
- * <tt>n=local_size()</tt>.
- */
- std::pair<unsigned int, unsigned int>
- local_range () const;
-
- /**
- * Return whether @p index is
- * in the local range or not,
- * see also local_range().
- */
- bool in_local_range (const unsigned int index) const;
-
- /**
- * Return if the vector contains ghost
- * elements.
- */
- bool has_ghost_elements() const;
-
- /**
- * Provide access to a given element,
- * both read and write.
- */
- reference
- operator () (const unsigned int index);
-
- /**
- * Provide read-only access to an
- * element.
- */
- PetscScalar
- operator () (const unsigned int index) const;
-
- /**
- * Provide access to a given
- * element, both read and write.
- *
- * Exactly the same as operator().
- */
- reference
- operator [] (const unsigned int index);
-
- /**
- * Provide read-only access to an
- * element. This is equivalent to
- * the <code>el()</code> command.
- *
- * Exactly the same as operator().
- */
- PetscScalar
- operator [] (const unsigned int index) const;
-
- /**
- * A collective set operation: instead
- * of setting individual elements of a
- * vector, this function allows to set
- * a whole set of elements at once. The
- * indices of the elements to be set
- * are stated in the first argument,
- * the corresponding values in the
- * second.
- */
- void set (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
-
- /**
- * A collective add operation: This
- * function adds a whole set of values
- * stored in @p values to the vector
- * components specified by @p indices.
- */
- void add (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- void add (const std::vector<unsigned int> &indices,
- const ::dealii::Vector<PetscScalar> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const PetscScalar *values);
-
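// --- Editorial usage sketch, not part of the original header -----------------
// The collective set()/add() interface declared above can be used as follows
// (the vector v is an assumption; note the compress() call between the 'set'
// and 'add' phases, as required by the documentation):
//
//   std::vector<unsigned int> indices (2);
//   std::vector<PetscScalar>  values (2);
//   indices[0] = 0;   values[0] = 1.0;
//   indices[1] = 2;   values[1] = 2.0;
//
//   v.set (indices, values);                // v(0)=1, v(2)=2
//   v.compress (VectorOperation::insert);
//
//   v.add (indices, values);                // v(0)+=1, v(2)+=2
//   v.compress (VectorOperation::add);
// ------------------------------------------------------------------------------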
- /**
- * Return the scalar product of two
- * vectors. The vectors must have the
- * same size.
- */
- PetscScalar operator * (const VectorBase &vec) const;
-
- /**
- * Return square of the $l_2$-norm.
- */
- real_type norm_sqr () const;
-
- /**
- * Mean value of the elements of
- * this vector.
- */
- PetscScalar mean_value () const;
-
- /**
- * $l_1$-norm of the vector.
- * The sum of the absolute values.
- */
- real_type l1_norm () const;
-
- /**
- * $l_2$-norm of the vector. The
- * square root of the sum of the
- * squares of the elements.
- */
- real_type l2_norm () const;
-
- /**
- * $l_p$-norm of the vector. The
- * pth root of the sum of the pth
- * powers of the absolute values
- * of the elements.
- */
- real_type lp_norm (const real_type p) const;
-
- /**
- * Maximum absolute value of the
- * elements.
- */
- real_type linfty_norm () const;
-
- /**
- * Normalize vector by dividing
- * by the $l_2$-norm of the
- * vector. Return vector norm
- * before normalization.
- */
- real_type normalize () const;
-
- /**
- * Return vector component with
- * the minimal magnitude.
- */
- real_type min () const;
-
- /**
- * Return vector component with
- * the maximal magnitude.
- */
- real_type max () const;
-
-
- /**
- * Replace every element in a
- * vector with its absolute
- * value.
- */
- VectorBase & abs ();
-
- /**
- * Conjugate a vector.
- */
- VectorBase & conjugate ();
-
- /**
- * A collective piecewise
- * multiply operation on
- * <code>this</code> vector
- * with itself. TODO: The model
- * for this function should be
- * similar to add ().
- */
- VectorBase & mult ();
-
- /**
- * Same as above, but a
- * collective piecewise
- * multiply operation of
- * <code>this</code> vector
- * with <b>v</b>.
- */
- VectorBase & mult (const VectorBase &v);
-
- /**
- * Same as above, but a
- * collective piecewise
- * multiply operation of
- * <b>u</b> with <b>v</b>.
- */
- VectorBase & mult (const VectorBase &u,
- const VectorBase &v);
-
- /**
- * Return whether the vector contains
- * only elements with value zero. This
- * function is mainly for internal
- * consistency checks and should
- * seldom be used when not in debug
- * mode since it uses quite some time.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries
- * are zero or positive. This function
- * is used, for example, to check
- * whether refinement indicators are
- * really all positive (or zero).
- */
- bool is_non_negative () const;
-
- /**
- * Multiply the entire vector by a
- * fixed factor.
- */
- VectorBase & operator *= (const PetscScalar factor);
-
- /**
- * Divide the entire vector by a
- * fixed factor.
- */
- VectorBase & operator /= (const PetscScalar factor);
-
- /**
- * Add the given vector to the present
- * one.
- */
- VectorBase & operator += (const VectorBase &V);
-
- /**
- * Subtract the given vector from the
- * present one.
- */
- VectorBase & operator -= (const VectorBase &V);
-
- /**
- * Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
- */
- void add (const PetscScalar s);
-
- /**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const VectorBase &V);
-
- /**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this += a*V</tt>.
- */
- void add (const PetscScalar a, const VectorBase &V);
-
- /**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this += a*V+b*W</tt>.
- */
- void add (const PetscScalar a, const VectorBase &V,
- const PetscScalar b, const VectorBase &W);
-
- /**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this)+V</tt>.
- */
- void sadd (const PetscScalar s,
- const VectorBase &V);
-
- /**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this)+a*V</tt>.
- */
- void sadd (const PetscScalar s,
- const PetscScalar a,
- const VectorBase &V);
-
- /**
- * Scaling and multiple addition.
- */
- void sadd (const PetscScalar s,
- const PetscScalar a,
- const VectorBase &V,
- const PetscScalar b,
- const VectorBase &W);
-
- /**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
- */
- void sadd (const PetscScalar s,
- const PetscScalar a,
- const VectorBase &V,
- const PetscScalar b,
- const VectorBase &W,
- const PetscScalar c,
- const VectorBase &X);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- void scale (const VectorBase &scaling_factors);
-
- /**
- * Assignment <tt>*this = a*V</tt>.
- */
- void equ (const PetscScalar a, const VectorBase &V);
-
- /**
- * Assignment <tt>*this = a*V + b*W</tt>.
- */
- void equ (const PetscScalar a, const VectorBase &V,
- const PetscScalar b, const VectorBase &W);
-
- /**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
- *
- * This vector is appropriately
- * scaled to hold the result.
- *
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
- */
- void ratio (const VectorBase &a,
- const VectorBase &b);
-
- /**
- * Updates the ghost values of this
- * vector. This is necessary after any
- * modification before reading ghost
- * values.
- */
- void update_ghost_values() const;
-
- /**
- * Print to a
- * stream. @p precision denotes
- * the desired precision with
- * which values shall be printed,
- * @p scientific whether
- * scientific notation shall be
- * used. If @p across is
- * @p true then the vector is
- * printed in a line, while if
- * @p false then the elements
- * are printed on a separate line
- * each.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * @p v. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * This function is analog to the
- * the @p swap function of all C++
- * standard containers. Also,
- * there is a global function
- * <tt>swap(u,v)</tt> that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (VectorBase &v);
-
- /**
- * Conversion operator to gain access
- * to the underlying PETSc type. If you
- * do this, you cut this class off some
- * information it may need, so this
- * conversion operator should only be
- * used if you know what you do. In
- * particular, it should only be used
- * for read-only operations into the
- * vector.
- */
- operator const Vec & () const;
-
- /**
- * Estimate for the memory
- * consumption (not implemented
- * for this class).
- */
- std::size_t memory_consumption () const;
-
- protected:
- /**
- * A generic vector object in
- * PETSc. The actual type, a sequential
- * vector, is set in the constructor.
- */
- Vec vector;
-
- /**
- * Denotes if this vector has ghost
- * indices associated with it. This
- * means that at least one of the
- * processes in a parallel programm has
- * at least one ghost index.
- */
- bool ghosted;
-
- /**
- * This vector contains the global
- * indices of the ghost values. The
- * location in this vector denotes the
- * local numbering, which is used in
- * PETSc.
- */
- IndexSet ghost_indices;
-
- /**
- * Store whether the last action was a
- * write or add operation. This
- * variable is @p mutable so that the
- * accessor classes can write to it,
- * even though the vector object they
- * refer to is constant.
- */
- mutable ::dealii::VectorOperation::values last_action;
-
- /**
- * Make the reference class a friend.
- */
- friend class internal::VectorReference;
-
- /**
- * Specifies if the vector is the owner
- * of the PETSc Vec. This is true if it
- * got created by this class and
- * determines if it gets destructed in
- * the destructor.
- */
- bool attained_ownership;
-
- /**
- * Collective set or add
- * operation: This function is
- * invoked by the collective @p
- * set and @p add with the
- * @p add_values flag set to the
- * corresponding value.
- */
- void do_set_add_operation (const unsigned int n_elements,
- const unsigned int *indices,
- const PetscScalar *values,
- const bool add_values);
+ public:
+ /**
+ * Declare some of the standard types
+ * used in all containers. These types
+ * parallel those in the <tt>C++</tt>
+ * standard library's <tt>vector<...></tt>
+ * class.
+ */
+ typedef PetscScalar value_type;
+ typedef PetscReal real_type;
+ typedef std::size_t size_type;
+ typedef internal::VectorReference reference;
+ typedef const internal::VectorReference const_reference;
+
+ /**
+ * Default constructor. It doesn't do
+ * anything; derived classes will have
+ * to initialize the data.
+ */
+ VectorBase ();
+
+ /**
+ * Copy constructor. Sets the dimension
+ * to that of the given vector, and
+ * copies all elements.
+ */
+ VectorBase (const VectorBase &v);
+
+ /**
+ * Initialize a Vector from a PETSc Vec
+ * object. Note that we do not copy the
+ * vector and we do not attain
+ * ownership, so we do not destroy the
+ * PETSc object in the destructor.
+ */
+ explicit VectorBase (const Vec &v);
+
+ /**
+ * Destructor
+ */
+ virtual ~VectorBase ();
+
+ /**
+ * Compress the underlying
+ * representation of the PETSc object,
+ * i.e. flush the buffers of the vector
+ * object if it has any. This function
+ * is necessary after writing into a
+ * vector element-by-element and before
+ * anything else can be done on it.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
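+ *
+ * As a minimal usage sketch (assuming a vector
+ * <tt>v</tt> of this type that has already been
+ * initialized to its final size):
+ * @code
+ *   // write individual elements ...
+ *   v(0) = 1.0;
+ *   v(1) = 2.0;
+ *   // ... and flush the buffered data before the
+ *   // vector is used in any other way
+ *   v.compress (::dealii::VectorOperation::insert);
+ * @endcode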
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
+
+ /**
+ * Set all components of the vector to
+ * the given number @p s. Simply pass
+ * this down to the individual block
+ * objects, but we still need to declare
+ * this function to make the example
+ * given in the discussion about making
+ * the constructor explicit work.
+ *
+ *
+ * Since the semantics of assigning a
+ * scalar to a vector are not
+ * immediately clear, this operator
+ * should really only be used if you
+ * want to set the entire vector to
+ * zero. This allows the intuitive
+ * notation <tt>v=0</tt>. Assigning
+ * other values is deprecated and may
+ * be disallowed in the future.
+ */
+ VectorBase &operator = (const PetscScalar s);
+
+ /**
+ * Test for equality. This function
+ * assumes that the present vector and
+ * the one to compare with have the same
+ * size already, since comparing vectors
+ * of different sizes does not make much
+ * sense anyway.
+ */
+ bool operator == (const VectorBase &v) const;
+
+ /**
+ * Test for inequality. This function
+ * assumes that the present vector and
+ * the one to compare with have the same
+ * size already, since comparing vectors
+ * of different sizes does not make much
+ * sense anyway.
+ */
+ bool operator != (const VectorBase &v) const;
+
+ /**
+ * Return the global dimension of the
+ * vector.
+ */
+ unsigned int size () const;
+
+ /**
+ * Return the local dimension of the
+ * vector, i.e. the number of elements
+ * stored on the present MPI
+ * process. For sequential vectors,
+ * this number is the same as size(),
+ * but for parallel vectors it may be
+ * smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which elements of
+ * this vector are stored
+ * locally. The first number is
+ * the index of the first
+ * element stored, the second
+ * the index of the one past
+ * the last one that is stored
+ * locally. If this is a
+ * sequential vector, then the
+ * result will be the pair
+ * (0,N), otherwise it will be
+ * a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
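+ *
+ * A small sketch of looping over the locally
+ * stored elements (<tt>v</tt> stands for a vector
+ * of this type):
+ * @code
+ *   const std::pair<unsigned int, unsigned int>
+ *     range = v.local_range ();
+ *   for (unsigned int i=range.first; i<range.second; ++i)
+ *     v(i) = i;
+ * @endcode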
+ */
+ std::pair<unsigned int, unsigned int>
+ local_range () const;
+
+ /**
+ * Return whether @p index is
+ * in the local range or not,
+ * see also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
+
+ /**
+ * Return if the vector contains ghost
+ * elements.
+ */
+ bool has_ghost_elements() const;
+
+ /**
+ * Provide access to a given element,
+ * both read and write.
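+ *
+ * For example (<tt>v</tt> denotes a vector of this
+ * type, and index 3 is assumed to lie in the local
+ * range):
+ * @code
+ *   v(3) = 2.5;                   // write access
+ *   const PetscScalar x = v(3);   // read access
+ * @endcode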
+ */
+ reference
+ operator () (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element.
+ */
+ PetscScalar
+ operator () (const unsigned int index) const;
+
+ /**
+ * Provide access to a given
+ * element, both read and write.
+ *
+ * Exactly the same as operator().
+ */
+ reference
+ operator [] (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element. This is equivalent to
+ * the <code>el()</code> command.
+ *
+ * Exactly the same as operator().
+ */
+ PetscScalar
+ operator [] (const unsigned int index) const;
+
+ /**
+ * A collective set operation: instead
+ * of setting individual elements of a
+ * vector, this function allows to set
+ * a whole set of elements at once. The
+ * indices of the elements to be set
+ * are stated in the first argument,
+ * the corresponding values in the
+ * second.
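+ *
+ * A rough sketch (the indices and values shown are
+ * made up for illustration; <tt>v</tt> is a vector
+ * of this type):
+ * @code
+ *   std::vector<unsigned int> indices;
+ *   std::vector<PetscScalar>  values;
+ *   indices.push_back (2);  values.push_back (1.0);
+ *   indices.push_back (7);  values.push_back (3.0);
+ *   v.set (indices, values);
+ *   v.compress (::dealii::VectorOperation::insert);
+ * @endcode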
+ */
+ void set (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
++ const std::vector<PetscScalar> &values);
+
+ /**
+ * A collective add operation: This
+ * function adds a whole set of values
+ * stored in @p values to the vector
+ * components specified by @p indices.
+ */
+ void add (const std::vector<unsigned int> &indices,
- const std::vector<PetscScalar> &values);
++ const std::vector<PetscScalar> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<PetscScalar> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
- const PetscScalar *values);
++ const PetscScalar *values);
+
+ /**
+ * Return the scalar product of two
+ * vectors. The vectors must have the
+ * same size.
+ */
+ PetscScalar operator * (const VectorBase &vec) const;
+
+ /**
+ * Return square of the $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Mean value of the elements of
+ * this vector.
+ */
+ PetscScalar mean_value () const;
+
+ /**
+ * $l_1$-norm of the vector.
+ * The sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * $l_2$-norm of the vector. The
+ * square root of the sum of the
+ * squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * $l_p$-norm of the vector. The
+ * pth root of the sum of the pth
+ * powers of the absolute values
+ * of the elements.
+ */
+ real_type lp_norm (const real_type p) const;
+
+ /**
+ * Maximum absolute value of the
+ * elements.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Normalize vector by dividing
+ * by the $l_2$-norm of the
+ * vector. Return vector norm
+ * before normalization.
+ */
+ real_type normalize () const;
+
+ /**
+ * Return vector component with
+ * the minimal magnitude.
+ */
+ real_type min () const;
+
+ /**
+ * Return vector component with
+ * the maximal magnitude.
+ */
+ real_type max () const;
+
+
+ /**
+ * Replace every element in a
+ * vector with its absolute
+ * value.
+ */
+ VectorBase &abs ();
+
+ /**
+ * Conjugate a vector.
+ */
+ VectorBase &conjugate ();
+
+ /**
+ * A collective piecewise
+ * multiply operation on
+ * <code>this</code> vector
+ * with itself. TODO: The model
+ * for this function should be
+ * similar to add().
+ */
+ VectorBase &mult ();
+
+ /**
+ * Same as above, but a
+ * collective piecewise
+ * multiply operation of
+ * <code>this</code> vector
+ * with <b>v</b>.
+ */
+ VectorBase &mult (const VectorBase &v);
+
+ /**
+ * Same as above, but a
+ * collective piecewise
+ * multiply operation of
+ * <b>u</b> with <b>v</b>.
+ */
+ VectorBase &mult (const VectorBase &u,
+ const VectorBase &v);
+
+ /**
+ * Return whether the vector contains
+ * only elements with value zero. This
+ * function is mainly for internal
+ * consistency checks and should
+ * seldom be used when not in debug
+ * mode since it takes quite some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries
+ * are zero or positive. This function
+ * is used, for example, to check
+ * whether refinement indicators are
+ * really all positive (or zero).
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Multiply the entire vector by a
+ * fixed factor.
+ */
+ VectorBase &operator *= (const PetscScalar factor);
+
+ /**
+ * Divide the entire vector by a
+ * fixed factor.
+ */
+ VectorBase &operator /= (const PetscScalar factor);
+
+ /**
+ * Add the given vector to the present
+ * one.
+ */
+ VectorBase &operator += (const VectorBase &V);
+
+ /**
+ * Subtract the given vector from the
+ * present one.
+ */
+ VectorBase &operator -= (const VectorBase &V);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is a
+ * scalar and not a vector.
+ */
+ void add (const PetscScalar s);
+
+ /**
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const VectorBase &V);
+
+ /**
+ * Simple addition of a multiple of a
+ * vector, i.e. <tt>*this += a*V</tt>.
+ */
+ void add (const PetscScalar a, const VectorBase &V);
+
+ /**
+ * Multiple addition of scaled vectors,
+ * i.e. <tt>*this += a*V+b*W</tt>.
+ */
+ void add (const PetscScalar a, const VectorBase &V,
+ const PetscScalar b, const VectorBase &W);
+
+ /**
+ * Scaling and simple vector addition,
+ * i.e.
+ * <tt>*this = s*(*this)+V</tt>.
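+ *
+ * For instance (<tt>v</tt> and <tt>w</tt> being
+ * vectors of this type and of equal size):
+ * @code
+ *   v.sadd (2.0, w);   // v = 2*v + w
+ * @endcode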
+ */
+ void sadd (const PetscScalar s,
+ const VectorBase &V);
+
+ /**
+ * Scaling and simple addition, i.e.
+ * <tt>*this = s*(*this)+a*V</tt>.
+ */
+ void sadd (const PetscScalar s,
+ const PetscScalar a,
+ const VectorBase &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const PetscScalar s,
+ const PetscScalar a,
+ const VectorBase &V,
+ const PetscScalar b,
+ const VectorBase &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+ */
+ void sadd (const PetscScalar s,
+ const PetscScalar a,
+ const VectorBase &V,
+ const PetscScalar b,
+ const VectorBase &W,
+ const PetscScalar c,
+ const VectorBase &X);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const VectorBase &scaling_factors);
+
+ /**
+ * Assignment <tt>*this = a*V</tt>.
+ */
+ void equ (const PetscScalar a, const VectorBase &V);
+
+ /**
+ * Assignment <tt>*this = a*V + b*W</tt>.
+ */
+ void equ (const PetscScalar a, const VectorBase &V,
+ const PetscScalar b, const VectorBase &W);
+
+ /**
+ * Compute the elementwise ratio of the
+ * two given vectors, that is let
+ * <tt>this[i] = a[i]/b[i]</tt>. This is
+ * useful for example if you want to
+ * compute the cellwise ratio of true to
+ * estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
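+ *
+ * A sketch of the intended use (all three vectors
+ * are of this type and of equal size, every entry
+ * of <tt>estimated_error</tt> is nonzero, and the
+ * names are placeholders):
+ * @code
+ *   error_ratio.ratio (true_error, estimated_error);
+ * @endcode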
+ */
+ void ratio (const VectorBase &a,
+ const VectorBase &b);
+
+ /**
+ * Updates the ghost values of this
+ * vector. This is necessary after any
+ * modification and before reading ghost
+ * values.
+ */
+ void update_ghost_values() const;
+
+ /**
+ * Print to a
+ * stream. @p precision denotes
+ * the desired precision with
+ * which values shall be printed,
+ * @p scientific whether
+ * scientific notation shall be
+ * used. If @p across is
+ * @p true then the vector is
+ * printed in a line, while if
+ * @p false then the elements
+ * are printed on a separate line
+ * each.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * @p v. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * This function is analogous to the
+ * @p swap function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * <tt>swap(u,v)</tt> that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (VectorBase &v);
+
+ /**
+ * Conversion operator to gain access
+ * to the underlying PETSc type. If you
+ * do this, you cut this class off from
+ * some information it may need, so this
+ * conversion operator should only be
+ * used if you know what you are doing. In
+ * particular, it should only be used
+ * for read-only operations into the
+ * vector.
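+ *
+ * A possible read-only use, handing the vector to
+ * a plain PETSc routine (<tt>v</tt> is a vector of
+ * this type):
+ * @code
+ *   const Vec &petsc_vec = v;
+ *   VecView (petsc_vec, PETSC_VIEWER_STDOUT_WORLD);
+ * @endcode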
+ */
+ operator const Vec &() const;
+
+ /**
+ * Estimate for the memory
+ * consumption (not implemented
+ * for this class).
+ */
+ std::size_t memory_consumption () const;
+
+ protected:
+ /**
+ * A generic vector object in
+ * PETSc. The actual type, a sequential
+ * vector, is set in the constructor.
+ */
+ Vec vector;
+
+ /**
+ * Denotes if this vector has ghost
+ * indices associated with it. This
+ * means that at least one of the
+ * processes in a parallel program has
+ * at least one ghost index.
+ */
+ bool ghosted;
+
+ /**
+ * This vector contains the global
+ * indices of the ghost values. The
+ * location in this vector denotes the
+ * local numbering, which is used in
+ * PETSc.
+ */
+ IndexSet ghost_indices;
+
+ /**
+ * Store whether the last action was a
+ * write or add operation. This
+ * variable is @p mutable so that the
+ * accessor classes can write to it,
+ * even though the vector object they
+ * refer to is constant.
+ */
+ mutable ::dealii::VectorOperation::values last_action;
+
+ /**
+ * Make the reference class a friend.
+ */
+ friend class internal::VectorReference;
+
+ /**
+ * Specifies if the vector is the owner
+ * of the PETSc Vec. This is true if it
+ * was created by this class and
+ * determines whether it gets destroyed in
+ * the destructor.
+ */
+ bool attained_ownership;
+
+ /**
+ * Collective set or add
+ * operation: This function is
+ * invoked by the collective @p
+ * set and @p add with the
+ * @p add_values flag set to the
+ * corresponding value.
+ */
+ void do_set_add_operation (const unsigned int n_elements,
+ const unsigned int *indices,
- const PetscScalar *values,
++ const PetscScalar *values,
+ const bool add_values);
};
namespace internal
{
inline
- VectorReference::VectorReference (const VectorBase &vector,
+ VectorReference::VectorReference (const VectorBase &vector,
const unsigned int index)
- :
- vector (vector),
- index (index)
+ :
+ vector (vector),
+ index (index)
{}
template <typename number>
class SparseMatrix : public virtual Subscriptor
{
- public:
- /**
- * Type of matrix entries. In analogy to
- * the STL container classes.
- */
- typedef number value_type;
-
- /**
- * Declare a type that has holds
- * real-valued numbers with the
- * same precision as the template
- * argument to this class. If the
- * template argument of this
- * class is a real data type,
- * then real_type equals the
- * template argument. If the
- * template argument is a
- * std::complex type then
- * real_type equals the type
- * underlying the complex
- * numbers.
- *
- * This typedef is used to
- * represent the return type of
- * norms.
- */
- typedef typename numbers::NumberTraits<number>::real_type real_type;
-
- /**
- * Typedef of an STL conforming iterator
- * class walking over all the nonzero
- * entries of this matrix. This iterator
- * cannot change the values of the
- * matrix.
- */
- typedef
- SparseMatrixIterators::Iterator<number,true>
- const_iterator;
-
- /**
- * Typedef of an STL conforming iterator
- * class walking over all the nonzero
- * entries of this matrix. This iterator
- * @em can change the values of the
- * matrix, but of course can't change the
- * sparsity pattern as this is fixed once
- * a sparse matrix is attached to it.
- */
- typedef
- SparseMatrixIterators::Iterator<number,false>
- iterator;
-
- /**
- * A structure that describes some of the
- * traits of this class in terms of its
- * run-time behavior. Some other classes
- * (such as the block matrix classes)
- * that take one or other of the matrix
- * classes as its template parameters can
- * tune their behavior based on the
- * variables in this class.
- */
- struct Traits
- {
- /**
- * It is safe to elide additions of
- * zeros to individual elements of
- * this matrix.
- */
- static const bool zero_addition_can_be_elided = true;
- };
+ public:
+ /**
+ * Type of matrix entries. In analogy to
+ * the STL container classes.
+ */
+ typedef number value_type;
+
+ /**
+ * Declare a type that holds
+ * real-valued numbers with the
+ * same precision as the template
+ * argument to this class. If the
+ * template argument of this
+ * class is a real data type,
+ * then real_type equals the
+ * template argument. If the
+ * template argument is a
+ * std::complex type then
+ * real_type equals the type
+ * underlying the complex
+ * numbers.
+ *
+ * This typedef is used to
+ * represent the return type of
+ * norms.
+ */
+ typedef typename numbers::NumberTraits<number>::real_type real_type;
+
+ /**
+ * Typedef of an STL conforming iterator
+ * class walking over all the nonzero
+ * entries of this matrix. This iterator
+ * cannot change the values of the
+ * matrix.
+ */
+ typedef
+ SparseMatrixIterators::Iterator<number,true>
+ const_iterator;
+
+ /**
+ * Typedef of an STL conforming iterator
+ * class walking over all the nonzero
+ * entries of this matrix. This iterator
+ * @em can change the values of the
+ * matrix, but of course can't change the
+ * sparsity pattern as this is fixed once
+ * a sparse matrix is attached to it.
+ */
+ typedef
+ SparseMatrixIterators::Iterator<number,false>
+ iterator;
+
+ /**
+ * A structure that describes some of the
+ * traits of this class in terms of its
+ * run-time behavior. Some other classes
+ * (such as the block matrix classes)
+ * that take one or other of the matrix
+ * classes as its template parameters can
+ * tune their behavior based on the
+ * variables in this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is safe to elide additions of
+ * zeros to individual elements of
+ * this matrix.
+ */
+ static const bool zero_addition_can_be_elided = true;
+ };
- /**
- * @name Constructors and initalization
- */
+ /**
+ * @name Constructors and initialization
+ */
//@{
- /**
- * Constructor; initializes the matrix to
- * be empty, without any structure, i.e.
- * the matrix is not usable at all. This
- * constructor is therefore only useful
- * for matrices which are members of a
- * class. All other matrices should be
- * created at a point in the data flow
- * where all necessary information is
- * available.
- *
- * You have to initialize
- * the matrix before usage with
- * reinit(const SparsityPattern&).
- */
- SparseMatrix ();
-
- /**
- * Copy constructor. This constructor is
- * only allowed to be called if the matrix
- * to be copied is empty. This is for the
- * same reason as for the
- * SparsityPattern, see there for the
- * details.
- *
- * If you really want to copy a whole
- * matrix, you can do so by using the
- * copy_from() function.
- */
- SparseMatrix (const SparseMatrix &);
-
- /**
- * Constructor. Takes the given
- * matrix sparsity structure to
- * represent the sparsity pattern
- * of this matrix. You can change
- * the sparsity pattern later on
- * by calling the reinit(const
- * SparsityPattern&) function.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * SparsityPattern&) is not
- * called with a new sparsity
- * pattern.
- *
- * The constructor is marked
- * explicit so as to disallow
- * that someone passes a sparsity
- * pattern in place of a sparse
- * matrix to some function, where
- * an empty matrix would be
- * generated then.
- */
- explicit SparseMatrix (const SparsityPattern &sparsity);
-
- /**
- * Copy constructor: initialize
- * the matrix with the identity
- * matrix. This constructor will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- SparseMatrix (const SparsityPattern &sparsity,
- const IdentityMatrix &id);
-
- /**
- * Destructor. Free all memory, but do not
- * release the memory of the sparsity
- * structure.
- */
- virtual ~SparseMatrix ();
-
- /**
- * Copy operator. Since copying
- * entire sparse matrices is a
- * very expensive operation, we
- * disallow doing so except for
- * the special case of empty
- * matrices of size zero. This
- * doesn't seem particularly
- * useful, but is exactly what
- * one needs if one wanted to
- * have a
- * <code>std::vector@<SparseMatrix@<double@>
- * @></code>: in that case, one
- * can create a vector (which
- * needs the ability to copy
- * objects) of empty matrices
- * that are then later filled
- * with something useful.
- */
- SparseMatrix<number>& operator = (const SparseMatrix<number> &);
-
- /**
- * Copy operator: initialize
- * the matrix with the identity
- * matrix. This operator will
- * throw an exception if the
- * sizes of the sparsity pattern
- * and the identity matrix do not
- * coincide, or if the sparsity
- * pattern does not provide for
- * nonzero entries on the entire
- * diagonal.
- */
- SparseMatrix<number> &
- operator= (const IdentityMatrix &id);
-
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keep the sparsity pattern
- * previously used.
- */
- SparseMatrix & operator = (const double d);
-
- /**
- * Reinitialize the sparse matrix
- * with the given sparsity
- * pattern. The latter tells the
- * matrix how many nonzero
- * elements there need to be
- * reserved.
- *
- * Regarding memory allocation,
- * the same applies as said
- * above.
- *
- * You have to make sure that the
- * lifetime of the sparsity
- * structure is at least as long
- * as that of this matrix or as
- * long as reinit(const
- * SparsityPattern &) is not
- * called with a new sparsity
- * structure.
- *
- * The elements of the matrix are
- * set to zero by this function.
- */
- virtual void reinit (const SparsityPattern &sparsity);
-
- /**
- * Release all memory and return
- * to a state just like after
- * having called the default
- * constructor. It also forgets
- * the sparsity pattern it was
- * previously tied to.
- */
- virtual void clear ();
+ /**
+ * Constructor; initializes the matrix to
+ * be empty, without any structure, i.e.
+ * the matrix is not usable at all. This
+ * constructor is therefore only useful
+ * for matrices which are members of a
+ * class. All other matrices should be
+ * created at a point in the data flow
+ * where all necessary information is
+ * available.
+ *
+ * You have to initialize
+ * the matrix before usage with
+ * reinit(const SparsityPattern&).
+ */
+ SparseMatrix ();
+
+ /**
+ * Copy constructor. This constructor is
+ * only allowed to be called if the matrix
+ * to be copied is empty. This is for the
+ * same reason as for the
+ * SparsityPattern, see there for the
+ * details.
+ *
+ * If you really want to copy a whole
+ * matrix, you can do so by using the
+ * copy_from() function.
+ */
+ SparseMatrix (const SparseMatrix &);
+
+ /**
+ * Constructor. Takes the given
+ * matrix sparsity structure to
+ * represent the sparsity pattern
+ * of this matrix. You can change
+ * the sparsity pattern later on
+ * by calling the reinit(const
+ * SparsityPattern&) function.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * SparsityPattern&) is not
+ * called with a new sparsity
+ * pattern.
+ *
+ * The constructor is marked
+ * explicit so as to disallow
+ * that someone passes a sparsity
+ * pattern in place of a sparse
+ * matrix to some function, where
+ * an empty matrix would then be
+ * generated.
+ */
+ explicit SparseMatrix (const SparsityPattern &sparsity);
+
+ /**
+ * Copy constructor: initialize
+ * the matrix with the identity
+ * matrix. This constructor will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ SparseMatrix (const SparsityPattern &sparsity,
- const IdentityMatrix &id);
++ const IdentityMatrix &id);
+
+ /**
+ * Destructor. Free all memory, but do not
+ * release the memory of the sparsity
+ * structure.
+ */
+ virtual ~SparseMatrix ();
+
+ /**
+ * Copy operator. Since copying
+ * entire sparse matrices is a
+ * very expensive operation, we
+ * disallow doing so except for
+ * the special case of empty
+ * matrices of size zero. This
+ * doesn't seem particularly
+ * useful, but is exactly what
+ * one needs if one wanted to
+ * have a
+ * <code>std::vector@<SparseMatrix@<double@>
+ * @></code>: in that case, one
+ * can create a vector (which
+ * needs the ability to copy
+ * objects) of empty matrices
+ * that are then later filled
+ * with something useful.
+ */
+ SparseMatrix<number> &operator = (const SparseMatrix<number> &);
+
+ /**
+ * Copy operator: initialize
+ * the matrix with the identity
+ * matrix. This operator will
+ * throw an exception if the
+ * sizes of the sparsity pattern
+ * and the identity matrix do not
+ * coincide, or if the sparsity
+ * pattern does not provide for
+ * nonzero entries on the entire
+ * diagonal.
+ */
+ SparseMatrix<number> &
- operator= (const IdentityMatrix &id);
++ operator= (const IdentityMatrix &id);
+
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ SparseMatrix &operator = (const double d);
+
+ /**
+ * Reinitialize the sparse matrix
+ * with the given sparsity
+ * pattern. The latter tells the
+ * matrix how many nonzero
+ * elements there need to be
+ * reserved.
+ *
+ * Regarding memory allocation,
+ * the same applies as stated
+ * above.
+ *
+ * You have to make sure that the
+ * lifetime of the sparsity
+ * structure is at least as long
+ * as that of this matrix or as
+ * long as reinit(const
+ * SparsityPattern &) is not
+ * called with a new sparsity
+ * structure.
+ *
+ * The elements of the matrix are
+ * set to zero by this function.
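+ *
+ * A minimal sketch of setting up a matrix this way
+ * (<tt>n_rows</tt>, <tt>n_cols</tt> and
+ * <tt>max_entries_per_row</tt> are placeholders):
+ * @code
+ *   SparsityPattern sparsity (n_rows, n_cols,
+ *                             max_entries_per_row);
+ *   // ... add entries to the sparsity pattern ...
+ *   sparsity.compress ();
+ *
+ *   SparseMatrix<double> matrix;
+ *   matrix.reinit (sparsity);
+ * @endcode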
+ */
+ virtual void reinit (const SparsityPattern &sparsity);
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor. It also forgets
+ * the sparsity pattern it was
+ * previously tied to.
+ */
+ virtual void clear ();
//@}
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return whether the object is
- * empty. It is empty if either
- * both dimensions are zero or no
- * SparsityPattern is
- * associated.
- */
- bool empty () const;
-
- /**
- * Return the dimension of the
- * image space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int m () const;
-
- /**
- * Return the dimension of the
- * range space. To remember: the
- * matrix is of dimension
- * $m \times n$.
- */
- unsigned int n () const;
-
- /**
- * Return the number of entries
- * in a specific row.
- */
- unsigned int get_row_length (const unsigned int row) const;
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix. Actually, it returns
- * the number of entries in the
- * sparsity pattern; if any of
- * the entries should happen to
- * be zero, it is counted anyway.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Return the number of actually
- * nonzero elements of this matrix. It
- * is possible to specify the parameter
- * <tt>threshold</tt> in order to count
- * only the elements that have absolute
- * value greater than the threshold.
- *
- * Note, that this function does (in
- * contrary to n_nonzero_elements())
- * not count all entries of the
- * sparsity pattern but only the ones
- * that are nonzero (or whose absolute
- * value is greater than threshold).
- */
- unsigned int n_actually_nonzero_elements (const double threshold = 0.) const;
-
- /**
- * Return a (constant) reference
- * to the underlying sparsity
- * pattern of this matrix.
- *
- * Though the return value is
- * declared <tt>const</tt>, you
- * should be aware that it may
- * change if you call any
- * nonconstant function of
- * objects which operate on it.
- */
- const SparsityPattern & get_sparsity_pattern () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return whether the object is
+ * empty. It is empty if either
+ * both dimensions are zero or no
+ * SparsityPattern is
+ * associated.
+ */
+ bool empty () const;
+
+ /**
+ * Return the dimension of the
+ * image space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the dimension of the
+ * range space. To remember: the
+ * matrix is of dimension
+ * $m \times n$.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the number of entries
+ * in a specific row.
+ */
+ unsigned int get_row_length (const unsigned int row) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix. Actually, it returns
+ * the number of entries in the
+ * sparsity pattern; if any of
+ * the entries should happen to
+ * be zero, it is counted anyway.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Return the number of actually
+ * nonzero elements of this matrix. It
+ * is possible to specify the parameter
+ * <tt>threshold</tt> in order to count
+ * only the elements that have absolute
+ * value greater than the threshold.
+ *
+ * Note that, in contrast to
+ * n_nonzero_elements(), this function
+ * does not count all entries of the
+ * sparsity pattern but only the ones
+ * that are nonzero (or whose absolute
+ * value is greater than threshold).
+ */
+ unsigned int n_actually_nonzero_elements (const double threshold = 0.) const;
+
+ /**
+ * Return a (constant) reference
+ * to the underlying sparsity
+ * pattern of this matrix.
+ *
+ * Though the return value is
+ * declared <tt>const</tt>, you
+ * should be aware that it may
+ * change if you call any
+ * nonconstant function of
+ * objects which operate on it.
+ */
+ const SparsityPattern &get_sparsity_pattern () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
//@}
- /**
- * @name Modifying entries
- */
+ /**
+ * @name Modifying entries
+ */
//@{
- /**
- * Set the element (<i>i,j</i>)
- * to <tt>value</tt>. Throws an
- * error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const number value);
-
- /**
- * Set all elements given in a
- * FullMatrix into the sparse matrix
- * locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number2>
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number2>
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be set anyway or
- * they should be filtered away (and
- * not change the previous content in
- * the respective element if it
- * exists). The default value is
- * <tt>false</tt>, i.e., even zero
- * values are treated.
- */
- template <typename number2>
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number2> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- template <typename number2>
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool elide_zero_values = false);
-
- /**
- * Add <tt>value</tt> to the
- * element (<i>i,j</i>). Throws
- * an error if the entry does not
- * exist or if <tt>value</tt> is
- * not a finite number. Still, it
- * is allowed to store zero
- * values in non-existent fields.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const number value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number2>
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- template <typename number2>
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<number2> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number2>
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<number2> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- template <typename number2>
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const number2 *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Multiply the entire matrix by a
- * fixed factor.
- */
- SparseMatrix & operator *= (const number factor);
-
- /**
- * Divide the entire matrix by a
- * fixed factor.
- */
- SparseMatrix & operator /= (const number factor);
-
- /**
- * Symmetrize the matrix by
- * forming the mean value between
- * the existing matrix and its
- * transpose, $A = \frac 12(A+A^T)$.
- *
- * This operation assumes that
- * the underlying sparsity
- * pattern represents a symmetric
- * object. If this is not the
- * case, then the result of this
- * operation will not be a
- * symmetric matrix, since it
- * only explicitly symmetrizes
- * by looping over the lower left
- * triangular part for efficiency
- * reasons; if there are entries
- * in the upper right triangle,
- * then these elements are missed
- * in the
- * symmetrization. Symmetrization
- * of the sparsity pattern can be
- * obtain by
- * SparsityPattern::symmetrize().
- */
- void symmetrize ();
-
- /**
- * Copy the given matrix to this
- * one. The operation throws an
- * error if the sparsity patterns
- * of the two involved matrices
- * do not point to the same
- * object, since in this case the
- * copy operation is
- * cheaper. Since this operation
- * is notheless not for free, we
- * do not make it available
- * through <tt>operator =</tt>,
- * since this may lead to
- * unwanted usage, e.g. in copy
- * arguments to functions, which
- * should really be arguments by
- * reference.
- *
- * The source matrix may be a matrix
- * of arbitrary type, as long as its
- * data type is convertible to the
- * data type of this matrix.
- *
- * The function returns a reference to
- * <tt>*this</tt>.
- */
- template <typename somenumber>
- SparseMatrix<number> &
- copy_from (const SparseMatrix<somenumber> &source);
-
- /**
- * This function is complete
- * analogous to the
- * SparsityPattern::copy_from()
- * function in that it allows to
- * initialize a whole matrix in
- * one step. See there for more
- * information on argument types
- * and their meaning. You can
- * also find a small example on
- * how to use this function
- * there.
- *
- * The only difference to the
- * cited function is that the
- * objects which the inner
- * iterator points to need to be
- * of type <tt>std::pair<unsigned
- * int, value</tt>, where
- * <tt>value</tt> needs to be
- * convertible to the element
- * type of this class, as
- * specified by the
- * <tt>number</tt> template
- * argument.
- *
- * Previous content of the matrix
- * is overwritten. Note that the
- * entries specified by the input
- * parameters need not
- * necessarily cover all elements
- * of the matrix. Elements not
- * covered remain untouched.
- */
- template <typename ForwardIterator>
- void copy_from (const ForwardIterator begin,
- const ForwardIterator end);
-
- /**
- * Copy the nonzero entries of a
- * full matrix into this
- * object. Previous content is
- * deleted. Note that the
- * underlying sparsity pattern
- * must be appropriate to hold
- * the nonzero entries of the
- * full matrix.
- */
- template <typename somenumber>
- void copy_from (const FullMatrix<somenumber> &matrix);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix <tt>factor*matrix</tt>
- * is added to <tt>this</tt>. This
- * function throws an error if the
- * sparsity patterns of the two involved
- * matrices do not point to the same
- * object, since in this case the
- * operation is cheaper.
- *
- * The source matrix may be a sparse
- * matrix over an arbitrary underlying
- * scalar type, as long as its data type
- * is convertible to the data type of
- * this matrix.
- */
- template <typename somenumber>
- void add (const number factor,
- const SparseMatrix<somenumber> &matrix);
+ /**
+ * Set the element (<i>i,j</i>)
+ * to <tt>value</tt>. Throws an
+ * error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const number value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix into the sparse matrix
+ * locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number2>
+ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number2>
+ void set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be set anyway or
+ * they should be filtered away (and
+ * not change the previous content in
+ * the respective element if it
+ * exists). The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are treated.
+ */
+ template <typename number2>
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number2> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements to values
+ * given by <tt>values</tt> in a
+ * given row in columns given by
+ * col_indices into the sparse
+ * matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ template <typename number2>
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number2 *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add <tt>value</tt> to the
+ * element (<i>i,j</i>). Throws
+ * an error if the entry does not
+ * exist or if <tt>value</tt> is
+ * not a finite number. Still, it
+ * is allowed to store zero
+ * values in non-existent fields.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const number value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
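+ *
+ * A typical use in finite element assembly might
+ * look like the following, where
+ * <tt>local_dof_indices</tt> and
+ * <tt>cell_matrix</tt> hold the local-to-global
+ * index map and the local contributions of one
+ * cell (the names are placeholders):
+ * @code
+ *   system_matrix.add (local_dof_indices, cell_matrix);
+ * @endcode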
+ */
+ template <typename number2>
+ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
+ template <typename number2>
+ void add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<number2> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add several values, given by
+ * <tt>values</tt>, to the elements in the
+ * specified row of the matrix at the
+ * column indices given by
+ * <tt>col_indices</tt>.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number2>
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<number2> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> in the given
+ * global matrix row at columns
+ * specified by col_indices in the
+ * sparse matrix.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ template <typename number2>
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const number2 *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Multiply the entire matrix by a
+ * fixed factor.
+ */
+ SparseMatrix &operator *= (const number factor);
+
+ /**
+ * Divide the entire matrix by a
+ * fixed factor.
+ */
+ SparseMatrix &operator /= (const number factor);
+
+ /**
+ * Symmetrize the matrix by
+ * forming the mean value between
+ * the existing matrix and its
+ * transpose, $A = \frac 12(A+A^T)$.
+ *
+ * This operation assumes that
+ * the underlying sparsity
+ * pattern represents a symmetric
+ * object. If this is not the
+ * case, then the result of this
+ * operation will not be a
+ * symmetric matrix, since it
+ * only explicitly symmetrizes
+ * by looping over the lower left
+ * triangular part for efficiency
+ * reasons; if there are entries
+ * in the upper right triangle,
+ * then these elements are missed
+ * in the
+ * symmetrization. Symmetrization
+ * of the sparsity pattern can be
+ * obtained by
+ * SparsityPattern::symmetrize().
+ */
+ void symmetrize ();
+
+ /**
+ * Copy the given matrix to this
+ * one. The operation throws an
+ * error if the sparsity patterns
+ * of the two involved matrices
+ * do not point to the same
+ * object, since in this case the
+ * copy operation is
+ * cheaper. Since this operation
+ * is nonetheless not for free, we
+ * do not make it available
+ * through <tt>operator =</tt>,
+ * since this may lead to
+ * unwanted usage, e.g. in copy
+ * arguments to functions, which
+ * should really be arguments by
+ * reference.
+ *
+ * The source matrix may be a matrix
+ * of arbitrary type, as long as its
+ * data type is convertible to the
+ * data type of this matrix.
+ *
+ * The function returns a reference to
+ * <tt>*this</tt>.
+ */
+ template <typename somenumber>
+ SparseMatrix<number> &
+ copy_from (const SparseMatrix<somenumber> &source);
+
+ /**
+ * This function is completely
+ * analogous to the
+ * SparsityPattern::copy_from()
+ * function in that it allows one to
+ * initialize a whole matrix in
+ * one step. See there for more
+ * information on argument types
+ * and their meaning. You can
+ * also find a small example on
+ * how to use this function
+ * there.
+ *
+ * The only difference to the
+ * cited function is that the
+ * objects which the inner
+ * iterator points to need to be
+ * of type <tt>std::pair<unsigned
+ * int, value</tt>, where
+ * <tt>value</tt> needs to be
+ * convertible to the element
+ * type of this class, as
+ * specified by the
+ * <tt>number</tt> template
+ * argument.
+ *
+ * Previous content of the matrix
+ * is overwritten. Note that the
+ * entries specified by the input
+ * parameters need not
+ * necessarily cover all elements
+ * of the matrix. Elements not
+ * covered remain untouched.
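+ *
+ * A rough sketch, assuming each row is given as a
+ * container of (column index, value) pairs as
+ * described above:
+ * @code
+ *   std::vector<std::map<unsigned int, double> >
+ *     entries (matrix.m());
+ *   entries[0][0] = 2.;
+ *   entries[0][1] = -1.;
+ *   matrix.copy_from (entries.begin(), entries.end());
+ * @endcode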
+ */
+ template <typename ForwardIterator>
+ void copy_from (const ForwardIterator begin,
+ const ForwardIterator end);
+
+ /**
+ * Copy the nonzero entries of a
+ * full matrix into this
+ * object. Previous content is
+ * deleted. Note that the
+ * underlying sparsity pattern
+ * must be appropriate to hold
+ * the nonzero entries of the
+ * full matrix.
+ */
+ template <typename somenumber>
+ void copy_from (const FullMatrix<somenumber> &matrix);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix <tt>factor*matrix</tt>
+ * is added to <tt>this</tt>. This
+ * function throws an error if the
+ * sparsity patterns of the two involved
+ * matrices do not point to the same
+ * object, since in this case the
+ * operation is cheaper.
+ *
+ * The source matrix may be a sparse
+ * matrix over an arbitrary underlying
+ * scalar type, as long as its data type
+ * is convertible to the data type of
+ * this matrix.
+ */
+ template <typename somenumber>
+ void add (const number factor,
+ const SparseMatrix<somenumber> &matrix);
//@}
- /**
- * @name Entry Access
- */
+ /**
+ * @name Entry Access
+ */
//@{
- /**
- * Return the value of the entry
- * (<i>i,j</i>). This may be an
- * expensive operation and you
- * should always take care where
- * to call this function. In
- * order to avoid abuse, this
- * function throws an exception
- * if the required element does
- * not exist in the matrix.
- *
- * In case you want a function
- * that returns zero instead (for
- * entries that are not in the
- * sparsity pattern of the
- * matrix), use the el()
- * function.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This function is mostly like
- * operator()() in that it
- * returns the value of the
- * matrix entry (<i>i,j</i>). The
- * only difference is that if
- * this entry does not exist in
- * the sparsity pattern, then
- * instead of raising an
- * exception, zero is
- * returned. While this may be
- * convenient in some cases, note
- * that it is simple to write
- * algorithms that are slow
- * compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- *
- * If you are looping over all elements,
- * consider using one of the iterator
- * classes instead, since they are
- * tailored better to a sparse matrix
- * structure.
- */
- number el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic (see
- * SparsityPattern::optimize_diagonal()).
- *
- * This function is considerably
- * faster than the operator()(),
- * since for quadratic matrices, the
- * diagonal entry may be the
- * first to be stored in each row
- * and access therefore does not
- * involve searching for the
- * right column number.
- */
- number diag_element (const unsigned int i) const;
-
- /**
- * Same as above, but return a
- * writeable reference. You're
- * sure you know what you do?
- */
- number & diag_element (const unsigned int i);
-
- /**
- * Access to values in internal
- * mode. Returns the value of
- * the <tt>index</tt>th entry in
- * <tt>row</tt>. Here,
- * <tt>index</tt> refers to the
- * internal representation of the
- * matrix, not the column. Be
- * sure to understand what you
- * are doing here.
- *
- * @deprecated Use iterator or
- * const_iterator instead!
- */
- number raw_entry (const unsigned int row,
- const unsigned int index) const;
-
- /**
- * @internal @deprecated Use iterator or
- * const_iterator instead!
- *
- * This is for hackers. Get
- * access to the <i>i</i>th element of
- * this matrix. The elements are
- * stored in a consecutive way,
- * refer to the SparsityPattern
- * class for more details.
- *
- * You should use this interface
- * very carefully and only if you
- * are absolutely sure to know
- * what you do. You should also
- * note that the structure of
- * these arrays may change over
- * time. If you change the
- * layout yourself, you should
- * also rename this function to
- * avoid programs relying on
- * outdated information!
- */
- number global_entry (const unsigned int i) const;
-
- /**
- * @internal @deprecated Use iterator or
- * const_iterator instead!
- *
- * Same as above, but with write
- * access. You certainly know
- * what you do?
- */
- number & global_entry (const unsigned int i);
+ /**
+ * Return the value of the entry
+ * (<i>i,j</i>). This may be an
+ * expensive operation and you
+ * should always take care where
+ * to call this function. In
+ * order to avoid abuse, this
+ * function throws an exception
+ * if the required element does
+ * not exist in the matrix.
+ *
+ * In case you want a function
+ * that returns zero instead (for
+ * entries that are not in the
+ * sparsity pattern of the
+ * matrix), use the el()
+ * function.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
+ number operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This function is mostly like
+ * operator()() in that it
+ * returns the value of the
+ * matrix entry (<i>i,j</i>). The
+ * only difference is that if
+ * this entry does not exist in
+ * the sparsity pattern, then
+ * instead of raising an
+ * exception, zero is
+ * returned. While this may be
+ * convenient in some cases, note
+ * that it is simple to write
+ * algorithms that are slow
+ * compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ *
+ * If you are looping over all elements,
+ * consider using one of the iterator
+ * classes instead, since they are
+ * tailored better to a sparse matrix
+ * structure.
+ */
+ number el (const unsigned int i,
+ const unsigned int j) const;
+
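A small sketch contrasting the two access functions above; <tt>A</tt>, <tt>i</tt> and <tt>j</tt> are assumed to exist and the entry (<i>i,j</i>) is assumed to be in the sparsity pattern:
@code
const double a_ij  = A (i, j);        // throws if (i,j) is not in the pattern
const double maybe = A.el (i, j+1);   // returns zero if (i,j+1) is not stored
@endcode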
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic (see
+ * SparsityPattern::optimize_diagonal()).
+ *
+ * This function is considerably
+ * faster than the operator()(),
+ * since for quadratic matrices, the
+ * diagonal entry may be the
+ * first to be stored in each row
+ * and access therefore does not
+ * involve searching for the
+ * right column number.
+ */
+ number diag_element (const unsigned int i) const;
+
+ /**
+ * Same as above, but returns a
+ * writable reference. Are you sure
+ * you know what you are doing?
+ */
+ number &diag_element (const unsigned int i);
+
+ /**
+ * Access to values in internal
+ * mode. Returns the value of
+ * the <tt>index</tt>th entry in
+ * <tt>row</tt>. Here,
+ * <tt>index</tt> refers to the
+ * internal representation of the
+ * matrix, not the column. Be
+ * sure to understand what you
+ * are doing here.
+ *
+ * @deprecated Use iterator or
+ * const_iterator instead!
+ */
+ number raw_entry (const unsigned int row,
+ const unsigned int index) const;
+
+ /**
+ * @internal @deprecated Use iterator or
+ * const_iterator instead!
+ *
+ * This is for hackers. Get
+ * access to the <i>i</i>th element of
+ * this matrix. The elements are
+ * stored in a consecutive way,
+ * refer to the SparsityPattern
+ * class for more details.
+ *
+ * You should use this interface
+ * very carefully and only if you
+ * are absolutely sure you know
+ * what you are doing. You should also
+ * note that the structure of
+ * these arrays may change over
+ * time. If you change the
+ * layout yourself, you should
+ * also rename this function to
+ * avoid programs relying on
+ * outdated information!
+ */
+ number global_entry (const unsigned int i) const;
+
+ /**
+ * @internal @deprecated Use iterator or
+ * const_iterator instead!
+ *
+ * Same as above, but with write
+ * access. Are you certain you
+ * know what you are doing?
+ */
+ number &global_entry (const unsigned int i);
//@}
- /**
- * @name Multiplications
- */
+ /**
+ * @name Multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M<sup>T</sup>*src</i> with
- * <i>M</i> being this
- * matrix. This function does the
- * same as vmult() but takes
- * the transposed matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void vmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Note that while this function can
- * operate on all vectors that offer
- * iterator classes, it is only really
- * effective for objects of type @ref
- * Vector. For all classes for which
- * iterating over elements, or random
- * member access is expensive, this
- * function is not efficient. In
- * particular, if you want to multiply
- * with BlockVector objects, you should
- * consider using a BlockSparseMatrix as
- * well.
- *
- * Source and destination must
- * not be the same vector.
- */
- template <class OutVector, class InVector>
- void Tvmult_add (OutVector& dst,
- const InVector& src) const;
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix,
- * i.e. $\left(v,Mv\right)$. This
- * is useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to be
- * quadratic for this operation, and for
- * the result to actually be a norm it
- * also needs to be either real symmetric
- * or complex hermitian.
- *
- * The underlying template types of both
- * this matrix and the given vector
- * should either both be real or
- * complex-valued, but not mixed, for
- * this function to make sense.
- */
- template <typename somenumber>
- somenumber matrix_norm_square (const Vector<somenumber> &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- */
- template <typename somenumber>
- somenumber matrix_scalar_product (const Vector<somenumber> &u,
- const Vector<somenumber> &v) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to be
- * <i>r=b-Mx</i>. Write the
- * residual into
- * <tt>dst</tt>. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and destination
- * <i>dst</i> must not be the same
- * vector.
- */
- template <typename somenumber>
- somenumber residual (Vector<somenumber> &dst,
- const Vector<somenumber> &x,
- const Vector<somenumber> &b) const;
-
- /**
- * Perform the matrix-matrix
- * multiplication <tt>C = A * B</tt>,
- * or, if an optional vector argument
- * is given, <tt>C = A * diag(V) *
- * B</tt>, where <tt>diag(V)</tt>
- * defines a diagonal matrix with the
- * vector entries.
- *
- * This function assumes that the
- * calling matrix <tt>A</tt> and
- * <tt>B</tt> have compatible
- * sizes. The size of <tt>C</tt> will
- * be set within this function.
- *
- * The content as well as the sparsity
- * pattern of the matrix C will be
- * changed by this function, so make
- * sure that the sparsity pattern is
- * not used somewhere else in your
- * program. This is an expensive
- * operation, so think twice before you
- * use this function.
- *
- * There is an optional flag
- * <tt>rebuild_sparsity_pattern</tt>
- * that can be used to bypass the
- * creation of a new sparsity pattern
- * and instead uses the sparsity
- * pattern stored in <tt>C</tt>. In
- * that case, make sure that it really
- * fits. The default is to rebuild the
- * sparsity pattern.
- *
- * @note Rebuilding the sparsity pattern
- * requires changing it. This means that
- * all other matrices that are associated
- * with this sparsity pattern will
- * then have invalid entries.
- */
- template <typename numberB, typename numberC>
- void mmult (SparseMatrix<numberC> &C,
- const SparseMatrix<numberB> &B,
- const Vector<number> &V = Vector<number>(),
- const bool rebuild_sparsity_pattern = true) const;
-
- /**
- * Perform the matrix-matrix
- * multiplication with the transpose of
- * <tt>this</tt>, i.e., <tt>C =
- * A<sup>T</sup> * B</tt>, or, if an
- * optional vector argument is given,
- * <tt>C = A<sup>T</sup> * diag(V) *
- * B</tt>, where <tt>diag(V)</tt>
- * defines a diagonal matrix with the
- * vector entries.
- *
- * This function assumes that the
- * calling matrix <tt>A</tt> and
- * <tt>B</tt> have compatible
- * sizes. The size of <tt>C</tt> will
- * be set within this function.
- *
- * The content as well as the sparsity
- * pattern of the matrix C will be
- * changed by this function, so make
- * sure that the sparsity pattern is
- * not used somewhere else in your
- * program. This is an expensive
- * operation, so think twice before you
- * use this function.
- *
- * There is an optional flag
- * <tt>rebuild_sparsity_pattern</tt>
- * that can be used to bypass the
- * creation of a new sparsity pattern
- * and instead uses the sparsity
- * pattern stored in <tt>C</tt>. In
- * that case, make sure that it really
- * fits. The default is to rebuild the
- * sparsity pattern.
- *
- * @note Rebuilding the sparsity pattern
- * requires changing it. This means that
- * all other matrices that are associated
- * with this sparsity pattern will
- * then have invalid entries.
- */
- template <typename numberB, typename numberC>
- void Tmmult (SparseMatrix<numberC> &C,
- const SparseMatrix<numberB> &B,
- const Vector<number> &V = Vector<number>(),
- const bool rebuild_sparsity_pattern = true) const;
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult (OutVector &dst,
+ const InVector &src) const;
+
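A minimal sketch of a matrix-vector product with the plain Vector class, assuming <tt>A</tt> is a filled SparseMatrix<double>:
@code
Vector<double> src (A.n()), dst (A.m());
// ... fill src ...
A.vmult (dst, src);   // dst = A * src; src and dst must be different vectors
@endcode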
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does the
+ * same as vmult() but takes
+ * the transposed matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void vmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Note that while this function can
+ * operate on all vectors that offer
+ * iterator classes, it is only really
+ * effective for objects of type @ref
+ * Vector. For all classes for which
+ * iterating over elements, or random
+ * member access is expensive, this
+ * function is not efficient. In
+ * particular, if you want to multiply
+ * with BlockVector objects, you should
+ * consider using a BlockSparseMatrix as
+ * well.
+ *
+ * Source and destination must
+ * not be the same vector.
+ */
+ template <class OutVector, class InVector>
+ void Tvmult_add (OutVector &dst,
+ const InVector &src) const;
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix,
+ * i.e. $\left(v,Mv\right)$. This
+ * is useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to be
+ * quadratic for this operation, and for
+ * the result to actually be a norm it
+ * also needs to be either real symmetric
+ * or complex Hermitian.
+ *
+ * The underlying template types of both
+ * this matrix and the given vector
+ * should either both be real or
+ * complex-valued, but not mixed, for
+ * this function to make sense.
+ */
+ template <typename somenumber>
+ somenumber matrix_norm_square (const Vector<somenumber> &v) const;
+
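As a sketch of the finite element use case mentioned above (the mass matrix <tt>M</tt> and nodal vector <tt>v</tt> are assumptions):
@code
// (v, M v) is the square of the L2 norm of the finite element function
// whose nodal values are stored in v.
const double l2_norm = std::sqrt (M.matrix_norm_square (v));
@endcode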
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ */
+ template <typename somenumber>
+ somenumber matrix_scalar_product (const Vector<somenumber> &u,
+ const Vector<somenumber> &v) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to be
+ * <i>r=b-Mx</i>. Write the
+ * residual into
+ * <tt>dst</tt>. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and destination
+ * <i>dst</i> must not be the same
+ * vector.
+ */
+ template <typename somenumber>
+ somenumber residual (Vector<somenumber> &dst,
+ const Vector<somenumber> &x,
+ const Vector<somenumber> &b) const;
+
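A brief sketch, assuming <tt>A</tt>, <tt>x</tt> and <tt>b</tt> describe a linear system <i>Ax=b</i>:
@code
Vector<double> r (b.size());
const double res_l2 = A.residual (r, x, b);   // r = b - A*x, returns |r|_2
@endcode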
+ /**
+ * Perform the matrix-matrix
+ * multiplication <tt>C = A * B</tt>,
+ * or, if an optional vector argument
+ * is given, <tt>C = A * diag(V) *
+ * B</tt>, where <tt>diag(V)</tt>
+ * defines a diagonal matrix with the
+ * vector entries.
+ *
+ * This function assumes that the
+ * calling matrix <tt>A</tt> and
+ * <tt>B</tt> have compatible
+ * sizes. The size of <tt>C</tt> will
+ * be set within this function.
+ *
+ * The content as well as the sparsity
+ * pattern of the matrix C will be
+ * changed by this function, so make
+ * sure that the sparsity pattern is
+ * not used somewhere else in your
+ * program. This is an expensive
+ * operation, so think twice before you
+ * use this function.
+ *
+ * There is an optional flag
+ * <tt>rebuild_sparsity_pattern</tt>
+ * that can be used to bypass the
+ * creation of a new sparsity pattern
+ * and instead uses the sparsity
+ * pattern stored in <tt>C</tt>. In
+ * that case, make sure that it really
+ * fits. The default is to rebuild the
+ * sparsity pattern.
+ *
+ * @note Rebuilding the sparsity pattern
+ * requires changing it. This means that
+ * all other matrices that are associated
+ * with this sparsity pattern will
+ * then have invalid entries.
+ */
+ template <typename numberB, typename numberC>
+ void mmult (SparseMatrix<numberC> &C,
+ const SparseMatrix<numberB> &B,
+ const Vector<number> &V = Vector<number>(),
+ const bool rebuild_sparsity_pattern = true) const;
+
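A usage sketch under the assumption that <tt>A</tt> and <tt>B</tt> have compatible sizes and that <tt>C</tt> already carries its own SparsityPattern, which mmult() is allowed to rebuild:
@code
A.mmult (C, B);        // C = A * B; C's sparsity pattern is rebuilt by default
A.mmult (C, B, V);     // C = A * diag(V) * B, with V a Vector<double>
@endcode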
+ /**
+ * Perform the matrix-matrix
+ * multiplication with the transpose of
+ * <tt>this</tt>, i.e., <tt>C =
+ * A<sup>T</sup> * B</tt>, or, if an
+ * optional vector argument is given,
+ * <tt>C = A<sup>T</sup> * diag(V) *
+ * B</tt>, where <tt>diag(V)</tt>
+ * defines a diagonal matrix with the
+ * vector entries.
+ *
+ * This function assumes that the
+ * calling matrix <tt>A</tt> and
+ * <tt>B</tt> have compatible
+ * sizes. The size of <tt>C</tt> will
+ * be set within this function.
+ *
+ * The content as well as the sparsity
+ * pattern of the matrix C will be
+ * changed by this function, so make
+ * sure that the sparsity pattern is
+ * not used somewhere else in your
+ * program. This is an expensive
+ * operation, so think twice before you
+ * use this function.
+ *
+ * There is an optional flag
+ * <tt>rebuild_sparsity_pattern</tt>
+ * that can be used to bypass the
+ * creation of a new sparsity pattern
+ * and instead uses the sparsity
+ * pattern stored in <tt>C</tt>. In
+ * that case, make sure that it really
+ * fits. The default is to rebuild the
+ * sparsity pattern.
+ *
+ * @note Rebuilding the sparsity pattern
+ * requires changing it. This means that
+ * all other matrices that are associated
+ * with this sparsity pattern will
+ * then have invalid entries.
+ */
+ template <typename numberB, typename numberC>
+ void Tmmult (SparseMatrix<numberC> &C,
+ const SparseMatrix<numberB> &B,
+ const Vector<number> &V = Vector<number>(),
+ const bool rebuild_sparsity_pattern = true) const;
//@}
- /**
- * @name Matrix norms
- */
+ /**
+ * @name Matrix norms
+ */
//@{
- /**
- * Return the $l_1$-norm of the matrix,
- * that is $|M|_1=\max_{\mathrm{all\
- * columns\ }j}\sum_{\mathrm{all\ rows\
- * } i} |M_{ij}|$, (max. sum of
- * columns). This is the natural
- * matrix norm that is compatible to
- * the $l_1$-norm for vectors, i.e.
- * $|Mv|_1\leq |M|_1 |v|_1$.
- * (cf. Haemmerlin-Hoffmann :
- * Numerische Mathematik)
- */
- real_type l1_norm () const;
-
- /**
- * Return the $l_\infty$-norm of the
- * matrix, that is
- * $|M|_\infty=\max_{\mathrm{all\ rows\
- * }i}\sum_{\mathrm{all\ columns\ }j}
- * |M_{ij}|$, (max. sum of rows). This
- * is the natural matrix norm that is
- * compatible to the $l_\infty$-norm of
- * vectors, i.e. $|Mv|_\infty \leq
- * |M|_\infty |v|_\infty$.
- * (cf. Haemmerlin-Hoffmann :
- * Numerische Mathematik)
- */
- real_type linfty_norm () const;
-
- /**
- * Return the frobenius norm of the
- * matrix, i.e. the square root of the
- * sum of squares of all entries in the
- * matrix.
- */
- real_type frobenius_norm () const;
+ /**
+ * Return the $l_1$-norm of the matrix,
+ * that is $|M|_1=\max_{\mathrm{all\
+ * columns\ }j}\sum_{\mathrm{all\ rows\
+ * } i} |M_{ij}|$, (max. sum of
+ * columns). This is the natural
+ * matrix norm that is compatible with
+ * the $l_1$-norm for vectors, i.e.
+ * $|Mv|_1\leq |M|_1 |v|_1$.
+ * (cf. Haemmerlin-Hoffmann :
+ * Numerische Mathematik)
+ */
+ real_type l1_norm () const;
+
+ /**
+ * Return the $l_\infty$-norm of the
+ * matrix, that is
+ * $|M|_\infty=\max_{\mathrm{all\ rows\
+ * }i}\sum_{\mathrm{all\ columns\ }j}
+ * |M_{ij}|$, (max. sum of rows). This
+ * is the natural matrix norm that is
+ * compatible with the $l_\infty$-norm of
+ * vectors, i.e. $|Mv|_\infty \leq
+ * |M|_\infty |v|_\infty$.
+ * (cf. Haemmerlin-Hoffmann :
+ * Numerische Mathematik)
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return the Frobenius norm of the
+ * matrix, i.e. the square root of the
+ * sum of squares of all entries in the
+ * matrix.
+ */
+ real_type frobenius_norm () const;
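The three norms can be queried directly; for a SparseMatrix<double> (named <tt>A</tt> here purely for illustration) real_type is simply double:
@code
const double n1   = A.l1_norm ();         // max. column sum
const double ninf = A.linfty_norm ();     // max. row sum
const double nfro = A.frobenius_norm ();  // sqrt of sum of squared entries
@endcode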
//@}
- /**
- * @name Preconditioning methods
- */
+ /**
+ * @name Preconditioning methods
+ */
//@{
- /**
- * Apply the Jacobi
- * preconditioner, which
- * multiplies every element of
- * the <tt>src</tt> vector by the
- * inverse of the respective
- * diagonal element and
- * multiplies the result with the
- * relaxation factor <tt>omega</tt>.
- */
- template <typename somenumber>
- void precondition_Jacobi (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number omega = 1.) const;
-
- /**
- * Apply SSOR preconditioning to
- * <tt>src</tt> with damping
- * <tt>omega</tt>. The optional
- * argument
- * <tt>pos_right_of_diagonal</tt> is
- * supposed to provide an array where
- * each entry specifies the position
- * just right of the diagonal in the
- * global array of nonzeros.
- */
- template <typename somenumber>
- void precondition_SSOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number omega = 1.,
- const std::vector<unsigned int>&pos_right_of_diagonal=std::vector<unsigned int>()) const;
-
- /**
- * Apply SOR preconditioning
- * matrix to <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_SOR (Vector<somenumber> &dst,
- const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Apply transpose SOR
- * preconditioning matrix to
- * <tt>src</tt>.
- */
- template <typename somenumber>
- void precondition_TSOR (Vector<somenumber> &dst,
+ /**
+ * Apply the Jacobi
+ * preconditioner, which
+ * multiplies every element of
+ * the <tt>src</tt> vector by the
+ * inverse of the respective
+ * diagonal element and
+ * multiplies the result with the
+ * relaxation factor <tt>omega</tt>.
+ */
+ template <typename somenumber>
+ void precondition_Jacobi (Vector<somenumber> &dst,
const Vector<somenumber> &src,
- const number om = 1.) const;
-
- /**
- * Perform SSOR preconditioning
- * in-place. Apply the
- * preconditioner matrix without
- * copying to a second vector.
- * <tt>omega</tt> is the relaxation
- * parameter.
- */
- template <typename somenumber>
- void SSOR (Vector<somenumber> &v,
- const number omega = 1.) const;
-
- /**
- * Perform an SOR preconditioning
- * in-place. <tt>omega</tt> is
- * the relaxation parameter.
- */
- template <typename somenumber>
- void SOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
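A minimal sketch of applying the Jacobi preconditioner, with <tt>A</tt> and <tt>src</tt> assumed to exist:
@code
Vector<double> dst (src.size());
A.precondition_Jacobi (dst, src);        // dst_i = src_i / A_ii
A.precondition_Jacobi (dst, src, 0.5);   // the same, scaled by omega = 0.5
@endcode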
+ /**
+ * Apply SSOR preconditioning to
+ * <tt>src</tt> with damping
+ * <tt>omega</tt>. The optional
+ * argument
+ * <tt>pos_right_of_diagonal</tt> is
+ * supposed to provide an array where
+ * each entry specifies the position
+ * just right of the diagonal in the
+ * global array of nonzeros.
+ */
+ template <typename somenumber>
+ void precondition_SSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number omega = 1.,
+ const std::vector<unsigned int> &pos_right_of_diagonal=std::vector<unsigned int>()) const;
+
+ /**
+ * Apply SOR preconditioning
+ * matrix to <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_SOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Apply transpose SOR
+ * preconditioning matrix to
+ * <tt>src</tt>.
+ */
+ template <typename somenumber>
+ void precondition_TSOR (Vector<somenumber> &dst,
+ const Vector<somenumber> &src,
+ const number om = 1.) const;
+
+ /**
+ * Perform SSOR preconditioning
+ * in-place. Apply the
+ * preconditioner matrix without
+ * copying to a second vector.
+ * <tt>omega</tt> is the relaxation
+ * parameter.
+ */
+ template <typename somenumber>
+ void SSOR (Vector<somenumber> &v,
+ const number omega = 1.) const;
+
+ /**
+ * Perform an SOR preconditioning
+ * in-place. <tt>omega</tt> is
+ * the relaxation parameter.
+ */
+ template <typename somenumber>
+ void SOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a transpose SOR
+ * preconditioning in-place.
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TSOR (Vector<somenumber> &v,
+ const number om = 1.) const;
+
+ /**
+ * Perform a permuted SOR
+ * preconditioning in-place.
+ *
+ * The standard SOR method is
+ * applied in the order
+ * prescribed by <tt>permutation</tt>,
+ * that is, first the row
+ * <tt>permutation[0]</tt>, then
+ * <tt>permutation[1]</tt> and so
+ * on. For efficiency reasons,
+ * the permutation as well as its
+ * inverse are required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void PSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
+ const number om = 1.) const;
+
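A sketch of setting up the two index vectors PSOR() expects; <tt>A</tt> and the iterate <tt>v</tt> are assumptions:
@code
std::vector<unsigned int> permutation (A.m()), inverse_permutation (A.m());
// ... fill both so that inverse_permutation[permutation[i]] == i ...
A.PSOR (v, permutation, inverse_permutation);   // SOR sweep in permuted order
@endcode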
+ /**
+ * Perform a transposed permuted SOR
+ * preconditioning in-place.
+ *
+ * The transposed SOR method is
+ * applied in the order
+ * prescribed by
+ * <tt>permutation</tt>, that is,
+ * first the row
+ * <tt>permutation[m()-1]</tt>,
+ * then
+ * <tt>permutation[m()-2]</tt>
+ * and so on. For efficiency
+ * reasons, the permutation as
+ * well as its inverse are
+ * required.
+ *
+ * <tt>omega</tt> is the
+ * relaxation parameter.
+ */
+ template <typename somenumber>
+ void TPSOR (Vector<somenumber> &v,
+ const std::vector<unsigned int> &permutation,
+ const std::vector<unsigned int> &inverse_permutation,
const number om = 1.) const;
- /**
- * Perform a transpose SOR
- * preconditioning in-place.
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TSOR (Vector<somenumber> &v,
- const number om = 1.) const;
-
- /**
- * Perform a permuted SOR
- * preconditioning in-place.
- *
- * The standard SOR method is
- * applied in the order
- * prescribed by <tt>permutation</tt>,
- * that is, first the row
- * <tt>permutation[0]</tt>, then
- * <tt>permutation[1]</tt> and so
- * on. For efficiency reasons,
- * the permutation as well as its
- * inverse are required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void PSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Perform a transposed permuted SOR
- * preconditioning in-place.
- *
- * The transposed SOR method is
- * applied in the order
- * prescribed by
- * <tt>permutation</tt>, that is,
- * first the row
- * <tt>permutation[m()-1]</tt>,
- * then
- * <tt>permutation[m()-2]</tt>
- * and so on. For efficiency
- * reasons, the permutation as
- * well as its inverse are
- * required.
- *
- * <tt>omega</tt> is the
- * relaxation parameter.
- */
- template <typename somenumber>
- void TPSOR (Vector<somenumber> &v,
- const std::vector<unsigned int>& permutation,
- const std::vector<unsigned int>& inverse_permutation,
- const number om = 1.) const;
-
- /**
- * Do one Jacobi step on
- * <tt>v</tt>. Performs a direct
- * Jacobi step with right hand
- * side <tt>b</tt>. This function
- * will need an auxiliary vector,
- * which is acquired from
- * GrowingVectorMemory.
- */
- template <typename somenumber>
- void Jacobi_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one SOR step on <tt>v</tt>.
- * Performs a direct SOR step
- * with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void SOR_step (Vector<somenumber> &v,
- const Vector<somenumber> &b,
- const number om = 1.) const;
-
- /**
- * Do one adjoint SOR step on
- * <tt>v</tt>. Performs a direct
- * TSOR step with right hand side
- * <tt>b</tt>.
- */
- template <typename somenumber>
- void TSOR_step (Vector<somenumber> &v,
+ /**
+ * Do one Jacobi step on
+ * <tt>v</tt>. Performs a direct
+ * Jacobi step with right hand
+ * side <tt>b</tt>. This function
+ * will need an auxiliary vector,
+ * which is acquired from
+ * GrowingVectorMemory.
+ */
+ template <typename somenumber>
+ void Jacobi_step (Vector<somenumber> &v,
const Vector<somenumber> &b,
const number om = 1.) const;
template <typename number>
SparseMatrix<number>::SparseMatrix (const SparsityPattern &c,
- const IdentityMatrix &id)
+ const IdentityMatrix &id)
- :
- cols(0, "SparseMatrix"),
- val(0),
- max_len(0)
+ :
+ cols(0, "SparseMatrix"),
+ val(0),
+ max_len(0)
{
Assert (c.n_rows() == id.m(), ExcDimensionMismatch (c.n_rows(), id.m()));
Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n()));
const unsigned int n = src.size();
somenumber *dst_ptr = dst.begin();
const somenumber *src_ptr = src.begin();
- const std::size_t *rowstart_ptr = &cols->rowstart[0];
+ const std::size_t *rowstart_ptr = &cols->rowstart[0];
- // optimize the following loop for
- // the case that the relaxation
- // factor is one. In that case, we
- // can save one FP multiplication
- // per row
- //
- // note that for square matrices,
- // the diagonal entry is the first
- // in each row, i.e. at index
- // rowstart[i]. and we do have a
- // square matrix by above assertion
+ // optimize the following loop for
+ // the case that the relaxation
+ // factor is one. In that case, we
+ // can save one FP multiplication
+ // per row
+ //
+ // note that for square matrices,
+ // the diagonal entry is the first
+ // in each row, i.e. at index
+ // rowstart[i], and we do have a
+ // square matrix by above assertion
if (om != 1.)
for (unsigned int i=0; i<n; ++i, ++dst_ptr, ++src_ptr, ++rowstart_ptr)
*dst_ptr = om * *src_ptr / val[*rowstart_ptr];
Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n()));
const unsigned int n = src.size();
- const std::size_t *rowstart_ptr = &cols->rowstart[0];
+ const std::size_t *rowstart_ptr = &cols->rowstart[0];
somenumber *dst_ptr = &dst(0);
- // case when we have stored the position
- // just right of the diagonal (then we
- // don't have to search for it).
+ // case when we have stored the position
+ // just right of the diagonal (then we
+ // don't have to search for it).
if (pos_right_of_diagonal.size() != 0)
{
Assert (pos_right_of_diagonal.size() == dst.size(),
*/
class SparsityPattern : public Subscriptor
{
- public:
- /**
- * Typedef an iterator class that allows
- * to walk over all nonzero elements of a
- * sparsity pattern.
- */
- typedef
- SparsityPatternIterators::Iterator
- const_iterator;
-
- /**
- * Typedef an iterator class that allows
- * to walk over the nonzero elements of a
- * row of a sparsity pattern.
- */
- typedef
- const unsigned int * row_iterator;
-
- /**
- * Typedef an iterator class that allows
- * to walk over all nonzero elements of a
- * sparsity pattern.
- *
- * Since the iterator does not allow to
- * modify the sparsity pattern, this type
- * is the same as that for @p
- * const_iterator.
- */
- typedef
- SparsityPatternIterators::Iterator
- iterator;
-
-
- /**
- * Define a value which is used
- * to indicate that a certain
- * value in the #colnums array
- * is unused, i.e. does not
- * represent a certain column
- * number index.
- *
- * Indices with this invalid
- * value are used to insert new
- * entries to the sparsity
- * pattern using the add() member
- * function, and are removed when
- * calling compress().
- *
- * You should not assume that the
- * variable declared here has a
- * certain value. The
- * initialization is given here
- * only to enable the compiler to
- * perform some optimizations,
- * but the actual value of the
- * variable may change over time.
- */
- static const unsigned int invalid_entry = numbers::invalid_unsigned_int;
-
- /**
- * @name Construction and setup
- * Constructors, destructor; functions initializing, copying and filling an object.
- */
+ public:
+ /**
+ * Typedef an iterator class that allows one
+ * to walk over all nonzero elements of a
+ * sparsity pattern.
+ */
+ typedef
+ SparsityPatternIterators::Iterator
+ const_iterator;
+
+ /**
+ * Typedef an iterator class that allows one
+ * to walk over the nonzero elements of a
+ * row of a sparsity pattern.
+ */
+ typedef
+ const unsigned int *row_iterator;
+
+ /**
+ * Typedef an iterator class that allows one
+ * to walk over all nonzero elements of a
+ * sparsity pattern.
+ *
+ * Since the iterator does not allow one to
+ * modify the sparsity pattern, this type
+ * is the same as that for @p
+ * const_iterator.
+ */
+ typedef
+ SparsityPatternIterators::Iterator
+ iterator;
+
+
+ /**
+ * Define a value which is used
+ * to indicate that a certain
+ * value in the #colnums array
+ * is unused, i.e. does not
+ * represent a certain column
+ * number index.
+ *
+ * Indices with this invalid
+ * value are used to insert new
+ * entries to the sparsity
+ * pattern using the add() member
+ * function, and are removed when
+ * calling compress().
+ *
+ * You should not assume that the
+ * variable declared here has a
+ * certain value. The
+ * initialization is given here
+ * only to enable the compiler to
+ * perform some optimizations,
+ * but the actual value of the
+ * variable may change over time.
+ */
+ static const unsigned int invalid_entry = numbers::invalid_unsigned_int;
+
+ /**
+ * @name Construction and setup
+ * Constructors, destructor; functions initializing, copying and filling an object.
+ */
// @{
- /**
- * Initialize the matrix empty,
- * that is with no memory
- * allocated. This is useful if
- * you want such objects as
- * member variables in other
- * classes. You can make the
- * structure usable by calling
- * the reinit() function.
- */
- SparsityPattern ();
-
- /**
- * Copy constructor. This
- * constructor is only allowed to
- * be called if the matrix
- * structure to be copied is
- * empty. This is so in order to
- * prevent involuntary copies of
- * objects for temporaries, which
- * can use large amounts of
- * computing time. However, copy
- * constructors are needed if yo
- * want to use the STL data types
- * on classes like this, e.g. to
- * write such statements like
- * <tt>v.push_back
- * (SparsityPattern());</tt>,
- * with <tt>v</tt> a vector of
- * SparsityPattern objects.
- *
- * Usually, it is sufficient to
- * use the explicit keyword to
- * disallow unwanted temporaries,
- * but for the STL vectors, this
- * does not work. Since copying a
- * structure like this is not
- * useful anyway because multiple
- * matrices can use the same
- * sparsity structure, copies are
- * only allowed for empty
- * objects, as described above.
- */
- SparsityPattern (const SparsityPattern &);
-
- /**
- * Initialize a rectangular
- * matrix.
- *
- * @arg m number of rows
- * @arg n number of columns
- * @arg max_per_row maximum
- * number of nonzero entries per row
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal(). This
- * takes effect for quadratic
- * matrices only.
- */
- SparsityPattern (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const bool optimize_diagonal = true);
-
- /**
- * Initialize a rectangular
- * matrix.
- *
- * @arg m number of rows
- * @arg n number of columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal(). This
- * takes effect for quadratic
- * matrices only.
- */
- SparsityPattern (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int>& row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * Initialize a quadratic matrix
- * of dimension <tt>n</tt> with
- * at most <tt>max_per_row</tt>
- * nonzero entries per row.
- *
- * This constructor automatically
- * enables optimized storage of
- * diagonal elements. To avoid
- * this, use the constructor
- * taking row and column numbers
- * separately.
- */
- SparsityPattern (const unsigned int n,
- const unsigned int max_per_row);
-
- /**
- * Initialize a quadratic matrix.
- *
- * @arg m number of rows and columns
- *
- * @arg row_lengths possible
- * number of nonzero entries for
- * each row. This vector must
- * have one entry for each row.
- *
- * @arg optimize_diagonal store
- * diagonal entries first in row;
- * see optimize_diagonal().
- */
- SparsityPattern (const unsigned int m,
- const std::vector<unsigned int>& row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * Make a copy with extra off-diagonals.
- *
- * This constructs objects intended for
- * the application of the ILU(n)-method
- * or other incomplete decompositions.
- * Therefore, additional to the original
- * entry structure, space for
- * <tt>extra_off_diagonals</tt>
- * side-diagonals is provided on both
- * sides of the main diagonal.
- *
- * <tt>max_per_row</tt> is the
- * maximum number of nonzero
- * elements per row which this
- * structure is to hold. It is
- * assumed that this number is
- * sufficiently large to
- * accommodate both the elements
- * in <tt>original</tt> as well
- * as the new off-diagonal
- * elements created by this
- * constructor. You will usually
- * want to give the same number
- * as you gave for
- * <tt>original</tt> plus the
- * number of side diagonals times
- * two. You may however give a
- * larger value if you wish to
- * add further nonzero entries
- * for the decomposition based on
- * other criteria than their
- * being on side-diagonals.
- *
- * This function requires that
- * <tt>original</tt> refers to a
- * quadratic matrix structure.
- * It must be compressed. The
- * matrix structure is not
- * compressed after this function
- * finishes.
- */
- SparsityPattern (const SparsityPattern &original,
- const unsigned int max_per_row,
- const unsigned int extra_off_diagonals);
-
- /**
- * Destructor.
- */
- ~SparsityPattern ();
-
- /**
- * Copy operator. For this the
- * same holds as for the copy
- * constructor: it is declared,
- * defined and fine to be called,
- * but the latter only for empty
- * objects.
- */
- SparsityPattern & operator = (const SparsityPattern &);
-
- /**
- * Reallocate memory and set up data
- * structures for a new matrix with
- * <tt>m </tt>rows and <tt>n</tt> columns,
- * with at most <tt>max_per_row</tt>
- * nonzero entries per row.
- *
- * This function simply maps its
- * operations to the other
- * <tt>reinit</tt> function.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const unsigned int max_per_row,
- const bool optimize_diagonal = true);
-
- /**
- * Reallocate memory for a matrix
- * of size <tt>m x n</tt>. The
- * number of entries for each row
- * is taken from the array
- * <tt>row_lengths</tt> which has to
- * give this number of each row
- * <tt>i=1...m</tt>.
- *
- * If <tt>m*n==0</tt> all memory is freed,
- * resulting in a total reinitialization
- * of the object. If it is nonzero, new
- * memory is only allocated if the new
- * size extends the old one. This is done
- * to save time and to avoid fragmentation
- * of the heap.
- *
- * If the number of rows equals
- * the number of columns and the
- * last parameter is true,
- * diagonal elements are stored
- * first in each row to allow
- * optimized access in relaxation
- * methods of SparseMatrix.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * Same as above, but with a
- * VectorSlice argument instead.
- */
- void reinit (const unsigned int m,
- const unsigned int n,
- const VectorSlice<const std::vector<unsigned int> > &row_lengths,
- const bool optimize_diagonal = true);
-
- /**
- * This function compresses the sparsity
- * structure that this object represents.
- * It does so by eliminating unused
- * entries and sorting the remaining ones
- * to allow faster access by usage of
- * binary search algorithms. A special
- * sorting scheme is used for the
- * diagonal entry of quadratic matrices,
- * which is always the first entry of
- * each row.
- *
- * The memory which is no more
- * needed is released.
- *
- * SparseMatrix objects require the
- * SparsityPattern objects they are
- * initialized with to be compressed, to
- * reduce memory requirements.
- */
- void compress ();
-
- /**
- * This function can be used as a
- * replacement for reinit(),
- * subsequent calls to add() and
- * a final call to close() if you
- * know exactly in advance the
- * entries that will form the
- * matrix sparsity pattern.
- *
- * The first two parameters
- * determine the size of the
- * matrix. For the two last ones,
- * note that a sparse matrix can
- * be described by a sequence of
- * rows, each of which is
- * represented by a sequence of
- * pairs of column indices and
- * values. In the present
- * context, the begin() and
- * end() parameters designate
- * iterators (of forward iterator
- * type) into a container, one
- * representing one row. The
- * distance between begin()
- * and end() should therefore
- * be equal to
- * n_rows(). These iterators
- * may be iterators of
- * <tt>std::vector</tt>,
- * <tt>std::list</tt>, pointers into a
- * C-style array, or any other
- * iterator satisfying the
- * requirements of a forward
- * iterator. The objects pointed
- * to by these iterators
- * (i.e. what we get after
- * applying <tt>operator*</tt> or
- * <tt>operator-></tt> to one of these
- * iterators) must be a container
- * itself that provides functions
- * <tt>begin</tt> and <tt>end</tt>
- * designating a range of
- * iterators that describe the
- * contents of one
- * line. Dereferencing these
- * inner iterators must either
- * yield a pair of an unsigned
- * integer as column index and a
- * value of arbitrary type (such
- * a type would be used if we
- * wanted to describe a sparse
- * matrix with one such object),
- * or simply an unsigned integer
- * (of we only wanted to describe
- * a sparsity pattern). The
- * function is able to determine
- * itself whether an unsigned
- * integer or a pair is what we
- * get after dereferencing the
- * inner iterators, through some
- * template magic.
- *
- * While the order of the outer
- * iterators denotes the
- * different rows of the matrix,
- * the order of the inner
- * iterator denoting the columns
- * does not matter, as they are
- * sorted internal to this
- * function anyway.
- *
- * Since that all sounds very
- * complicated, consider the
- * following example code, which
- * may be used to fill a sparsity
- * pattern:
- * @code
- * std::vector<std::vector<unsigned int> > column_indices (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
- * // generate necessary columns in this row
- * fill_row (column_indices[row]);
- *
- * sparsity.copy_from (n_rows, n_cols,
- * column_indices.begin(),
- * column_indices.end());
- * @endcode
- *
- * Note that this example works
- * since the iterators
- * dereferenced yield containers
- * with functions <tt>begin</tt> and
- * <tt>end</tt> (namely
- * <tt>std::vector</tt>s), and the
- * inner iterators dereferenced
- * yield unsigned integers as
- * column indices. Note that we
- * could have replaced each of
- * the two <tt>std::vector</tt>
- * occurrences by <tt>std::list</tt>,
- * and the inner one by
- * <tt>std::set</tt> as well.
- *
- * Another example would be as
- * follows, where we initialize a
- * whole matrix, not only a
- * sparsity pattern:
- * @code
- * std::vector<std::map<unsigned int,double> > entries (n_rows);
- * for (unsigned int row=0; row<n_rows; ++row)
- * // generate necessary pairs of columns
- * // and corresponding values in this row
- * fill_row (entries[row]);
- *
- * sparsity.copy_from (n_rows, n_cols,
- * column_indices.begin(),
- * column_indices.end());
- * matrix.reinit (sparsity);
- * matrix.copy_from (column_indices.begin(),
- * column_indices.end());
- * @endcode
- *
- * This example works because
- * dereferencing iterators of the
- * inner type yields a pair of
- * unsigned integers and a value,
- * the first of which we take as
- * column index. As previously,
- * the outer <tt>std::vector</tt>
- * could be replaced by
- * <tt>std::list</tt>, and the inner
- * <tt>std::map<unsigned int,double></tt>
- * could be replaced by
- * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
- * or a list or set of such
- * pairs, as they all return
- * iterators that point to such
- * pairs.
- */
- template <typename ForwardIterator>
- void copy_from (const unsigned int n_rows,
- const unsigned int n_cols,
- const ForwardIterator begin,
- const ForwardIterator end,
- const bool optimize_diagonal = true);
-
- /**
- * Copy data from an object of type
- * CompressedSparsityPattern,
- * CompressedSetSparsityPattern or
- * CompressedSimpleSparsityPattern.
- * Previous content of this object is
- * lost, and the sparsity pattern is in
- * compressed mode afterwards.
- */
- template <typename CompressedSparsityType>
- void copy_from (const CompressedSparsityType &csp,
- const bool optimize_diagonal = true);
-
- /**
- * Take a full matrix and use its
- * nonzero entries to generate a
- * sparse matrix entry pattern
- * for this object.
- *
- * Previous content of this
- * object is lost, and the
- * sparsity pattern is in
- * compressed mode afterwards.
- */
- template <typename number>
- void copy_from (const FullMatrix<number> &matrix,
- const bool optimize_diagonal = true);
-
- /**
- * Make the sparsity pattern
- * symmetric by adding the
- * sparsity pattern of the
- * transpose object.
- *
- * This function throws an
- * exception if the sparsity
- * pattern does not represent a
- * quadratic matrix.
- */
- void symmetrize ();
-
- /**
- * Add a nonzero entry to the matrix.
- * This function may only be called
- * for non-compressed sparsity patterns.
- *
- * If the entry already exists, nothing
- * bad happens.
- */
- void add (const unsigned int i,
- const unsigned int j);
-
- /**
- * Add several nonzero entries to the
- * specified matrix row. This function
- * may only be called for
- * non-compressed sparsity patterns.
- *
- * If some of the entries already
- * exist, nothing bad happens.
- */
- template <typename ForwardIterator>
- void add_entries (const unsigned int row,
- ForwardIterator begin,
- ForwardIterator end,
- const bool indices_are_sorted = false);
+ /**
+ * Initialize the matrix empty,
+ * that is with no memory
+ * allocated. This is useful if
+ * you want such objects as
+ * member variables in other
+ * classes. You can make the
+ * structure usable by calling
+ * the reinit() function.
+ */
+ SparsityPattern ();
+
+ /**
+ * Copy constructor. This
+ * constructor is only allowed to
+ * be called if the matrix
+ * structure to be copied is
+ * empty. This is so in order to
+ * prevent involuntary copies of
+ * objects for temporaries, which
+ * can use large amounts of
+ * computing time. However, copy
+ * constructors are needed if you
+ * want to use the STL data types
+ * on classes like this, e.g. to
+ * write such statements like
+ * <tt>v.push_back
+ * (SparsityPattern());</tt>,
+ * with <tt>v</tt> a vector of
+ * SparsityPattern objects.
+ *
+ * Usually, it is sufficient to
+ * use the explicit keyword to
+ * disallow unwanted temporaries,
+ * but for the STL vectors, this
+ * does not work. Since copying a
+ * structure like this is not
+ * useful anyway because multiple
+ * matrices can use the same
+ * sparsity structure, copies are
+ * only allowed for empty
+ * objects, as described above.
+ */
+ SparsityPattern (const SparsityPattern &);
+
+ /**
+ * Initialize a rectangular
+ * matrix.
+ *
+ * @arg m number of rows
+ * @arg n number of columns
+ * @arg max_per_row maximum
+ * number of nonzero entries per row
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal(). This
+ * takes effect for quadratic
+ * matrices only.
+ */
+ SparsityPattern (const unsigned int m,
+ const unsigned int n,
+ const unsigned int max_per_row,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Initialize a rectangular
+ * matrix.
+ *
+ * @arg m number of rows
+ * @arg n number of columns
+ *
+ * @arg row_lengths possible
+ * number of nonzero entries for
+ * each row. This vector must
+ * have one entry for each row.
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal(). This
+ * takes effect for quadratic
+ * matrices only.
+ */
+ SparsityPattern (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Initialize a quadratic matrix
+ * of dimension <tt>n</tt> with
+ * at most <tt>max_per_row</tt>
+ * nonzero entries per row.
+ *
+ * This constructor automatically
+ * enables optimized storage of
+ * diagonal elements. To avoid
+ * this, use the constructor
+ * taking row and column numbers
+ * separately.
+ */
+ SparsityPattern (const unsigned int n,
+ const unsigned int max_per_row);
+
+ /**
+ * Initialize a quadratic matrix.
+ *
+ * @arg m number of rows and columns
+ *
+ * @arg row_lengths possible
+ * number of nonzero entries for
+ * each row. This vector must
+ * have one entry for each row.
+ *
+ * @arg optimize_diagonal store
+ * diagonal entries first in row;
+ * see optimize_diagonal().
+ */
+ SparsityPattern (const unsigned int m,
+ const std::vector<unsigned int> &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Make a copy with extra off-diagonals.
+ *
+ * This constructs objects intended for
+ * the application of the ILU(n)-method
+ * or other incomplete decompositions.
+ * Therefore, in addition to the original
+ * entry structure, space for
+ * <tt>extra_off_diagonals</tt>
+ * side-diagonals is provided on both
+ * sides of the main diagonal.
+ *
+ * <tt>max_per_row</tt> is the
+ * maximum number of nonzero
+ * elements per row which this
+ * structure is to hold. It is
+ * assumed that this number is
+ * sufficiently large to
+ * accommodate both the elements
+ * in <tt>original</tt> as well
+ * as the new off-diagonal
+ * elements created by this
+ * constructor. You will usually
+ * want to give the same number
+ * as you gave for
+ * <tt>original</tt> plus the
+ * number of side diagonals times
+ * two. You may however give a
+ * larger value if you wish to
+ * add further nonzero entries
+ * for the decomposition based on
+ * other criteria than their
+ * being on side-diagonals.
+ *
+ * This function requires that
+ * <tt>original</tt> refers to a
+ * quadratic matrix structure.
+ * It must be compressed. The
+ * matrix structure is not
+ * compressed after this function
+ * finishes.
+ */
- SparsityPattern (const SparsityPattern &original,
++ SparsityPattern (const SparsityPattern &original,
+ const unsigned int max_per_row,
+ const unsigned int extra_off_diagonals);
+
+ /**
+ * Destructor.
+ */
+ ~SparsityPattern ();
+
+ /**
+ * Copy operator. For this the
+ * same holds as for the copy
+ * constructor: it is declared,
+ * defined and fine to be called,
+ * but the latter only for empty
+ * objects.
+ */
+ SparsityPattern &operator = (const SparsityPattern &);
+
+ /**
+ * Reallocate memory and set up data
+ * structures for a new matrix with
+ * <tt>m</tt> rows and <tt>n</tt> columns,
+ * with at most <tt>max_per_row</tt>
+ * nonzero entries per row.
+ *
+ * This function simply maps its
+ * operations to the other
+ * <tt>reinit</tt> function.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const unsigned int max_per_row,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Reallocate memory for a matrix
+ * of size <tt>m x n</tt>. The
+ * number of entries for each row
+ * is taken from the array
+ * <tt>row_lengths</tt> which has to
+ * give this number for each row
+ * <tt>i=1...m</tt>.
+ *
+ * If <tt>m*n==0</tt> all memory is freed,
+ * resulting in a total reinitialization
+ * of the object. If it is nonzero, new
+ * memory is only allocated if the new
+ * size extends the old one. This is done
+ * to save time and to avoid fragmentation
+ * of the heap.
+ *
+ * If the number of rows equals
+ * the number of columns and the
+ * last parameter is true,
+ * diagonal elements are stored
+ * first in each row to allow
+ * optimized access in relaxation
+ * methods of SparseMatrix.
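+ *
+ * As a sketch (with placeholder
+ * sizes <tt>m</tt> and <tt>n</tt>),
+ * a pattern allowing at most five
+ * entries per row could be set up
+ * like this:
+ * @code
+ * std::vector<unsigned int> row_lengths (m, 5);
+ * SparsityPattern sparsity;
+ * sparsity.reinit (m, n, row_lengths);
+ * @endcode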
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Same as above, but with a
+ * VectorSlice argument instead.
+ */
+ void reinit (const unsigned int m,
+ const unsigned int n,
+ const VectorSlice<const std::vector<unsigned int> > &row_lengths,
+ const bool optimize_diagonal = true);
+
+ /**
+ * This function compresses the sparsity
+ * structure that this object represents.
+ * It does so by eliminating unused
+ * entries and sorting the remaining ones
+ * to allow faster access by usage of
+ * binary search algorithms. A special
+ * sorting scheme is used for the
+ * diagonal entry of quadratic matrices,
+ * which is always the first entry of
+ * each row.
+ *
+ * The memory which is no longer
+ * needed is released.
+ *
+ * SparseMatrix objects require the
+ * SparsityPattern objects they are
+ * initialized with to be compressed, to
+ * reduce memory requirements.
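+ *
+ * As a rough sketch of the usual
+ * lifecycle (with <tt>n</tt> a
+ * placeholder size), a tridiagonal
+ * pattern might be built as
+ * follows:
+ * @code
+ * SparsityPattern sparsity (n, n, 3);
+ * for (unsigned int i=0; i<n; ++i)
+ *   {
+ *     if (i > 0)   sparsity.add (i, i-1);
+ *     sparsity.add (i, i);
+ *     if (i < n-1) sparsity.add (i, i+1);
+ *   }
+ * sparsity.compress ();
+ * @endcode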
+ */
+ void compress ();
+
+ /**
+ * This function can be used as a
+ * replacement for reinit(),
+ * subsequent calls to add() and
+ * a final call to close() if you
+ * know exactly in advance the
+ * entries that will form the
+ * matrix sparsity pattern.
+ *
+ * The first two parameters
+ * determine the size of the
+ * matrix. For the two last ones,
+ * note that a sparse matrix can
+ * be described by a sequence of
+ * rows, each of which is
+ * represented by a sequence of
+ * pairs of column indices and
+ * values. In the present
+ * context, the begin() and
+ * end() parameters designate
+ * iterators (of forward iterator
+ * type) into a container, each
+ * element of which represents
+ * one row. The
+ * distance between begin()
+ * and end() should therefore
+ * be equal to
+ * n_rows(). These iterators
+ * may be iterators of
+ * <tt>std::vector</tt>,
+ * <tt>std::list</tt>, pointers into a
+ * C-style array, or any other
+ * iterator satisfying the
+ * requirements of a forward
+ * iterator. The objects pointed
+ * to by these iterators
+ * (i.e. what we get after
+ * applying <tt>operator*</tt> or
+ * <tt>operator-></tt> to one of these
+ * iterators) must be a container
+ * itself that provides functions
+ * <tt>begin</tt> and <tt>end</tt>
+ * designating a range of
+ * iterators that describe the
+ * contents of one
+ * line. Dereferencing these
+ * inner iterators must either
+ * yield a pair of an unsigned
+ * integer as column index and a
+ * value of arbitrary type (such
+ * a type would be used if we
+ * wanted to describe a sparse
+ * matrix with one such object),
+ * or simply an unsigned integer
+ * (if we only wanted to describe
+ * a sparsity pattern). The
+ * function is able to determine
+ * itself whether an unsigned
+ * integer or a pair is what we
+ * get after dereferencing the
+ * inner iterators, through some
+ * template magic.
+ *
+ * While the order of the outer
+ * iterators denotes the
+ * different rows of the matrix,
+ * the order of the inner
+ * iterator denoting the columns
+ * does not matter, as they are
+ * sorted internally by this
+ * function anyway.
+ *
+ * Since that all sounds very
+ * complicated, consider the
+ * following example code, which
+ * may be used to fill a sparsity
+ * pattern:
+ * @code
+ * std::vector<std::vector<unsigned int> > column_indices (n_rows);
+ * for (unsigned int row=0; row<n_rows; ++row)
+ * // generate necessary columns in this row
+ * fill_row (column_indices[row]);
+ *
+ * sparsity.copy_from (n_rows, n_cols,
+ * column_indices.begin(),
+ * column_indices.end());
+ * @endcode
+ *
+ * Note that this example works
+ * since the iterators
+ * dereferenced yield containers
+ * with functions <tt>begin</tt> and
+ * <tt>end</tt> (namely
+ * <tt>std::vector</tt>s), and the
+ * inner iterators dereferenced
+ * yield unsigned integers as
+ * column indices. Note that we
+ * could have replaced each of
+ * the two <tt>std::vector</tt>
+ * occurrences by <tt>std::list</tt>,
+ * and the inner one by
+ * <tt>std::set</tt> as well.
+ *
+ * Another example would be as
+ * follows, where we initialize a
+ * whole matrix, not only a
+ * sparsity pattern:
+ * @code
+ * std::vector<std::map<unsigned int,double> > entries (n_rows);
+ * for (unsigned int row=0; row<n_rows; ++row)
+ * // generate necessary pairs of columns
+ * // and corresponding values in this row
+ * fill_row (entries[row]);
+ *
+ * sparsity.copy_from (n_rows, n_cols,
+ *                     entries.begin(),
+ *                     entries.end());
+ * matrix.reinit (sparsity);
+ * matrix.copy_from (entries.begin(),
+ *                   entries.end());
+ * @endcode
+ *
+ * This example works because
+ * dereferencing iterators of the
+ * inner type yields a pair of an
+ * unsigned integer and a value,
+ * the first of which we take as
+ * the column index. As previously,
+ * the outer <tt>std::vector</tt>
+ * could be replaced by
+ * <tt>std::list</tt>, and the inner
+ * <tt>std::map<unsigned int,double></tt>
+ * could be replaced by
+ * <tt>std::vector<std::pair<unsigned int,double> ></tt>,
+ * or a list or set of such
+ * pairs, as they all return
+ * iterators that point to such
+ * pairs.
+ */
+ template <typename ForwardIterator>
+ void copy_from (const unsigned int n_rows,
+ const unsigned int n_cols,
+ const ForwardIterator begin,
+ const ForwardIterator end,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Copy data from an object of type
+ * CompressedSparsityPattern,
+ * CompressedSetSparsityPattern or
+ * CompressedSimpleSparsityPattern.
+ * Previous content of this object is
+ * lost, and the sparsity pattern is in
+ * compressed mode afterwards.
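+ *
+ * A typical use, sketched here
+ * under the assumption that a
+ * DoFHandler object
+ * <tt>dof_handler</tt> is
+ * available, is
+ * @code
+ * CompressedSparsityPattern csp (dof_handler.n_dofs());
+ * DoFTools::make_sparsity_pattern (dof_handler, csp);
+ * SparsityPattern sparsity;
+ * sparsity.copy_from (csp);
+ * @endcode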
+ */
+ template <typename CompressedSparsityType>
+ void copy_from (const CompressedSparsityType &csp,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Take a full matrix and use its
+ * nonzero entries to generate a
+ * sparse matrix entry pattern
+ * for this object.
+ *
+ * Previous content of this
+ * object is lost, and the
+ * sparsity pattern is in
+ * compressed mode afterwards.
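+ *
+ * For example, with a placeholder
+ * size <tt>n</tt>:
+ * @code
+ * FullMatrix<double> full (n, n);
+ * // ... fill the full matrix ...
+ * SparsityPattern sparsity;
+ * sparsity.copy_from (full);
+ * @endcode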
+ */
+ template <typename number>
+ void copy_from (const FullMatrix<number> &matrix,
+ const bool optimize_diagonal = true);
+
+ /**
+ * Make the sparsity pattern
+ * symmetric by adding the
+ * sparsity pattern of the
+ * transpose object.
+ *
+ * This function throws an
+ * exception if the sparsity
+ * pattern does not represent a
+ * quadratic matrix.
+ */
+ void symmetrize ();
+
+ /**
+ * Add a nonzero entry to the matrix.
+ * This function may only be called
+ * for non-compressed sparsity patterns.
+ *
+ * If the entry already exists, nothing
+ * bad happens.
+ */
+ void add (const unsigned int i,
+ const unsigned int j);
+
+ /**
+ * Add several nonzero entries to the
+ * specified matrix row. This function
+ * may only be called for
+ * non-compressed sparsity patterns.
+ *
+ * If some of the entries already
+ * exist, nothing bad happens.
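+ *
+ * A minimal sketch, with
+ * placeholder column indices for
+ * row 1:
+ * @code
+ * std::vector<unsigned int> columns;
+ * columns.push_back (0);
+ * columns.push_back (2);
+ * columns.push_back (5);
+ * sparsity.add_entries (1, columns.begin(), columns.end());
+ * @endcode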
+ */
+ template <typename ForwardIterator>
+ void add_entries (const unsigned int row,
+ ForwardIterator begin,
+ ForwardIterator end,
+ const bool indices_are_sorted = false);
// @}
- /**
- * @name Iterators
- */
+ /**
+ * @name Iterators
+ */
// @{
- /**
- * STL-like iterator with the first entry
- * of the matrix. The resulting iterator
- * can be used to walk over all nonzero
- * entries of the sparsity pattern.
- */
- inline iterator begin () const;
-
- /**
- * Final iterator.
- */
- inline iterator end () const;
-
- /**
- * STL-like iterator with the first entry
- * of row <tt>r</tt>.
- *
- * Note that if the given row is empty,
- * i.e. does not contain any nonzero
- * entries, then the iterator returned by
- * this function equals
- * <tt>end(r)</tt>. Note also that the
- * iterator may not be dereferencable in
- * that case.
- */
- inline iterator begin (const unsigned int r) const;
-
- /**
- * Final iterator of row <tt>r</tt>. It
- * points to the first element past the
- * end of line @p r, or past the end of
- * the entire sparsity pattern.
- *
- * Note that the end iterator is not
- * necessarily dereferencable. This is in
- * particular the case if it is the end
- * iterator for the last row of a matrix.
- */
- inline iterator end (const unsigned int r) const;
-
- /**
- * STL-like iterator with the first entry
- * of row <tt>r</tt>.
- *
- * Note that if the given row is empty,
- * i.e. does not contain any nonzero
- * entries, then the iterator returned by
- * this function equals
- * <tt>end(r)</tt>. Note also that the
- * iterator may not be dereferencable in
- * that case.
- */
- inline row_iterator row_begin (const unsigned int r) const;
-
- /**
- * Final iterator of row <tt>r</tt>. It
- * points to the first element past the
- * end of line @p r, or past the end of
- * the entire sparsity pattern.
- *
- * Note that the end iterator is not
- * necessarily dereferencable. This is in
- * particular the case if it is the end
- * iterator for the last row of a matrix.
- */
- inline row_iterator row_end (const unsigned int r) const;
+ /**
+ * STL-like iterator with the first entry
+ * of the matrix. The resulting iterator
+ * can be used to walk over all nonzero
+ * entries of the sparsity pattern.
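+ *
+ * A sketch of such a loop
+ * (assuming the iterator's
+ * accessor provides <tt>row()</tt>
+ * and <tt>column()</tt>) might
+ * read:
+ * @code
+ * for (SparsityPattern::iterator p = sparsity.begin();
+ *      p != sparsity.end(); ++p)
+ *   std::cout << p->row() << ' ' << p->column() << std::endl;
+ * @endcode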
+ */
+ inline iterator begin () const;
+
+ /**
+ * Final iterator.
+ */
+ inline iterator end () const;
+
+ /**
+ * STL-like iterator with the first entry
+ * of row <tt>r</tt>.
+ *
+ * Note that if the given row is empty,
+ * i.e. does not contain any nonzero
+ * entries, then the iterator returned by
+ * this function equals
+ * <tt>end(r)</tt>. Note also that the
+ * iterator may not be dereferencable in
+ * that case.
+ */
+ inline iterator begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>. It
+ * points to the first element past the
+ * end of line @p r, or past the end of
+ * the entire sparsity pattern.
+ *
+ * Note that the end iterator is not
+ * necessarily dereferencable. This is in
+ * particular the case if it is the end
+ * iterator for the last row of a matrix.
+ */
+ inline iterator end (const unsigned int r) const;
+
+ /**
+ * STL-like iterator with the first entry
+ * of row <tt>r</tt>.
+ *
+ * Note that if the given row is empty,
+ * i.e. does not contain any nonzero
+ * entries, then the iterator returned by
+ * this function equals
+ * <tt>end(r)</tt>. Note also that the
+ * iterator may not be dereferencable in
+ * that case.
+ */
+ inline row_iterator row_begin (const unsigned int r) const;
+
+ /**
+ * Final iterator of row <tt>r</tt>. It
+ * points to the first element past the
+ * end of line @p r, or past the end of
+ * the entire sparsity pattern.
+ *
+ * Note that the end iterator is not
+ * necessarily dereferencable. This is in
+ * particular the case if it is the end
+ * iterator for the last row of a matrix.
+ */
+ inline row_iterator row_end (const unsigned int r) const;
// @}
- /**
- * @name Querying information
- */
+ /**
+ * @name Querying information
+ */
// @{
- /**
- * Test for equality of two SparsityPatterns.
- */
- bool operator == (const SparsityPattern &) const;
-
- /**
- * Return whether the object is empty. It
- * is empty if no memory is allocated,
- * which is the same as that both
- * dimensions are zero.
- */
- bool empty () const;
-
- /**
- * Return the maximum number of entries per
- * row. Before compression, this equals the
- * number given to the constructor, while
- * after compression, it equals the maximum
- * number of entries actually allocated by
- * the user.
- */
- unsigned int max_entries_per_row () const;
-
- /**
- * Compute the bandwidth of the matrix
- * represented by this structure. The
- * bandwidth is the maximum of $|i-j|$
- * for which the index pair $(i,j)$
- * represents a nonzero entry of the
- * matrix. Consequently, the maximum
- * bandwidth a $n\times m$ matrix can
- * have is $\max\{n-1,m-1\}$.
- */
- unsigned int bandwidth () const;
-
- /**
- * Return the number of nonzero elements of
- * this matrix. Actually, it returns the
- * number of entries in the sparsity
- * pattern; if any of the entries should
- * happen to be zero, it is counted
- * anyway.
- *
- * This function may only be called if the
- * matrix struct is compressed. It does not
- * make too much sense otherwise anyway.
- */
- std::size_t n_nonzero_elements () const;
-
- /**
- * Return whether the structure is
- * compressed or not.
- */
- bool is_compressed () const;
-
- /**
- * Return number of rows of this
- * matrix, which equals the dimension
- * of the image space.
- */
- inline unsigned int n_rows () const;
-
- /**
- * Return number of columns of this
- * matrix, which equals the dimension
- * of the range space.
- */
- inline unsigned int n_cols () const;
-
- /**
- * Number of entries in a specific row.
- */
- unsigned int row_length (const unsigned int row) const;
-
- /**
- * Determine whether the matrix
- * uses special convention for
- * quadratic matrices.
- *
- * A return value <tt>true</tt> means
- * that diagonal elements are stored
- * first in each row. A number of
- * functions in this class and the
- * library in general, for example
- * relaxation methods like Jacobi() and
- * SOR(), require this to make their
- * operations more efficient, since they
- * need to quickly access the diagonal
- * elements and do not have to search for
- * them if they are the first element of
- * each row. A side effect of this scheme
- * is that each row contains at least one
- * element, even if the row is empty
- * (i.e. the diagonal element exists, but
- * has value zero).
- *
- * A return value <tt>false</tt> means
- * that diagonal elements are stored
- * anywhere in the row, or not at all. In
- * particular, a row or even the whole
- * matrix may be empty. This can be used
- * if you have block matrices where the
- * off-diagonal blocks are quadratic but
- * are never used for operations like the
- * ones mentioned above. In this case,
- * some memory can be saved by not using
- * the diagonal storage optimization.
- */
- bool optimize_diagonal () const;
-
- /**
- * Return whether this object stores only
- * those entries that have been added
- * explicitly, or if the sparsity pattern
- * contains elements that have been added
- * through other means (implicitly) while
- * building it. For the current class,
- * the result is true iff optimize_diag
- * in the constructor or reinit() calls
- * has been set to false, or if the
- * represented matrix is not square.
- *
- * This function mainly serves the
- * purpose of describing the current
- * class in cases where several kinds of
- * sparsity patterns can be passed as
- * template arguments.
- */
- bool stores_only_added_elements () const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object. See
- * MemoryConsumption.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Test for equality of two SparsityPatterns.
+ */
+ bool operator == (const SparsityPattern &) const;
+
+ /**
+ * Return whether the object is empty. It
+ * is empty if no memory is allocated,
+ * which is the same as saying that
+ * both dimensions are zero.
+ */
+ bool empty () const;
+
+ /**
+ * Return the maximum number of entries per
+ * row. Before compression, this equals the
+ * number given to the constructor, while
+ * after compression, it equals the maximum
+ * number of entries actually allocated by
+ * the user.
+ */
+ unsigned int max_entries_per_row () const;
+
+ /**
+ * Compute the bandwidth of the matrix
+ * represented by this structure. The
+ * bandwidth is the maximum of $|i-j|$
+ * for which the index pair $(i,j)$
+ * represents a nonzero entry of the
+ * matrix. Consequently, the maximum
+ * bandwidth a $n\times m$ matrix can
+ * have is $\max\{n-1,m-1\}$.
+ */
+ unsigned int bandwidth () const;
+
+ /**
+ * Return the number of nonzero elements of
+ * this matrix. Actually, it returns the
+ * number of entries in the sparsity
+ * pattern; if any of the entries should
+ * happen to be zero, it is counted
+ * anyway.
+ *
+ * This function may only be called if the
+ * matrix struct is compressed. It does not
+ * make too much sense otherwise anyway.
+ */
+ std::size_t n_nonzero_elements () const;
+
+ /**
+ * Return whether the structure is
+ * compressed or not.
+ */
+ bool is_compressed () const;
+
+ /**
+ * Return number of rows of this
+ * matrix, which equals the dimension
+ * of the image space.
+ */
+ inline unsigned int n_rows () const;
+
+ /**
+ * Return number of columns of this
+ * matrix, which equals the dimension
+ * of the domain space.
+ */
+ inline unsigned int n_cols () const;
+
+ /**
+ * Number of entries in a specific row.
+ */
+ unsigned int row_length (const unsigned int row) const;
+
+ /**
+ * Determine whether the matrix
+ * uses special convention for
+ * quadratic matrices.
+ *
+ * A return value <tt>true</tt> means
+ * that diagonal elements are stored
+ * first in each row. A number of
+ * functions in this class and the
+ * library in general, for example
+ * relaxation methods like Jacobi() and
+ * SOR(), require this to make their
+ * operations more efficient, since they
+ * need to quickly access the diagonal
+ * elements and do not have to search for
+ * them if they are the first element of
+ * each row. A side effect of this scheme
+ * is that each row contains at least one
+ * element, even if the row is empty
+ * (i.e. the diagonal element exists, but
+ * has value zero).
+ *
+ * A return value <tt>false</tt> means
+ * that diagonal elements are stored
+ * anywhere in the row, or not at all. In
+ * particular, a row or even the whole
+ * matrix may be empty. This can be used
+ * if you have block matrices where the
+ * off-diagonal blocks are quadratic but
+ * are never used for operations like the
+ * ones mentioned above. In this case,
+ * some memory can be saved by not using
+ * the diagonal storage optimization.
+ */
+ bool optimize_diagonal () const;
+
+ /**
+ * Return whether this object stores only
+ * those entries that have been added
+ * explicitly, or if the sparsity pattern
+ * contains elements that have been added
+ * through other means (implicitly) while
+ * building it. For the current class,
+ * the result is true if and only if
+ * <tt>optimize_diagonal</tt> in the
+ * constructor or reinit() calls
+ * has been set to false, or if the
+ * represented matrix is not square.
+ *
+ * This function mainly serves the
+ * purpose of describing the current
+ * class in cases where several kinds of
+ * sparsity patterns can be passed as
+ * template arguments.
+ */
+ bool stores_only_added_elements () const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object. See
+ * MemoryConsumption.
+ */
+ std::size_t memory_consumption () const;
// @}
- /**
- * @name Accessing entries
- */
+ /**
+ * @name Accessing entries
+ */
// @{
- /**
- * Return the index of the matrix
- * element with row number <tt>i</tt>
- * and column number <tt>j</tt>. If
- * the matrix element is not a
- * nonzero one, return
- * SparsityPattern::invalid_entry.
- *
- * This function is usually
- * called by the
- * SparseMatrix::operator()(). It
- * may only be called for
- * compressed sparsity patterns,
- * since in this case searching
- * whether the entry exists can
- * be done quite fast with a
- * binary sort algorithm because
- * the column numbers are sorted.
- *
- * If <tt>m</tt> is the number of
- * entries in <tt>row</tt>, then the
- * complexity of this function is
- * <i>log(m)</i> if the sparsity
- * pattern is compressed.
- *
- * @deprecated Use
- * SparseMatrix::const_iterator
- */
- unsigned int operator() (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * This is the inverse operation
- * to operator()(): given a
- * global index, find out row and
- * column of the matrix entry to
- * which it belongs. The returned
- * value is the pair composed of
- * row and column index.
- *
- * This function may only be
- * called if the sparsity pattern
- * is closed. The global index
- * must then be between zero and
- * n_nonzero_elements().
- *
- * If <tt>N</tt> is the number of
- * rows of this matrix, then the
- * complexity of this function is
- * <i>log(N)</i>.
- */
- std::pair<unsigned int, unsigned int>
- matrix_position (const unsigned int global_index) const;
-
- /**
- * Check if a value at a certain
- * position may be non-zero.
- */
- bool exists (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * The index of a global matrix
- * entry in its row.
- *
- * This function is analogous to
- * operator(), but it computes
- * the index not with respect to
- * the total field, but only with
- * respect to the row <tt>j</tt>.
- */
- unsigned int row_position(const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Access to column number field.
- * Return the column number of
- * the <tt>index</tt>th entry in
- * <tt>row</tt>. Note that if
- * diagonal elements are
- * optimized, the first element
- * in each row is the diagonal
- * element,
- * i.e. <tt>column_number(row,0)==row</tt>.
- *
- * If the sparsity pattern is
- * already compressed, then
- * (except for the diagonal
- * element), the entries are
- * sorted by columns,
- * i.e. <tt>column_number(row,i)</tt>
- * <tt><</tt> <tt>column_number(row,i+1)</tt>.
- */
- unsigned int column_number (const unsigned int row,
- const unsigned int index) const;
+ /**
+ * Return the index of the matrix
+ * element with row number <tt>i</tt>
+ * and column number <tt>j</tt>. If
+ * the matrix element is not a
+ * nonzero one, return
+ * SparsityPattern::invalid_entry.
+ *
+ * This function is usually
+ * called by the
+ * SparseMatrix::operator()(). It
+ * may only be called for
+ * compressed sparsity patterns,
+ * since in this case searching
+ * whether the entry exists can
+ * be done quite fast with a
+ * binary search algorithm because
+ * the column numbers are sorted.
+ *
+ * If <tt>m</tt> is the number of
+ * entries in <tt>row</tt>, then the
+ * complexity of this function is
+ * <i>log(m)</i> if the sparsity
+ * pattern is compressed.
+ *
+ * @deprecated Use
+ * SparseMatrix::const_iterator
+ */
+ unsigned int operator() (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * This is the inverse operation
+ * to operator()(): given a
+ * global index, find out row and
+ * column of the matrix entry to
+ * which it belongs. The returned
+ * value is the pair composed of
+ * row and column index.
+ *
+ * This function may only be
+ * called if the sparsity pattern
+ * is closed. The global index
+ * must then be between zero and
+ * n_nonzero_elements().
+ *
+ * If <tt>N</tt> is the number of
+ * rows of this matrix, then the
+ * complexity of this function is
+ * <i>log(N)</i>.
+ */
+ std::pair<unsigned int, unsigned int>
+ matrix_position (const unsigned int global_index) const;
+
+ /**
+ * Check if a value at a certain
+ * position may be non-zero.
+ */
+ bool exists (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * The index of a global matrix
+ * entry in its row.
+ *
+ * This function is analogous to
+ * operator(), but it computes
+ * the index not with respect to
+ * the total field, but only with
+ * respect to the row <tt>i</tt>.
+ */
+ unsigned int row_position(const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Access to column number field.
+ * Return the column number of
+ * the <tt>index</tt>th entry in
+ * <tt>row</tt>. Note that if
+ * diagonal elements are
+ * optimized, the first element
+ * in each row is the diagonal
+ * element,
+ * i.e. <tt>column_number(row,0)==row</tt>.
+ *
+ * If the sparsity pattern is
+ * already compressed, then
+ * (except for the diagonal
+ * element), the entries are
+ * sorted by columns,
+ * i.e. <tt>column_number(row,i)</tt>
+ * <tt><</tt> <tt>column_number(row,i+1)</tt>.
+ */
+ unsigned int column_number (const unsigned int row,
+ const unsigned int index) const;
// @}
namespace TrilinosWrappers
{
- /*! @addtogroup TrilinosWrappers
- *@{
- */
-
- /**
- * Blocked sparse matrix based on the TrilinosWrappers::SparseMatrix class. This
- * class implements the functions that are specific to the Trilinos SparseMatrix
- * base objects for a blocked sparse matrix, and leaves the actual work
- * relaying most of the calls to the individual blocks to the functions
- * implemented in the base class. See there also for a description of when
- * this class is useful.
- *
- * In contrast to the deal.II-type SparseMatrix class, the Trilinos matrices do
- * not have external objects for the sparsity patterns. Thus, one does not
- * determine the size of the individual blocks of a block matrix of this type
- * by attaching a block sparsity pattern, but by calling reinit() to set the
- * number of blocks and then by setting the size of each block separately. In
- * order to fix the data structures of the block matrix, it is then necessary
- * to let it know that we have changed the sizes of the underlying
- * matrices. For this, one has to call the collect_sizes() function, for much
- * the same reason as is documented with the BlockSparsityPattern class.
- *
- * @ingroup Matrix1
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008
- */
+ /*! @addtogroup TrilinosWrappers
+ *@{
+ */
+
+ /**
+ * Blocked sparse matrix based on the TrilinosWrappers::SparseMatrix class. This
+ * class implements the functions that are specific to the Trilinos SparseMatrix
+ * base objects for a blocked sparse matrix, and leaves the actual work of
+ * relaying most of the calls to the individual blocks to the functions
+ * implemented in the base class. See there also for a description of when
+ * this class is useful.
+ *
+ * In contrast to the deal.II-type SparseMatrix class, the Trilinos matrices do
+ * not have external objects for the sparsity patterns. Thus, one does not
+ * determine the size of the individual blocks of a block matrix of this type
+ * by attaching a block sparsity pattern, but by calling reinit() to set the
+ * number of blocks and then by setting the size of each block separately. In
+ * order to fix the data structures of the block matrix, it is then necessary
+ * to let it know that we have changed the sizes of the underlying
+ * matrices. For this, one has to call the collect_sizes() function, for much
+ * the same reason as is documented with the BlockSparsityPattern class.
+ *
+ * @ingroup Matrix1
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008
+ */
class BlockSparseMatrix : public BlockMatrixBase<SparseMatrix>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockMatrixBase<SparseMatrix> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * matrix.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Constructor; initializes the
- * matrix to be empty, without
- * any structure, i.e. the
- * matrix is not usable at
- * all. This constructor is
- * therefore only useful for
- * matrices which are members of
- * a class. All other matrices
- * should be created at a point
- * in the data flow where all
- * necessary information is
- * available.
- *
- * You have to initialize the
- * matrix before usage with
- * reinit(BlockSparsityPattern). The
- * number of blocks per row and
- * column are then determined by
- * that function.
- */
- BlockSparseMatrix ();
-
- /**
- * Destructor.
- */
- ~BlockSparseMatrix ();
-
- /**
- * Pseudo copy operator only copying
- * empty objects. The sizes of the block
- * matrices need to be the same.
- */
- BlockSparseMatrix &
- operator = (const BlockSparseMatrix &);
-
- /**
- * This operator assigns a scalar to a
- * matrix. Since this does usually not
- * make much sense (should we set all
- * matrix entries to this value? Only
- * the nonzero entries of the sparsity
- * pattern?), this operation is only
- * allowed if the actual value to be
- * assigned is zero. This operator only
- * exists to allow for the obvious
- * notation <tt>matrix=0</tt>, which
- * sets all elements of the matrix to
- * zero, but keep the sparsity pattern
- * previously used.
- */
- BlockSparseMatrix &
- operator = (const double d);
-
- /**
- * Resize the matrix, by setting
- * the number of block rows and
- * columns. This deletes all
- * blocks and replaces them by
- * unitialized ones, i.e. ones
- * for which also the sizes are
- * not yet set. You have to do
- * that by calling the @p reinit
- * functions of the blocks
- * themselves. Do not forget to
- * call collect_sizes() after
- * that on this object.
- *
- * The reason that you have to
- * set sizes of the blocks
- * yourself is that the sizes may
- * be varying, the maximum number
- * of elements per row may be
- * varying, etc. It is simpler
- * not to reproduce the interface
- * of the @p SparsityPattern
- * class here but rather let the
- * user call whatever function
- * she desires.
- */
- void reinit (const unsigned int n_block_rows,
- const unsigned int n_block_columns);
-
- /**
- * Resize the matrix, by using an
- * array of Epetra maps to determine
- * the %parallel distribution of the
- * individual matrices. This function
- * assumes that a quadratic block
- * matrix is generated.
- */
- template <typename BlockSparsityType>
- void reinit (const std::vector<Epetra_Map> &input_maps,
- const BlockSparsityType &block_sparsity_pattern);
-
- /**
- * Resize the matrix, by using an
- * array of index sets to determine
- * the %parallel distribution of the
- * individual matrices. This function
- * assumes that a quadratic block
- * matrix is generated.
- */
- template <typename BlockSparsityType>
- void reinit (const std::vector<IndexSet> &input_maps,
- const BlockSparsityType &block_sparsity_pattern,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Resize the matrix and initialize it
- * by the given sparsity pattern. Since
- * no distribution map is given, the
- * result is a block matrix for which
- * all elements are stored locally.
- */
- template <typename BlockSparsityType>
- void reinit (const BlockSparsityType &block_sparsity_pattern);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries stored
- * therein. It uses a threshold
- * to copy only elements whose
- * modulus is larger than the
- * threshold (so zeros in the
- * deal.II matrix can be filtered
- * away).
- */
- void reinit (const std::vector<Epetra_Map> &input_maps,
- const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
- const double drop_tolerance=1e-13);
-
- /**
- * This function initializes
- * the Trilinos matrix using
- * the deal.II sparse matrix
- * and the entries stored
- * therein. It uses a threshold
- * to copy only elements whose
- * modulus is larger than the
- * threshold (so zeros in the
- * deal.II matrix can be
- * filtered away). Since no
- * Epetra_Map is given, all the
- * elements will be locally
- * stored.
- */
- void reinit (const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
- const double drop_tolerance=1e-13);
-
- /**
- * Returns the state of the
- * matrix, i.e., whether
- * compress() needs to be called
- * after an operation requiring
- * data exchange. Does only
- * return non-true values when
- * used in <tt>debug</tt> mode,
- * since it is quite expensive to
- * keep track of all operations
- * that lead to the need for
- * compress().
- */
- bool is_compressed () const;
-
- /**
- * This function collects the
- * sizes of the sub-objects and
- * stores them in internal
- * arrays, in order to be able to
- * relay global indices into the
- * matrix to indices into the
- * subobjects. You *must* call
- * this function each time after
- * you have changed the size of
- * the sub-objects. Note that
- * this is a collective
- * operation, i.e., it needs to
- * be called on all MPI
- * processes. This command
- * internally calls the method
- * <tt>compress()</tt>, so you
- * don't need to call that
- * function in case you use
- * <tt>collect_sizes()</tt>.
- */
- void collect_sizes ();
-
- /**
- * Return the number of nonzero
- * elements of this
- * matrix.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix.
- */
- void vmult (MPI::BlockVector &dst,
- const MPI::BlockVector &src) const;
-
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M*src$ with $M$
- * being this matrix, now applied
- * to localized block vectors
- * (works only when run on one
- * processor).
- */
- void vmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void vmult (MPI::BlockVector &dst,
- const MPI::Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column, now
- * applied to localized vectors
- * (works only when run on one
- * processor).
- */
- void vmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void vmult (MPI::Vector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row, now
- * applied to localized vectors
- * (works only when run on one
- * processor).
- */
- void vmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void vmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix.
- */
- void Tvmult (MPI::BlockVector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let $dst = M^T*src$ with $M$
- * being this matrix. This
- * function does the same as
- * vmult() but takes the
- * transposed matrix, now applied
- * to localized Trilinos vectors
- * (works only when run on one
- * processor).
- */
- void Tvmult (BlockVector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row.
- */
- void Tvmult (MPI::BlockVector &dst,
- const MPI::Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block row, now
- * applied to localized Trilinos
- * vectors (works only when run
- * on one processor).
- */
- void Tvmult (BlockVector &dst,
- const Vector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column.
- */
- void Tvmult (MPI::Vector &dst,
- const MPI::BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block column, now
- * applied to localized Trilinos
- * vectors (works only when run
- * on one processor).
- */
- void Tvmult (Vector &dst,
- const BlockVector &src) const;
-
- /**
- * Matrix-vector
- * multiplication. Just like the
- * previous function, but only
- * applicable if the matrix has
- * only one block.
- */
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and
- * destination <i>dst</i> must
- * not be the same vector.
- *
- * Note that both vectors have
- * to be distributed vectors
- * generated using the same Map
- * as was used for the matrix
- * in case you work on a
- * distributed memory
- * architecture, using the
- * interface in the
- * TrilinosWrappers::MPI::BlockVector
- * class.
- */
- TrilinosScalar residual (MPI::BlockVector &dst,
- const MPI::BlockVector &x,
- const MPI::BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and
- * destination <i>dst</i> must
- * not be the same vector.
- *
- * Note that both vectors have
- * to be distributed vectors
- * generated using the same Map
- * as was used for the matrix
- * in case you work on a
- * distributed memory
- * architecture, using the
- * interface in the
- * TrilinosWrappers::BlockVector
- * class. Since the block
- * matrix is in general
- * distributed among processes,
- * this function only works
- * when running the program on
- * one processor.
- */
- TrilinosScalar residual (BlockVector &dst,
- const BlockVector &x,
- const BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block row.
- */
- TrilinosScalar residual (MPI::BlockVector &dst,
- const MPI::Vector &x,
- const MPI::BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block row.
- */
- TrilinosScalar residual (BlockVector &dst,
- const Vector &x,
- const BlockVector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block column.
- */
- TrilinosScalar residual (MPI::Vector &dst,
- const MPI::BlockVector &x,
- const MPI::Vector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block column.
- */
- TrilinosScalar residual (Vector &dst,
- const BlockVector &x,
- const Vector &b) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned. Just like the
- * previous function, but only
- * applicable if the matrix
- * only has one block.
- */
- TrilinosScalar residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const;
-
- /**
- * Make the clear() function in the
- * base class visible, though it is
- * protected.
- */
- using BlockMatrixBase<SparseMatrix>::clear;
-
- /** @addtogroup Exceptions
- * @{
- */
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleRowNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing row numbers.");
-
- /**
- * Exception
- */
- DeclException4 (ExcIncompatibleColNumbers,
- int, int, int, int,
- << "The blocks [" << arg1 << ',' << arg2 << "] and ["
- << arg3 << ',' << arg4 << "] have differing column numbers.");
- ///@}
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockMatrixBase<SparseMatrix> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * matrix.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Constructor; initializes the
+ * matrix to be empty, without
+ * any structure, i.e. the
+ * matrix is not usable at
+ * all. This constructor is
+ * therefore only useful for
+ * matrices which are members of
+ * a class. All other matrices
+ * should be created at a point
+ * in the data flow where all
+ * necessary information is
+ * available.
+ *
+ * You have to initialize the
+ * matrix before usage with
+ * reinit(BlockSparsityPattern). The
+ * number of blocks per row and
+ * column are then determined by
+ * that function.
+ */
+ BlockSparseMatrix ();
+
+ /**
+ * Destructor.
+ */
+ ~BlockSparseMatrix ();
+
+ /**
+ * Pseudo copy operator only copying
+ * empty objects. The sizes of the block
+ * matrices need to be the same.
+ */
+ BlockSparseMatrix &
+ operator = (const BlockSparseMatrix &);
+
+ /**
+ * This operator assigns a scalar to a
+ * matrix. Since this usually does not
+ * make much sense (should we set all
+ * matrix entries to this value? Only
+ * the nonzero entries of the sparsity
+ * pattern?), this operation is only
+ * allowed if the actual value to be
+ * assigned is zero. This operator only
+ * exists to allow for the obvious
+ * notation <tt>matrix=0</tt>, which
+ * sets all elements of the matrix to
+ * zero, but keeps the sparsity pattern
+ * previously used.
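+ *
+ * For example:
+ * @code
+ * matrix = 0;   // zero all stored entries, keep the sparsity pattern
+ * @endcode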
+ */
+ BlockSparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Resize the matrix, by setting
+ * the number of block rows and
+ * columns. This deletes all
+ * blocks and replaces them by
+ * uninitialized ones, i.e. ones
+ * for which also the sizes are
+ * not yet set. You have to do
+ * that by calling the @p reinit
+ * functions of the blocks
+ * themselves. Do not forget to
+ * call collect_sizes() after
+ * that on this object.
+ *
+ * The reason that you have to
+ * set sizes of the blocks
+ * yourself is that the sizes may
+ * be varying, the maximum number
+ * of elements per row may be
+ * varying, etc. It is simpler
+ * not to reproduce the interface
+ * of the @p SparsityPattern
+ * class here but rather let the
+ * user call whatever function
+ * she desires.
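+ *
+ * A sketch of this workflow,
+ * assuming a 2x2 array
+ * <tt>sparsity_patterns</tt> of
+ * sparsity patterns for the
+ * individual blocks, might read:
+ * @code
+ * TrilinosWrappers::BlockSparseMatrix matrix;
+ * matrix.reinit (2, 2);
+ * for (unsigned int r=0; r<2; ++r)
+ *   for (unsigned int c=0; c<2; ++c)
+ *     matrix.block(r,c).reinit (sparsity_patterns[r][c]);
+ * matrix.collect_sizes ();
+ * @endcode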
+ */
+ void reinit (const unsigned int n_block_rows,
+ const unsigned int n_block_columns);
+
+ /**
+ * Resize the matrix, by using an
+ * array of Epetra maps to determine
+ * the %parallel distribution of the
+ * individual matrices. This function
+ * assumes that a quadratic block
+ * matrix is generated.
+ */
+ template <typename BlockSparsityType>
+ void reinit (const std::vector<Epetra_Map> &input_maps,
+ const BlockSparsityType &block_sparsity_pattern);
+
+ /**
+ * Resize the matrix, by using an
+ * array of index sets to determine
+ * the %parallel distribution of the
+ * individual matrices. This function
+ * assumes that a quadratic block
+ * matrix is generated.
+ */
+ template <typename BlockSparsityType>
+ void reinit (const std::vector<IndexSet> &input_maps,
+ const BlockSparsityType &block_sparsity_pattern,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Resize the matrix and initialize it
+ * by the given sparsity pattern. Since
+ * no distribution map is given, the
+ * result is a block matrix for which
+ * all elements are stored locally.
+ */
+ template <typename BlockSparsityType>
+ void reinit (const BlockSparsityType &block_sparsity_pattern);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries stored
+ * therein. It uses a threshold
+ * to copy only elements whose
+ * modulus is larger than the
+ * threshold (so zeros in the
+ * deal.II matrix can be filtered
+ * away).
+ */
+ void reinit (const std::vector<Epetra_Map> &input_maps,
+ const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
+ const double drop_tolerance=1e-13);
+
+ /**
+ * This function initializes
+ * the Trilinos matrix using
+ * the deal.II sparse matrix
+ * and the entries stored
+ * therein. It uses a threshold
+ * to copy only elements whose
+ * modulus is larger than the
+ * threshold (so zeros in the
+ * deal.II matrix can be
+ * filtered away). Since no
+ * Epetra_Map is given, all the
+ * elements will be locally
+ * stored.
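+ *
+ * For example, given an already
+ * assembled deal.II matrix
+ * <tt>deal_ii_matrix</tt> (a
+ * placeholder name):
+ * @code
+ * TrilinosWrappers::BlockSparseMatrix trilinos_matrix;
+ * trilinos_matrix.reinit (deal_ii_matrix);
+ * @endcode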
+ */
+ void reinit (const ::dealii::BlockSparseMatrix<double> &deal_ii_sparse_matrix,
+ const double drop_tolerance=1e-13);
+
+ /**
+ * Returns the state of the
+ * matrix, i.e., whether
+ * compress() needs to be called
+ * after an operation requiring
+ * data exchange. It only
+ * returns <tt>false</tt> when
+ * used in <tt>debug</tt> mode,
+ * since it is quite expensive to
+ * keep track of all operations
+ * that lead to the need for
+ * compress().
+ */
+ bool is_compressed () const;
+
+ /**
+ * This function collects the
+ * sizes of the sub-objects and
+ * stores them in internal
+ * arrays, in order to be able to
+ * relay global indices into the
+ * matrix to indices into the
+ * subobjects. You *must* call
+ * this function each time after
+ * you have changed the size of
+ * the sub-objects. Note that
+ * this is a collective
+ * operation, i.e., it needs to
+ * be called on all MPI
+ * processes. This command
+ * internally calls the method
+ * <tt>compress()</tt>, so you
+ * don't need to call that
+ * function in case you use
+ * <tt>collect_sizes()</tt>.
+ */
+ void collect_sizes ();
+
+ /**
+ * Return the number of nonzero
+ * elements of this
+ * matrix.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix.
+ */
+ void vmult (MPI::BlockVector &dst,
+ const MPI::BlockVector &src) const;
+
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M*src$ with $M$
+ * being this matrix, now applied
+ * to localized block vectors
+ * (works only when run on one
+ * processor).
+ */
+ void vmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
- void vmult (MPI::BlockVector &dst,
++ void vmult (MPI::BlockVector &dst,
+ const MPI::Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column, now
+ * applied to localized vectors
+ * (works only when run on one
+ * processor).
+ */
- void vmult (BlockVector &dst,
++ void vmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
+ void vmult (MPI::Vector &dst,
+ const MPI::BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row, now
+ * applied to localized vectors
+ * (works only when run on one
+ * processor).
+ */
+ void vmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix.
+ */
+ void Tvmult (MPI::BlockVector &dst,
+ const MPI::BlockVector &src) const;
+
+ /**
+ * Matrix-vector multiplication:
+ * let $dst = M^T*src$ with $M$
+ * being this matrix. This
+ * function does the same as
+ * vmult() but takes the
+ * transposed matrix, now applied
+ * to localized Trilinos vectors
+ * (works only when run on one
+ * processor).
+ */
+ void Tvmult (BlockVector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row.
+ */
- void Tvmult (MPI::BlockVector &dst,
++ void Tvmult (MPI::BlockVector &dst,
+ const MPI::Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block row, now
+ * applied to localized Trilinos
+ * vectors (works only when run
+ * on one processor).
+ */
- void Tvmult (BlockVector &dst,
++ void Tvmult (BlockVector &dst,
+ const Vector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column.
+ */
+ void Tvmult (MPI::Vector &dst,
+ const MPI::BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block column, now
+ * applied to localized Trilinos
+ * vectors (works only when run
+ * on one processor).
+ */
+ void Tvmult (Vector &dst,
+ const BlockVector &src) const;
+
+ /**
+ * Matrix-vector
+ * multiplication. Just like the
+ * previous function, but only
+ * applicable if the matrix has
+ * only one block.
+ */
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
+ *
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface in the
+ * TrilinosWrappers::MPI::BlockVector
+ * class.
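+ *
+ * In a sketch, with block vectors
+ * <tt>r</tt>, <tt>x</tt> and
+ * <tt>b</tt> built on the same
+ * partitioning as the matrix:
+ * @code
+ * const TrilinosScalar norm = matrix.residual (r, x, b);
+ * @endcode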
+ */
+ TrilinosScalar residual (MPI::BlockVector &dst,
+ const MPI::BlockVector &x,
+ const MPI::BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
+ *
+ * Note that both vectors have
+ * to be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix
+ * in case you work on a
+ * distributed memory
+ * architecture, using the
+ * interface in the
+ * TrilinosWrappers::BlockVector
+ * class. Since the block
+ * matrix is in general
+ * distributed among processes,
+ * this function only works
+ * when running the program on
+ * one processor.
+ */
+ TrilinosScalar residual (BlockVector &dst,
+ const BlockVector &x,
+ const BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block row.
+ */
+ TrilinosScalar residual (MPI::BlockVector &dst,
+ const MPI::Vector &x,
+ const MPI::BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block row.
+ */
+ TrilinosScalar residual (BlockVector &dst,
+ const Vector &x,
+ const BlockVector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block column.
+ */
+ TrilinosScalar residual (MPI::Vector &dst,
+ const MPI::BlockVector &x,
+ const MPI::Vector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block column.
+ */
+ TrilinosScalar residual (Vector &dst,
+ const BlockVector &x,
+ const Vector &b) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned. Just like the
+ * previous function, but only
+ * applicable if the matrix
+ * only has one block.
+ */
+ TrilinosScalar residual (VectorBase &dst,
+ const VectorBase &x,
+ const VectorBase &b) const;
+
+ /**
+ * Make the clear() function in the
+ * base class visible, though it is
+ * protected.
+ */
+ using BlockMatrixBase<SparseMatrix>::clear;
+
+ /** @addtogroup Exceptions
+ * @{
+ */
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleRowNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing row numbers.");
+
+ /**
+ * Exception
+ */
+ DeclException4 (ExcIncompatibleColNumbers,
+ int, int, int, int,
+ << "The blocks [" << arg1 << ',' << arg2 << "] and ["
+ << arg3 << ',' << arg4 << "] have differing column numbers.");
+ ///@}
};
class BlockSparseMatrix;
- /**
- * An implementation of block vectors based on the vector class
- * implemented in TrilinosWrappers. While the base class provides for
- * most of the interface, this class handles the actual allocation of
- * vectors and provides functions that are specific to the underlying
- * vector type.
- *
- * In contrast to the class MPI::BlockVector, this class is based on a
- * localized version of the vectors, which means that the whole vector
- * is stored on each processor. Note that matrix vector products with
- * this block vector class do only work in case the program is run on
- * only one processor, since the Trilinos matrices are inherently
- * parallel.
- *
- * @ingroup Vectors
- * @ingroup TrilinosWrappers
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Martin Kronbichler, 2008
- */
+ /**
+ * An implementation of block vectors based on the vector class
+ * implemented in TrilinosWrappers. While the base class provides for
+ * most of the interface, this class handles the actual allocation of
+ * vectors and provides functions that are specific to the underlying
+ * vector type.
+ *
+ * In contrast to the class MPI::BlockVector, this class is based on a
+ * localized version of the vectors, which means that the whole vector
+ * is stored on each processor. Note that matrix vector products with
+ * this block vector class only work if the program is run on a
+ * single processor, since the Trilinos matrices are inherently
+ * parallel.
+ *
+ * @ingroup Vectors
+ * @ingroup TrilinosWrappers
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Martin Kronbichler, 2008
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Default constructor. Generate an
- * empty vector without any blocks.
- */
- BlockVector ();
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in Input_Maps.
- * For this non-distributed vector,
- * the %parallel partitioning is not
- * used, just the global size of the
- * partitioner.
- */
- BlockVector (const std::vector<Epetra_Map> &partitioner);
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in Input_Maps.
- * For this non-distributed vector,
- * the %parallel partitioning is not
- * used, just the global size of the
- * partitioner.
- */
- BlockVector (const std::vector<IndexSet> &partitioner,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the non-%parallel
- * vector to those of the given
- * %parallel vector and import the
- * elements.
- */
- BlockVector (const MPI::BlockVector &V);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the vector to those
- * of the given input vector and copy
- * the elements.
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Creates a block vector
- * consisting of
- * <tt>num_blocks</tt>
- * components, but there is no
- * content in the individual
- * components and the user has to
- * fill appropriate data using a
- * reinit of the blocks.
- */
- BlockVector (const unsigned int num_blocks);
-
- /**
- * Constructor. Set the number of
- * blocks to <tt>n.size()</tt> and
- * initialize each block with
- * <tt>n[i]</tt> zero elements.
- *
- * References BlockVector.reinit().
- */
- BlockVector (const std::vector<unsigned int> &N);
-
- /**
- * Constructor. Set the number of
- * blocks to
- * <tt>n.size()</tt>. Initialize the
- * vector with the elements
- * pointed to by the range of
- * iterators given as second and
- * third argument. Apart from the
- * first argument, this
- * constructor is in complete
- * analogy to the respective
- * constructor of the
- * <tt>std::vector</tt> class, but the
- * first argument is needed in
- * order to know how to subdivide
- * the block vector into
- * different blocks.
- */
- template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * use compress(VectorOperation) instead
- *
- * @deprecated
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (const Epetra_CombineMode last_action);
-
- /**
- * so it is not hidden
- */
- using BlockVectorBase<Vector>::compress;
-
- /**
- * Copy operator: fill all
- * components of the vector that
- * are locally stored with the
- * given scalar value.
- */
- BlockVector &
- operator = (const value_type s);
-
- /**
- * Copy operator for a
- * distributed Trilinos vector to
- * a localized one.
- */
- BlockVector &
- operator = (const MPI::BlockVector &V);
-
- /**
- * Copy operator for arguments of
- * the same type.
- */
- BlockVector &
- operator = (const BlockVector &V);
-
- /**
- * Another copy function. This
- * one takes a deal.II block
- * vector and copies it into a
- * TrilinosWrappers block
- * vector. Note that the number
- * of blocks has to be the same
- * in the vector as in the input
- * vector. Use the reinit()
- * command for resizing the
- * BlockVector or for changing
- * the internal structure of the
- * block components.
- *
- * Since Trilinos only works on
- * doubles, this function is
- * limited to accept only one
- * possible number type in the
- * deal.II vector.
- */
- template <typename Number>
- BlockVector &
- operator = (const ::dealii::BlockVector<Number> &V);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are Epetra_Maps given in the
- * input argument, according to the
- * global size of the individual
- * components described in the
- * maps. Note that the resulting
- * vector will be stored completely
- * on each process. The Epetra_Map
- * is useful when data exchange
- * with a distributed vector based
- * on the same Epetra_map is
- * intended. In that case, the same
- * communicator is used for data
- * exchange.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<Epetra_Map> &partitioning,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are index sets given in the
- * input argument, according to the
- * global size of the individual
- * components described in the
- * index set, and using a given MPI
- * communicator. The MPI
- * communicator is useful when data
- * exchange with a distributed
- * vector based on the same
- * initialization is intended. In
- * that case, the same communicator
- * is used for data exchange.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<IndexSet> &partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are elements in the first
- * argument, and with the respective
- * sizes. Since no distribution map
- * is given, all vectors are local
- * vectors.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<unsigned int> &N,
- const bool fast=false);
-
- /**
- * Reinit the function
- * according to a distributed
- * block vector. The elements
- * will be copied in this
- * process.
- */
- void reinit (const MPI::BlockVector &V);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast = false);
-
- /**
- * Change the number of blocks to
- * <tt>num_blocks</tt>. The individual
- * blocks will get initialized with
- * zero size, so it is assumed that
- * the user resizes the
- * individual blocks by herself
- * in an appropriate way, and
- * calls <tt>collect_sizes</tt>
- * afterwards.
- */
- void reinit (const unsigned int num_blocks);
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
-
- /**
- * Exception
- */
- DeclException0 (ExcNonMatchingBlockVectors);
-
- /**
- * Exception
- */
- DeclException2 (ExcNonLocalizedMap,
- int, int,
- << "For the generation of a localized vector the map has "
- << "to assign all elements to all vectors! "
- << "local_size = global_size is a necessary condition, but"
- << arg1 << " != " << arg2 << " was given!");
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Default constructor. Generate an
+ * empty vector without any blocks.
+ */
+ BlockVector ();
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in @p partitioner.
+ * For this non-distributed vector,
+ * the %parallel partitioning is not
+ * used, just the global size of the
+ * partitioner.
+ */
+ BlockVector (const std::vector<Epetra_Map> &partitioner);
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in @p partitioner.
+ * For this non-distributed vector,
+ * the %parallel partitioning is not
+ * used, just the global size of the
+ * partitioner.
+ */
+ BlockVector (const std::vector<IndexSet> &partitioner,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the non-%parallel
+ * vector to those of the given
+ * %parallel vector and import the
+ * elements.
+ */
+ BlockVector (const MPI::BlockVector &V);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the vector to those
+ * of the given input vector and copy
+ * the elements.
+ */
+ BlockVector (const BlockVector &V);
+
+ /**
+ * Creates a block vector
+ * consisting of
+ * <tt>num_blocks</tt>
+ * components, but there is no
+ * content in the individual
+ * components and the user has to
+ * fill appropriate data using a
+ * reinit of the blocks.
+ */
+ BlockVector (const unsigned int num_blocks);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to <tt>n.size()</tt> and
+ * initialize each block with
+ * <tt>n[i]</tt> zero elements.
+ *
+ * References BlockVector.reinit().
+ */
+ BlockVector (const std::vector<unsigned int> &N);
+
+ /**
+ * Constructor. Set the number of
+ * blocks to
+ * <tt>n.size()</tt>. Initialize the
+ * vector with the elements
+ * pointed to by the range of
+ * iterators given as second and
+ * third argument. Apart from the
+ * first argument, this
+ * constructor is in complete
+ * analogy to the respective
+ * constructor of the
+ * <tt>std::vector</tt> class, but the
+ * first argument is needed in
+ * order to know how to subdivide
+ * the block vector into
+ * different blocks.
+ */
+ template <typename InputIterator>
+ BlockVector (const std::vector<unsigned int> &n,
+ const InputIterator first,
+ const InputIterator end);
+
+ /**
+ * Destructor. Clears memory
+ */
+ ~BlockVector ();
+
+ /**
+ * use compress(VectorOperation) instead
+ *
+ * @deprecated
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (const Epetra_CombineMode last_action);
+
+ /**
+ * so it is not hidden
+ */
+ using BlockVectorBase<Vector>::compress;
+
+ /**
+ * Copy operator: fill all
+ * components of the vector that
+ * are locally stored with the
+ * given scalar value.
+ */
+ BlockVector &
+ operator = (const value_type s);
+
+ /**
+ * Copy operator for a
+ * distributed Trilinos vector to
+ * a localized one.
+ */
+ BlockVector &
+ operator = (const MPI::BlockVector &V);
+
+ /**
+ * Copy operator for arguments of
+ * the same type.
+ */
+ BlockVector &
+ operator = (const BlockVector &V);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II block
+ * vector and copies it into a
+ * TrilinosWrappers block
+ * vector. Note that the number
+ * of blocks has to be the same
+ * in the vector as in the input
+ * vector. Use the reinit()
+ * command for resizing the
+ * BlockVector or for changing
+ * the internal structure of the
+ * block components.
+ *
+ * Since Trilinos only works on
+ * doubles, this function is
+ * limited to accept only one
+ * possible number type in the
+ * deal.II vector.
+ */
+ template <typename Number>
+ BlockVector &
+ operator = (const ::dealii::BlockVector<Number> &V);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are Epetra_Maps given in the
+ * input argument, according to the
+ * global size of the individual
+ * components described in the
+ * maps. Note that the resulting
+ * vector will be stored completely
+ * on each process. The Epetra_Map
+ * is useful when data exchange
+ * with a distributed vector based
+ * on the same Epetra_map is
+ * intended. In that case, the same
+ * communicator is used for data
+ * exchange.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<Epetra_Map> &partitioning,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are index sets given in the
+ * input argument, according to the
+ * global size of the individual
+ * components described in the
+ * index set, and using a given MPI
+ * communicator. The MPI
+ * communicator is useful when data
+ * exchange with a distributed
+ * vector based on the same
+ * initialization is intended. In
+ * that case, the same communicator
+ * is used for data exchange.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<IndexSet> &partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are elements in the first
+ * argument, and with the respective
+ * sizes. Since no distribution map
+ * is given, all vectors are local
+ * vectors.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<unsigned int> &N,
+ const bool fast=false);
+
+ /**
+ * Reinitialize the vector
+ * according to a distributed
+ * block vector. The elements
+ * will be copied to this
+ * process.
+ */
+ void reinit (const MPI::BlockVector &V);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast = false);
+
+ /**
+ * Change the number of blocks to
+ * <tt>num_blocks</tt>. The individual
+ * blocks will get initialized with
+ * zero size, so it is assumed that
+ * the user resizes the
+ * individual blocks by herself
+ * in an appropriate way, and
+ * calls <tt>collect_sizes</tt>
+ * afterwards.
+ */
+ void reinit (const unsigned int num_blocks);
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNonMatchingBlockVectors);
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcNonLocalizedMap,
+ int, int,
+ << "For the generation of a localized vector the map has "
+ << "to assign all elements to all vectors! "
+ << "local_size = global_size is a necessary condition, but"
+ << arg1 << " != " << arg2 << " was given!");
};
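// Illustrative sketch, not part of the patch above: pulling a distributed
// MPI::BlockVector onto every process through the copy constructor from
// MPI::BlockVector declared in the localized BlockVector class. The function
// name and its argument are placeholders.
TrilinosWrappers::BlockVector
localize_for_output (const TrilinosWrappers::MPI::BlockVector &distributed)
{
  // Every element of the distributed vector is imported onto the local
  // process; as the class documentation above notes, the result is only
  // suitable for serial post-processing, not for parallel matrix-vector
  // products.
  TrilinosWrappers::BlockVector localized (distributed);
  return localized;
}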
namespace MPI
{
- /**
- * An implementation of block vectors based on the vector class
- * implemented in TrilinosWrappers. While the base class provides for
- * most of the interface, this class handles the actual allocation of
- * vectors and provides functions that are specific to the underlying
- * vector type.
- *
- * The model of distribution of data is such that each of the blocks
- * is distributed across all MPI processes named in the MPI
- * communicator. I.e. we don't just distribute the whole vector, but
- * each component. In the constructors and reinit() functions, one
- * therefore not only has to specify the sizes of the individual
- * blocks, but also the number of elements of each of these blocks to
- * be stored on the local process.
- *
- * @ingroup Vectors
- * @ingroup TrilinosWrappers
- * @see @ref GlossBlockLA "Block (linear algebra)"
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
- */
+ /**
+ * An implementation of block vectors based on the vector class
+ * implemented in TrilinosWrappers. While the base class provides for
+ * most of the interface, this class handles the actual allocation of
+ * vectors and provides functions that are specific to the underlying
+ * vector type.
+ *
+ * The model of distribution of data is such that each of the blocks
+ * is distributed across all MPI processes named in the MPI
+ * communicator. I.e. we don't just distribute the whole vector, but
+ * each component. In the constructors and reinit() functions, one
+ * therefore not only has to specify the sizes of the individual
+ * blocks, but also the number of elements of each of these blocks to
+ * be stored on the local process.
+ *
+ * @ingroup Vectors
+ * @ingroup TrilinosWrappers
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
+ */
class BlockVector : public BlockVectorBase<Vector>
{
- public:
- /**
- * Typedef the base class for simpler
- * access to its own typedefs.
- */
- typedef BlockVectorBase<Vector> BaseClass;
-
- /**
- * Typedef the type of the underlying
- * vector.
- */
- typedef BaseClass::BlockType BlockType;
-
- /**
- * Import the typedefs from the base
- * class.
- */
- typedef BaseClass::value_type value_type;
- typedef BaseClass::pointer pointer;
- typedef BaseClass::const_pointer const_pointer;
- typedef BaseClass::reference reference;
- typedef BaseClass::const_reference const_reference;
- typedef BaseClass::size_type size_type;
- typedef BaseClass::iterator iterator;
- typedef BaseClass::const_iterator const_iterator;
-
- /**
- * Default constructor. Generate an
- * empty vector without any blocks.
- */
- BlockVector ();
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in @p
- * partitioning. Each Epetra_Map
- * contains the layout of the
- * distribution of data among the MPI
- * processes.
- */
- BlockVector (const std::vector<Epetra_Map> &parallel_partitioning);
-
- /**
- * Constructor. Generate a block
- * vector with as many blocks as
- * there are entries in
- * @p partitioning. Each IndexSet
- * together with the MPI communicator
- * contains the layout of the
- * distribution of data among the MPI
- * processes.
- */
- BlockVector (const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
-
- /**
- * Copy-Constructor. Set all the
- * properties of the parallel vector
- * to those of the given argument and
- * copy the elements.
- */
- BlockVector (const BlockVector &V);
-
- /**
- * Creates a block vector
- * consisting of
- * <tt>num_blocks</tt>
- * components, but there is no
- * content in the individual
- * components and the user has to
- * fill appropriate data using a
- * reinit of the blocks.
- */
- BlockVector (const unsigned int num_blocks);
-
- /**
- * Destructor. Clears memory
- */
- ~BlockVector ();
-
- /**
- * Copy operator: fill all
- * components of the vector that
- * are locally stored with the
- * given scalar value.
- */
- BlockVector &
- operator = (const value_type s);
-
- /**
- * Copy operator for arguments of
- * the same type.
- */
- BlockVector &
- operator = (const BlockVector &V);
-
- /**
- * Copy operator for arguments of
- * the localized Trilinos vector
- * type.
- */
- BlockVector &
- operator = (const ::dealii::TrilinosWrappers::BlockVector &V);
-
- /**
- * Another copy function. This
- * one takes a deal.II block
- * vector and copies it into a
- * TrilinosWrappers block
- * vector. Note that the number
- * of blocks has to be the same
- * in the vector as in the input
- * vector. Use the reinit()
- * command for resizing the
- * BlockVector or for changing
- * the internal structure of the
- * block components.
- *
- * Since Trilinos only works on
- * doubles, this function is
- * limited to accept only one
- * possible number type in the
- * deal.II vector.
- */
- template <typename Number>
- BlockVector &
- operator = (const ::dealii::BlockVector<Number> &V);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are Epetra_Maps given in the input
- * argument, according to the
- * parallel distribution of the
- * individual components described
- * in the maps.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<Epetra_Map> &parallel_partitioning,
- const bool fast = false);
-
- /**
- * Reinitialize the BlockVector to
- * contain as many blocks as there
- * are index sets given in the input
- * argument, according to the
- * parallel distribution of the
- * individual components described
- * in the maps.
- *
- * If <tt>fast==false</tt>, the vector
- * is filled with zeros.
- */
- void reinit (const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool fast = false);
-
- /**
- * Change the dimension to that
- * of the vector <tt>V</tt>. The same
- * applies as for the other
- * reinit() function.
- *
- * The elements of <tt>V</tt> are not
- * copied, i.e. this function is
- * the same as calling <tt>reinit
- * (V.size(), fast)</tt>.
- *
- * Note that you must call this
- * (or the other reinit()
- * functions) function, rather
- * than calling the reinit()
- * functions of an individual
- * block, to allow the block
- * vector to update its caches of
- * vector sizes. If you call
- * reinit() on one of the
- * blocks, then subsequent
- * actions on this object may
- * yield unpredictable results
- * since they may be routed to
- * the wrong block.
- */
- void reinit (const BlockVector &V,
- const bool fast = false);
-
- /**
- * Change the number of blocks to
- * <tt>num_blocks</tt>. The individual
- * blocks will get initialized with
- * zero size, so it is assumed that
- * the user resizes the
- * individual blocks by herself
- * in an appropriate way, and
- * calls <tt>collect_sizes</tt>
- * afterwards.
- */
- void reinit (const unsigned int num_blocks);
-
- /**
- * This reinit function is meant to
- * be used for parallel
- * calculations where some
- * non-local data has to be
- * used. The typical situation
- * where one needs this function is
- * the call of the
- * FEValues<dim>::get_function_values
- * function (or of some
- * derivatives) in parallel. Since
- * it is usually faster to retrieve
- * the data in advance, this
- * function can be called before
- * the assembly forks out to the
- * different processors. What this
- * function does is the following:
- * It takes the information in the
- * columns of the given matrix and
- * looks which data couples between
- * the different processors. That
- * data is then queried from the
- * input vector. Note that you
- * should not write to the
- * resulting vector any more, since
- * the some data can be stored
- * several times on different
- * processors, leading to
- * unpredictable results. In
- * particular, such a vector cannot
- * be used for matrix-vector
- * products as for example done
- * during the solution of linear
- * systems.
- */
- void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m,
- const BlockVector &v);
-
-
- /**
- * use compress(VectorOperation) instead
- *
- * @deprecated
- *
- * See @ref GlossCompress "Compressing
- * distributed objects" for more
- * information.
- */
- void compress (const Epetra_CombineMode last_action);
-
- /**
- * so it is not hidden
- */
- using BlockVectorBase<Vector>::compress;
-
-
- /**
- * Returns the state of the
- * vector, i.e., whether
- * compress() needs to be
- * called after an operation
- * requiring data
- * exchange. Does only return
- * non-true values when used in
- * <tt>debug</tt> mode, since
- * it is quite expensive to
- * keep track of all operations
- * that lead to the need for
- * compress().
- */
- bool is_compressed () const;
-
- /**
- * Swap the contents of this
- * vector and the other vector
- * <tt>v</tt>. One could do this
- * operation with a temporary
- * variable and copying over the
- * data elements, but this
- * function is significantly more
- * efficient since it only swaps
- * the pointers to the data of
- * the two vectors and therefore
- * does not need to allocate
- * temporary storage and move
- * data around.
- *
- * Limitation: right now this
- * function only works if both
- * vectors have the same number
- * of blocks. If needed, the
- * numbers of blocks should be
- * exchanged, too.
- *
- * This function is analog to the
- * the swap() function of all C++
- * standard containers. Also,
- * there is a global function
- * swap(u,v) that simply calls
- * <tt>u.swap(v)</tt>, again in analogy
- * to standard functions.
- */
- void swap (BlockVector &v);
-
- /**
- * Print to a stream.
- */
- void print (std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Exception
- */
- DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
-
- /**
- * Exception
- */
- DeclException0 (ExcNonMatchingBlockVectors);
+ public:
+ /**
+ * Typedef the base class for simpler
+ * access to its own typedefs.
+ */
+ typedef BlockVectorBase<Vector> BaseClass;
+
+ /**
+ * Typedef the type of the underlying
+ * vector.
+ */
+ typedef BaseClass::BlockType BlockType;
+
+ /**
+ * Import the typedefs from the base
+ * class.
+ */
+ typedef BaseClass::value_type value_type;
+ typedef BaseClass::pointer pointer;
+ typedef BaseClass::const_pointer const_pointer;
+ typedef BaseClass::reference reference;
+ typedef BaseClass::const_reference const_reference;
+ typedef BaseClass::size_type size_type;
+ typedef BaseClass::iterator iterator;
+ typedef BaseClass::const_iterator const_iterator;
+
+ /**
+ * Default constructor. Generate an
+ * empty vector without any blocks.
+ */
+ BlockVector ();
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in @p
+ * partitioning. Each Epetra_Map
+ * contains the layout of the
+ * distribution of data among the MPI
+ * processes.
+ */
+ BlockVector (const std::vector<Epetra_Map> &parallel_partitioning);
+
+ /**
+ * Constructor. Generate a block
+ * vector with as many blocks as
+ * there are entries in
+ * @p partitioning. Each IndexSet
+ * together with the MPI communicator
+ * contains the layout of the
+ * distribution of data among the MPI
+ * processes.
+ */
+ BlockVector (const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
+
+ /**
+ * Copy-Constructor. Set all the
+ * properties of the parallel vector
+ * to those of the given argument and
+ * copy the elements.
+ */
+ BlockVector (const BlockVector &V);
+
+ /**
+ * Creates a block vector
+ * consisting of
+ * <tt>num_blocks</tt>
+ * components, but there is no
+ * content in the individual
+ * components and the user has to
+ * fill appropriate data using a
+ * reinit of the blocks.
+ */
+ BlockVector (const unsigned int num_blocks);
+
+ /**
+ * Destructor. Clears memory
+ */
+ ~BlockVector ();
+
+ /**
+ * Copy operator: fill all
+ * components of the vector that
+ * are locally stored with the
+ * given scalar value.
+ */
+ BlockVector &
+ operator = (const value_type s);
+
+ /**
+ * Copy operator for arguments of
+ * the same type.
+ */
+ BlockVector &
+ operator = (const BlockVector &V);
+
+ /**
+ * Copy operator for arguments of
+ * the localized Trilinos vector
+ * type.
+ */
+ BlockVector &
+ operator = (const ::dealii::TrilinosWrappers::BlockVector &V);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II block
+ * vector and copies it into a
+ * TrilinosWrappers block
+ * vector. Note that the number
+ * of blocks has to be the same
+ * in the vector as in the input
+ * vector. Use the reinit()
+ * command for resizing the
+ * BlockVector or for changing
+ * the internal structure of the
+ * block components.
+ *
+ * Since Trilinos only works on
+ * doubles, this function is
+ * limited to accept only one
+ * possible number type in the
+ * deal.II vector.
+ */
+ template <typename Number>
+ BlockVector &
+ operator = (const ::dealii::BlockVector<Number> &V);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are Epetra_Maps given in the input
+ * argument, according to the
+ * parallel distribution of the
+ * individual components described
+ * in the maps.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<Epetra_Map> &parallel_partitioning,
+ const bool fast = false);
+
+ /**
+ * Reinitialize the BlockVector to
+ * contain as many blocks as there
+ * are index sets given in the input
+ * argument, according to the
+ * parallel distribution of the
+ * individual components described
+ * in the maps.
+ *
+ * If <tt>fast==false</tt>, the vector
+ * is filled with zeros.
+ */
+ void reinit (const std::vector<IndexSet> &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool fast = false);
+
+ /**
+ * Change the dimension to that
+ * of the vector <tt>V</tt>. The same
+ * applies as for the other
+ * reinit() function.
+ *
+ * The elements of <tt>V</tt> are not
+ * copied, i.e. this function is
+ * the same as calling <tt>reinit
+ * (V.size(), fast)</tt>.
+ *
+ * Note that you must call this
+ * (or the other reinit()
+ * functions) function, rather
+ * than calling the reinit()
+ * functions of an individual
+ * block, to allow the block
+ * vector to update its caches of
+ * vector sizes. If you call
+ * reinit() on one of the
+ * blocks, then subsequent
+ * actions on this object may
+ * yield unpredictable results
+ * since they may be routed to
+ * the wrong block.
+ */
+ void reinit (const BlockVector &V,
+ const bool fast = false);
+
+ /**
+ * Change the number of blocks to
+ * <tt>num_blocks</tt>. The individual
+ * blocks will get initialized with
+ * zero size, so it is assumed that
+ * the user resizes the
+ * individual blocks by herself
+ * in an appropriate way, and
+ * calls <tt>collect_sizes</tt>
+ * afterwards.
+ */
+ void reinit (const unsigned int num_blocks);
+
+ /**
+ * This reinit function is meant to
+ * be used for parallel
+ * calculations where some
+ * non-local data has to be
+ * used. The typical situation
+ * where one needs this function is
+ * the call of the
+ * FEValues<dim>::get_function_values
+ * function (or of some
+ * derivatives) in parallel. Since
+ * it is usually faster to retrieve
+ * the data in advance, this
+ * function can be called before
+ * the assembly forks out to the
+ * different processors. What this
+ * function does is the following:
+ * It takes the information in the
+ * columns of the given matrix and
+ * looks which data couples between
+ * the different processors. That
+ * data is then queried from the
+ * input vector. Note that you
+ * should not write to the
+ * resulting vector any more, since
+ * some data can be stored
+ * several times on different
+ * processors, leading to
+ * unpredictable results. In
+ * particular, such a vector cannot
+ * be used for matrix-vector
+ * products as for example done
+ * during the solution of linear
+ * systems.
+ */
+ void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m,
+ const BlockVector &v);
+
+
+ /**
+ * use compress(VectorOperation) instead
+ *
+ * @deprecated
+ *
+ * See @ref GlossCompress "Compressing
+ * distributed objects" for more
+ * information.
+ */
+ void compress (const Epetra_CombineMode last_action);
+
+ /**
+ * so it is not hidden
+ */
+ using BlockVectorBase<Vector>::compress;
+
+
+ /**
+ * Returns the state of the
+ * vector, i.e., whether
+ * compress() needs to be
+ * called after an operation
+ * requiring data
+ * exchange. It only returns
+ * false when used in
+ * <tt>debug</tt> mode, since
+ * it is quite expensive to
+ * keep track of all operations
+ * that lead to the need for
+ * compress().
+ */
+ bool is_compressed () const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector
+ * <tt>v</tt>. One could do this
+ * operation with a temporary
+ * variable and copying over the
+ * data elements, but this
+ * function is significantly more
+ * efficient since it only swaps
+ * the pointers to the data of
+ * the two vectors and therefore
+ * does not need to allocate
+ * temporary storage and move
+ * data around.
+ *
+ * Limitation: right now this
+ * function only works if both
+ * vectors have the same number
+ * of blocks. If needed, the
+ * numbers of blocks should be
+ * exchanged, too.
+ *
+ * This function is analogous to
+ * the swap() function of all C++
+ * standard containers. Also,
+ * there is a global function
+ * swap(u,v) that simply calls
+ * <tt>u.swap(v)</tt>, again in analogy
+ * to standard functions.
+ */
+ void swap (BlockVector &v);
+
+ /**
+ * Print to a stream.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNonMatchingBlockVectors);
};
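// Illustrative sketch, not part of the patch above: setting up a two-block
// parallel vector with the IndexSet-based reinit() declared above. The
// IndexSets `owned_u` and `owned_p` are placeholders for the locally owned
// rows of, say, a velocity and a pressure block.
TrilinosWrappers::MPI::BlockVector
make_two_block_vector (const IndexSet &owned_u,
                       const IndexSet &owned_p,
                       const MPI_Comm  communicator)
{
  std::vector<IndexSet> partitioning;
  partitioning.push_back (owned_u);
  partitioning.push_back (owned_p);

  // One block per IndexSet; the entries are zero-initialized because the
  // `fast` argument defaults to false.
  TrilinosWrappers::MPI::BlockVector v;
  v.reinit (partitioning, communicator);
  return v;
}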
class PreconditionBase;
- /**
- * Base class for solver classes using the Trilinos solvers. Since
- * solvers in Trilinos are selected based on flags passed to a generic
- * solver object, basically all the actual solver calls happen in this
- * class, and derived classes simply set the right flags to select one
- * solver or another, or to set certain parameters for individual
- * solvers. For a general discussion on the Trilinos solver package
- * AztecOO, we refer to the <a href =
- * "http://trilinos.sandia.gov/packages/aztecoo/AztecOOUserGuide.pdf">AztecOO
- * user guide</a>.
- *
- * This solver class can also be used as a standalone class, where the
- * respective Krylov method is set via the flag
- * <tt>solver_name</tt>. This can be done at runtime (e.g., when
- * parsing the solver from a ParameterList) and is similar to the
- * deal.II class SolverSelector.
- *
- * @ingroup TrilinosWrappers
- * @author Martin Kronbichler, 2008, 2009
- */
+ /**
+ * Base class for solver classes using the Trilinos solvers. Since
+ * solvers in Trilinos are selected based on flags passed to a generic
+ * solver object, basically all the actual solver calls happen in this
+ * class, and derived classes simply set the right flags to select one
+ * solver or another, or to set certain parameters for individual
+ * solvers. For a general discussion on the Trilinos solver package
+ * AztecOO, we refer to the <a href =
+ * "http://trilinos.sandia.gov/packages/aztecoo/AztecOOUserGuide.pdf">AztecOO
+ * user guide</a>.
+ *
+ * This solver class can also be used as a standalone class, where the
+ * respective Krylov method is set via the flag
+ * <tt>solver_name</tt>. This can be done at runtime (e.g., when
+ * parsing the solver from a ParameterList) and is similar to the
+ * deal.II class SolverSelector.
+ *
+ * @ingroup TrilinosWrappers
+ * @author Martin Kronbichler, 2008, 2009
+ */
class SolverBase
{
- public:
-
- /**
- * Enumeration object that is
- * set in the constructor of
- * the derived classes and
- * tells Trilinos which solver
- * to use. This option can also
- * be set in the user program,
- * so one might use this base
- * class instead of one of the
- * specialized derived classes
- * when the solver should be
- * set at runtime. Currently
- * enabled options are:
- */
- enum SolverName {cg, cgs, gmres, bicgstab, tfqmr} solver_name;
-
- /**
- * Standardized data struct to
- * pipe additional data to the
- * solver.
- */
-
- struct AdditionalData
- {
- /**
- * Sets the additional data field to
- * the desired output format and puts
- * the restart parameter in case the
- * derived class is GMRES.
- *
- * TODO: Find a better way for
- * setting the GMRES restart
- * parameter since it is quite
- * inelegant to set a specific option
- * of one solver in the base class
- * for all solvers.
- */
- AdditionalData (const bool output_solver_details = false,
- const unsigned int gmres_restart_parameter = 30);
-
- /**
- * Enables/disables the output of
- * solver details (residual in each
- * iterations etc.).
- */
- const bool output_solver_details;
-
- /**
- * Restart parameter for GMRES
- * solver.
- */
- const unsigned int gmres_restart_parameter;
- };
-
- /**
- * Constructor. Takes the
- * solver control object and
- * creates the solver.
- */
- SolverBase (SolverControl &cn);
-
- /**
- * Second constructor. This
- * constructor takes an enum
- * object that specifies the
- * solver name and sets the
- * appropriate Krylov
- * method.
- */
- SolverBase (const enum SolverName solver_name,
- SolverControl &cn);
-
- /**
- * Destructor.
- */
- virtual ~SolverBase ();
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on
- * the information provided by
- * derived classes and the
- * object passed as a
- * preconditioner, one of the
- * linear solvers and
- * preconditioners of Trilinos
- * is chosen.
- */
- void
- solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b,
- const PreconditionBase &preconditioner);
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on the
- * information provided by derived
- * classes and the object passed as a
- * preconditioner, one of the linear
- * solvers and preconditioners of
- * Trilinos is chosen. This class
- * works with matrices according to
- * the TrilinosWrappers format, but
- * can take deal.II vectors as
- * argument. Since deal.II are serial
- * vectors (not distributed), this
- * function does only what you expect
- * in case the matrix is locally
- * owned. Otherwise, an exception
- * will be thrown.
- */
- void
- solve (const SparseMatrix &A,
- dealii::Vector<double> &x,
- const dealii::Vector<double> &b,
- const PreconditionBase &preconditioner);
-
- /**
- * Access to object that controls
- * convergence.
- */
- SolverControl & control() const;
-
- /**
- * Exception
- */
- DeclException1 (ExcTrilinosError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a Trilinos function");
-
- protected:
-
- /**
- * Reference to the object that
- * controls convergence of the
- * iterative solver. In fact,
- * for these Trilinos wrappers,
- * Trilinos does so itself, but
- * we copy the data from this
- * object before starting the
- * solution process, and copy
- * the data back into it
- * afterwards.
- */
- SolverControl &solver_control;
-
- private:
-
- /**
- * A structure that collects
- * the Trilinos sparse matrix,
- * the right hand side vector
- * and the solution vector,
- * which is passed down to the
- * Trilinos solver.
- */
- std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
-
- /**
- * A structure that contains
- * the Trilinos solver and
- * preconditioner objects.
- */
- AztecOO solver;
-
- /**
- * Store a copy of the flags for this
- * particular solver.
- */
- const AdditionalData additional_data;
+ public:
+
+ /**
+ * Enumeration object that is
+ * set in the constructor of
+ * the derived classes and
+ * tells Trilinos which solver
+ * to use. This option can also
+ * be set in the user program,
+ * so one might use this base
+ * class instead of one of the
+ * specialized derived classes
+ * when the solver should be
+ * set at runtime. Currently
+ * enabled options are:
+ */
+ enum SolverName {cg, cgs, gmres, bicgstab, tfqmr} solver_name;
+
+ /**
+ * Standardized data struct to
+ * pipe additional data to the
+ * solver.
+ */
+
+ struct AdditionalData
+ {
+ /**
+ * Sets the additional data field to
+ * the desired output format and puts
+ * the restart parameter in case the
+ * derived class is GMRES.
+ *
+ * TODO: Find a better way for
+ * setting the GMRES restart
+ * parameter since it is quite
+ * inelegant to set a specific option
+ * of one solver in the base class
+ * for all solvers.
+ */
+ AdditionalData (const bool output_solver_details = false,
+ const unsigned int gmres_restart_parameter = 30);
+
+ /**
+ * Enables/disables the output of
+ * solver details (residual in each
+ * iteration, etc.).
+ */
+ const bool output_solver_details;
+
+ /**
+ * Restart parameter for GMRES
+ * solver.
+ */
+ const unsigned int gmres_restart_parameter;
+ };
+
+ /**
+ * Constructor. Takes the
+ * solver control object and
+ * creates the solver.
+ */
+ SolverBase (SolverControl &cn);
+
+ /**
+ * Second constructor. This
+ * constructor takes an enum
+ * object that specifies the
+ * solver name and sets the
+ * appropriate Krylov
+ * method.
+ */
+ SolverBase (const enum SolverName solver_name,
+ SolverControl &cn);
+
+ /**
+ * Destructor.
+ */
+ virtual ~SolverBase ();
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Depending on
+ * the information provided by
+ * derived classes and the
+ * object passed as a
+ * preconditioner, one of the
+ * linear solvers and
+ * preconditioners of Trilinos
+ * is chosen.
+ */
+ void
+ solve (const SparseMatrix &A,
+ VectorBase &x,
+ const VectorBase &b,
+ const PreconditionBase &preconditioner);
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Depending on the
+ * information provided by derived
+ * classes and the object passed as a
+ * preconditioner, one of the linear
+ * solvers and preconditioners of
+ * Trilinos is chosen. This class
+ * works with matrices according to
+ * the TrilinosWrappers format, but
+ * can take deal.II vectors as
+ * argument. Since deal.II vectors
+ * are serial (not distributed), this
+ * function only does what you expect
+ * if the matrix is locally
+ * owned. Otherwise, an exception
+ * will be thrown.
+ */
+ void
+ solve (const SparseMatrix &A,
+ dealii::Vector<double> &x,
+ const dealii::Vector<double> &b,
+ const PreconditionBase &preconditioner);
+
+ /**
+ * Access to object that controls
+ * convergence.
+ */
+ SolverControl &control() const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ protected:
+
+ /**
+ * Reference to the object that
+ * controls convergence of the
+ * iterative solver. In fact,
+ * for these Trilinos wrappers,
+ * Trilinos does so itself, but
+ * we copy the data from this
+ * object before starting the
+ * solution process, and copy
+ * the data back into it
+ * afterwards.
+ */
+ SolverControl &solver_control;
+
+ private:
+
+ /**
+ * A structure that collects
+ * the Trilinos sparse matrix,
+ * the right hand side vector
+ * and the solution vector,
+ * which is passed down to the
+ * Trilinos solver.
+ */
+ std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
+
+ /**
+ * A structure that contains
+ * the Trilinos solver and
+ * preconditioner objects.
+ */
+ AztecOO solver;
+
+ /**
+ * Store a copy of the flags for this
+ * particular solver.
+ */
+ const AdditionalData additional_data;
};
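// Illustrative sketch, not part of the patch above: selecting the Krylov
// method at run time through the SolverName enum instead of using one of
// the derived solver classes. `A`, `x`, `b`, and `preconditioner` are
// placeholders for an assembled matrix, solution and right-hand-side
// vectors, and a matching TrilinosWrappers preconditioner.
void
solve_with_gmres (const TrilinosWrappers::SparseMatrix     &A,
                  TrilinosWrappers::VectorBase             &x,
                  const TrilinosWrappers::VectorBase       &b,
                  const TrilinosWrappers::PreconditionBase &preconditioner)
{
  // Stop after 1000 iterations or once the residual drops below 1e-8 |b|.
  SolverControl solver_control (1000, 1e-8 * b.l2_norm ());

  TrilinosWrappers::SolverBase solver (TrilinosWrappers::SolverBase::gmres,
                                       solver_control);
  solver.solve (A, x, b, preconditioner);
}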
- /**
- * An implementation of the Trilinos KLU direct solver (using the Amesos
- * package).
- *
- * @ingroup TrilinosWrappers
- * @author Martin Kronbichler, 2009
- */
+ /**
+ * An implementation of the Trilinos KLU direct solver (using the Amesos
+ * package).
+ *
+ * @ingroup TrilinosWrappers
+ * @author Martin Kronbichler, 2009
+ */
class SolverDirect
{
- public:
-
- /**
- * Standardized data struct to
- * pipe additional data to the
- * solver.
- */
-
- struct AdditionalData
- {
- /**
- * Sets the additional data field to
- * the desired output format.
- */
- AdditionalData (const bool output_solver_details = false);
-
- /**
- * Enables/disables the output of
- * solver details (residual in each
- * iterations etc.).
- */
- bool output_solver_details;
- };
-
- /**
- * Constructor. Takes the
- * solver control object and
- * creates the solver.
- */
- SolverDirect (SolverControl &cn,
- const AdditionalData &data = AdditionalData());
-
- /**
- * Destructor.
- */
- virtual ~SolverDirect ();
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Creates a KLU
- * factorization of the matrix and
- * performs the solve. Note that
- * there is no need for a
- * preconditioner here.
- */
- void
- solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b);
-
- /**
- * Solve the linear system
- * <tt>Ax=b</tt>. Depending on the
- * information provided by derived
- * classes and the object passed as a
- * preconditioner, one of the linear
- * solvers and preconditioners of
- * Trilinos is chosen. This class
- * works with matrices according to
- * the TrilinosWrappers format, but
- * can take deal.II vectors as
- * argument. Since deal.II are serial
- * vectors (not distributed), this
- * function does only what you expect
- * in case the matrix is locally
- * owned. Otherwise, an exception
- * will be thrown.
- */
- void
- solve (const SparseMatrix &A,
- dealii::Vector<double> &x,
- const dealii::Vector<double> &b);
-
- /**
- * Access to object that controls
- * convergence.
- */
- SolverControl & control() const;
-
- /**
- * Exception
- */
- DeclException1 (ExcTrilinosError,
- int,
- << "An error with error number " << arg1
- << " occurred while calling a Trilinos function");
-
- private:
-
- /**
- * Reference to the object that
- * controls convergence of the
- * iterative solver. In fact,
- * for these Trilinos wrappers,
- * Trilinos does so itself, but
- * we copy the data from this
- * object before starting the
- * solution process, and copy
- * the data back into it
- * afterwards.
- */
- SolverControl &solver_control;
-
- /**
- * A structure that collects
- * the Trilinos sparse matrix,
- * the right hand side vector
- * and the solution vector,
- * which is passed down to the
- * Trilinos solver.
- */
- std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
-
- /**
- * A structure that contains
- * the Trilinos solver and
- * preconditioner objects.
- */
- std_cxx1x::shared_ptr<Amesos_BaseSolver> solver;
-
- /**
- * Store a copy of the flags for this
- * particular solver.
- */
- const AdditionalData additional_data;
+ public:
+
+ /**
+ * Standardized data struct to
+ * pipe additional data to the
+ * solver.
+ */
+
+ struct AdditionalData
+ {
+ /**
+ * Sets the additional data field to
+ * the desired output format.
+ */
+ AdditionalData (const bool output_solver_details = false);
+
+ /**
+ * Enables/disables the output of
+ * solver details (residual in each
+ * iteration, etc.).
+ */
+ bool output_solver_details;
+ };
+
+ /**
+ * Constructor. Takes the
+ * solver control object and
+ * creates the solver.
+ */
+ SolverDirect (SolverControl &cn,
+ const AdditionalData &data = AdditionalData());
+
+ /**
+ * Destructor.
+ */
+ virtual ~SolverDirect ();
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Creates a KLU
+ * factorization of the matrix and
+ * performs the solve. Note that
+ * there is no need for a
+ * preconditioner here.
+ */
+ void
+ solve (const SparseMatrix &A,
+ VectorBase &x,
+ const VectorBase &b);
+
+ /**
+ * Solve the linear system
+ * <tt>Ax=b</tt>. Creates a KLU
+ * factorization of the matrix and
+ * performs the solve; no
+ * preconditioner is needed. This class
+ * works with matrices according to
+ * the TrilinosWrappers format, but
+ * can take deal.II vectors as
+ * argument. Since deal.II vectors
+ * are serial (not distributed), this
+ * function only does what you expect
+ * if the matrix is locally
+ * owned. Otherwise, an exception
+ * will be thrown.
+ */
+ void
+ solve (const SparseMatrix &A,
+ dealii::Vector<double> &x,
+ const dealii::Vector<double> &b);
+
+ /**
+ * Access to object that controls
+ * convergence.
+ */
+ SolverControl &control() const;
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ private:
+
+ /**
+ * Reference to the object that
+ * controls convergence of the
+ * iterative solver. In fact,
+ * for these Trilinos wrappers,
+ * Trilinos does so itself, but
+ * we copy the data from this
+ * object before starting the
+ * solution process, and copy
+ * the data back into it
+ * afterwards.
+ */
+ SolverControl &solver_control;
+
+ /**
+ * A structure that collects
+ * the Trilinos sparse matrix,
+ * the right hand side vector
+ * and the solution vector,
+ * which is passed down to the
+ * Trilinos solver.
+ */
+ std_cxx1x::shared_ptr<Epetra_LinearProblem> linear_problem;
+
+ /**
+ * A structure that contains
+ * the Trilinos solver and
+ * preconditioner objects.
+ */
+ std_cxx1x::shared_ptr<Amesos_BaseSolver> solver;
+
+ /**
+ * Store a copy of the flags for this
+ * particular solver.
+ */
+ const AdditionalData additional_data;
};
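// Illustrative sketch, not part of the patch above: a direct solve with the
// Amesos KLU wrapper declared above. `A`, `x`, and `b` are placeholders for
// an assembled TrilinosWrappers::SparseMatrix and two vectors with a
// matching layout.
void
solve_directly (const TrilinosWrappers::SparseMatrix &A,
                TrilinosWrappers::VectorBase         &x,
                const TrilinosWrappers::VectorBase   &b)
{
  // The iteration count and tolerance are irrelevant for a direct solver,
  // but the constructor still requires a SolverControl object.
  SolverControl solver_control (1, 0.);
  TrilinosWrappers::SolverDirect solver (solver_control);

  // Builds the KLU factorization and performs the solve; no preconditioner
  // is needed.
  solver.solve (A, x, b);
}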
}
- /**
- * This class implements a wrapper to use the Trilinos distributed
- * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of
- * matrix we deal with all the time - we most likely get it from some
- * assembly process, where also entries not locally owned might need to
- * be written and hence need to be forwarded to the owner process. This
- * class is designed to be used in a distributed memory architecture
- * with an MPI compiler on the bottom, but works equally well also for
- * serial processes. The only requirement for this class to work is that
- * Trilinos has been installed with the same compiler as is used for
- * generating deal.II.
- *
- * The interface of this class is modeled after the existing
- * SparseMatrix class in deal.II. It has almost the same member
- * functions, and is often exchangable. However, since Trilinos only
- * supports a single scalar type (double), it is not templated, and only
- * works with doubles.
- *
- * Note that Trilinos only guarantees that operations do what you expect
- * if the functions @p GlobalAssemble has been called after matrix
- * assembly. Therefore, you need to call SparseMatrix::compress()
- * before you actually use the matrix. This also calls @p FillComplete
- * that compresses the storage format for sparse matrices by discarding
- * unused elements. Trilinos allows to continue with assembling the
- * matrix after calls to these functions, though.
- *
- * @ingroup TrilinosWrappers
- * @ingroup Matrix1
- * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
- */
+ /**
+ * This class implements a wrapper to use the Trilinos distributed
+ * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of
+ * matrix we deal with all the time - we most likely get it from some
+ * assembly process, where also entries not locally owned might need to
+ * be written and hence need to be forwarded to the owner process. This
+ * class is designed to be used in a distributed memory architecture
+ * with an MPI compiler on the bottom, but works equally well also for
+ * serial processes. The only requirement for this class to work is that
+ * Trilinos has been installed with the same compiler as is used for
+ * generating deal.II.
+ *
+ * The interface of this class is modeled after the existing
+ * SparseMatrix class in deal.II. It has almost the same member
+ * functions, and is often exchangeable. However, since Trilinos only
+ * supports a single scalar type (double), it is not templated, and only
+ * works with doubles.
+ *
+ * Note that Trilinos only guarantees that operations do what you expect
+ * if the function @p GlobalAssemble has been called after matrix
+ * assembly. Therefore, you need to call SparseMatrix::compress()
+ * before you actually use the matrix. This also calls @p FillComplete
+ * that compresses the storage format for sparse matrices by discarding
+ * unused elements. Trilinos allows one to continue assembling the
+ * matrix after calls to these functions, though.
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Matrix1
+ * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009
+ */
class SparseMatrix : public Subscriptor
{
- public:
- /**
- * A structure that describes
- * some of the traits of this
- * class in terms of its run-time
- * behavior. Some other classes
- * (such as the block matrix
- * classes) that take one or
- * other of the matrix classes as
- * its template parameters can
- * tune their behavior based on
- * the variables in this class.
- */
- struct Traits
- {
- /**
- * It is safe to elide additions
- * of zeros to individual
- * elements of this matrix.
- */
- static const bool zero_addition_can_be_elided = true;
- };
+ public:
+ /**
+ * A structure that describes
+ * some of the traits of this
+ * class in terms of its run-time
+ * behavior. Some other classes
+ * (such as the block matrix
+ * classes) that take one or
+ * other of the matrix classes as
+ * its template parameters can
+ * tune their behavior based on
+ * the variables in this class.
+ */
+ struct Traits
+ {
+ /**
+ * It is safe to elide additions
+ * of zeros to individual
+ * elements of this matrix.
+ */
+ static const bool zero_addition_can_be_elided = true;
+ };
- /**
- * Declare a typedef for the
- * iterator class.
- */
- typedef MatrixIterators::const_iterator const_iterator;
-
- /**
- * Declare a typedef in analogy
- * to all the other container
- * classes.
- */
- typedef TrilinosScalar value_type;
-
- /**
- * @name Constructors and initalization.
- */
+ /**
+ * Declare a typedef for the
+ * iterator class.
+ */
+ typedef MatrixIterators::const_iterator const_iterator;
+
+ /**
+ * Declare a typedef in analogy
+ * to all the other container
+ * classes.
+ */
+ typedef TrilinosScalar value_type;
+
+ /**
+ * @name Constructors and initialization.
+ */
//@{
- /**
- * Default constructor. Generates
- * an empty (zero-size) matrix.
- */
- SparseMatrix ();
-
- /**
- * Generate a matrix that is completely
- * stored locally, having #m rows and
- * #n columns.
- *
- * The number of columns entries per
- * row is specified as the maximum
- * number of entries argument.
- */
- SparseMatrix (const unsigned int m,
- const unsigned int n,
- const unsigned int n_max_entries_per_row);
-
- /**
- * Generate a matrix that is completely
- * stored locally, having #m rows and
- * #n columns.
- *
- * The vector
- * <tt>n_entries_per_row</tt>
- * specifies the number of entries in
- * each row.
- */
- SparseMatrix (const unsigned int m,
- const unsigned int n,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * Generate a matrix from a Trilinos
- * sparsity pattern object.
- */
- SparseMatrix (const SparsityPattern &InputSparsityPattern);
-
- /**
- * Copy constructor. Sets the
- * calling matrix to be the same
- * as the input matrix, i.e.,
- * using the same sparsity
- * pattern and entries.
- */
- SparseMatrix (const SparseMatrix &InputMatrix);
-
- /**
- * Destructor. Made virtual so
- * that one can use pointers to
- * this class.
- */
- virtual ~SparseMatrix ();
-
- /**
- * This function initializes the
- * Trilinos matrix with a deal.II
- * sparsity pattern, i.e. it makes
- * the Trilinos Epetra matrix know
- * the position of nonzero entries
- * according to the sparsity
- * pattern. This function is meant
- * for use in serial programs, where
- * there is no need to specify how
- * the matrix is going to be
- * distributed among different
- * processors. This function works in
- * %parallel, too, but it is
- * recommended to manually specify
- * the %parallel partioning of the
- * matrix using an Epetra_Map. When
- * run in %parallel, it is currently
- * necessary that each processor
- * holds the sparsity_pattern
- * structure because each processor
- * sets its rows.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const SparsityType &sparsity_pattern);
-
- /**
- * This function reinitializes the
- * Trilinos sparse matrix from a
- * (possibly distributed) Trilinos
- * sparsity pattern.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- void reinit (const SparsityPattern &sparsity_pattern);
-
- /**
- * This function copies the content
- * in <tt>sparse_matrix</tt> to the
- * calling matrix.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- void reinit (const SparseMatrix &sparse_matrix);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries
- * stored therein. It uses a
- * threshold to copy only elements
- * with modulus larger than the
- * threshold (so zeros in the deal.II
- * matrix can be filtered away).
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
-
- /**
- * This reinit function takes as
- * input a Trilinos Epetra_CrsMatrix
- * and copies its sparsity
- * pattern. If so requested, even the
- * content (values) will be copied.
- */
- void reinit (const Epetra_CrsMatrix &input_matrix,
- const bool copy_values = true);
+ /**
+ * Default constructor. Generates
+ * an empty (zero-size) matrix.
+ */
+ SparseMatrix ();
+
+ /**
+ * Generate a matrix that is completely
+ * stored locally, having #m rows and
+ * #n columns.
+ *
+ * The number of column entries per
+ * row is specified via the maximum
+ * number of entries argument.
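+ *
+ * As an illustration (not part of
+ * the original documentation), a
+ * completely local 100x50 matrix
+ * that reserves space for at most
+ * five entries per row could be
+ * created like this:
+ * @code
+ * TrilinosWrappers::SparseMatrix matrix (100, 50, 5);
+ * @endcode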
+ */
+ SparseMatrix (const unsigned int m,
+ const unsigned int n,
+ const unsigned int n_max_entries_per_row);
+
+ /**
+ * Generate a matrix that is completely
+ * stored locally, having #m rows and
+ * #n columns.
+ *
+ * The vector
+ * <tt>n_entries_per_row</tt>
+ * specifies the number of entries in
+ * each row.
+ */
+ SparseMatrix (const unsigned int m,
+ const unsigned int n,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * Generate a matrix from a Trilinos
+ * sparsity pattern object.
+ */
+ SparseMatrix (const SparsityPattern &InputSparsityPattern);
+
+ /**
+ * Copy constructor. Sets the
+ * calling matrix to be the same
+ * as the input matrix, i.e.,
+ * using the same sparsity
+ * pattern and entries.
+ */
+ SparseMatrix (const SparseMatrix &InputMatrix);
+
+ /**
+ * Destructor. Made virtual so
+ * that one can use pointers to
+ * this class.
+ */
+ virtual ~SparseMatrix ();
+
+ /**
+ * This function initializes the
+ * Trilinos matrix with a deal.II
+ * sparsity pattern, i.e. it makes
+ * the Trilinos Epetra matrix know
+ * the position of nonzero entries
+ * according to the sparsity
+ * pattern. This function is meant
+ * for use in serial programs, where
+ * there is no need to specify how
+ * the matrix is going to be
+ * distributed among different
+ * processors. This function works in
+ * %parallel, too, but it is
+ * recommended to manually specify
+ * the %parallel partitioning of the
+ * matrix using an Epetra_Map. When
+ * run in %parallel, it is currently
+ * necessary that each processor
+ * holds the sparsity_pattern
+ * structure because each processor
+ * sets its rows.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
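+ *
+ * A minimal serial sketch of this
+ * usage (illustrative only; @p n
+ * and @p max_per_row are
+ * placeholders):
+ * @code
+ * dealii::SparsityPattern sp (n, n, max_per_row);
+ * // ... add couplings to sp ...
+ * sp.compress ();
+ *
+ * TrilinosWrappers::SparseMatrix matrix;
+ * matrix.reinit (sp);
+ * @endcode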
+ */
+ template<typename SparsityType>
+ void reinit (const SparsityType &sparsity_pattern);
+
+ /**
+ * This function reinitializes the
+ * Trilinos sparse matrix from a
+ * (possibly distributed) Trilinos
+ * sparsity pattern.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ void reinit (const SparsityPattern &sparsity_pattern);
+
+ /**
+ * This function copies the content
+ * in <tt>sparse_matrix</tt> to the
+ * calling matrix.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ void reinit (const SparseMatrix &sparse_matrix);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries
+ * stored therein. It uses a
+ * threshold to copy only elements
+ * with modulus larger than the
+ * threshold (so zeros in the deal.II
+ * matrix can be filtered away).
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
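+ *
+ * For illustration (with
+ * <tt>dealii_matrix</tt> standing
+ * for an already assembled deal.II
+ * sparse matrix of doubles),
+ * copying the matrix while dropping
+ * entries of modulus below 1e-13
+ * could look like this:
+ * @code
+ * TrilinosWrappers::SparseMatrix trilinos_matrix;
+ * trilinos_matrix.reinit (dealii_matrix, 1e-13);
+ * @endcode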
+ */
+ template <typename number>
+ void reinit (const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
+
+ /**
+ * This reinit function takes as
+ * input a Trilinos Epetra_CrsMatrix
+ * and copies its sparsity
+ * pattern. If so requested, even the
+ * content (values) will be copied.
+ */
+ void reinit (const Epetra_CrsMatrix &input_matrix,
+ const bool copy_values = true);
//@}
- /**
- * @name Constructors and initialization using an Epetra_Map description
- */
+ /**
+ * @name Constructors and initialization using an Epetra_Map description
+ */
//@{
- /**
- * Constructor using an Epetra_Map to
- * describe the %parallel
- * partitioning. The parameter @p
- * n_max_entries_per_row sets the
- * number of nonzero entries in each
- * row that will be allocated. Note
- * that this number does not need to
- * be exact, and it is even allowed
- * that the actual matrix structure
- * has more nonzero entries than
- * specified in the
- * constructor. However it is still
- * advantageous to provide good
- * estimates here since this will
- * considerably increase the
- * performance of the matrix
- * setup. However, there is no effect
- * in the performance of
- * matrix-vector products, since
- * Trilinos reorganizes the matrix
- * memory prior to use (in the
- * compress() step).
- */
- SparseMatrix (const Epetra_Map &parallel_partitioning,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * Same as before, but now set a
- * value of nonzeros for each matrix
- * row. Since we know the number of
- * elements in the matrix exactly in
- * this case, we can already allocate
- * the right amount of memory, which
- * makes the creation process
- * including the insertion of nonzero
- * elements by the respective
- * SparseMatrix::reinit call
- * considerably faster.
- */
- SparseMatrix (const Epetra_Map &parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows and
- * columns. This interface is meant
- * to be used for generating
- * rectangular matrices, where one
- * map describes the %parallel
- * partitioning of the dofs
- * associated with the matrix rows
- * and the other one the partitioning
- * of dofs in the matrix
- * columns. Note that there is no
- * real parallelism along the columns
- * – the processor that owns a
- * certain row always owns all the
- * column elements, no matter how far
- * they might be spread out. The
- * second Epetra_Map is only used to
- * specify the number of columns and
- * for internal arragements when
- * doing matrix-vector products with
- * vectors based on that column map.
- *
- * The integer input @p
- * n_max_entries_per_row defines the
- * number of columns entries per row
- * that will be allocated.
- */
- SparseMatrix (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows and
- * columns. This interface is meant
- * to be used for generating
- * rectangular matrices, where one
- * map specifies the %parallel
- * distribution of degrees of freedom
- * associated with matrix rows and
- * the second one specifies the
- * %parallel distribution the dofs
- * associated with columns in the
- * matrix. The second map also
- * provides information for the
- * internal arrangement in matrix
- * vector products (i.e., the
- * distribution of vector this matrix
- * is to be multiplied with), but is
- * not used for the distribution of
- * the columns – rather, all
- * column elements of a row are
- * stored on the same processor in
- * any case. The vector
- * <tt>n_entries_per_row</tt>
- * specifies the number of entries in
- * each row of the newly generated
- * matrix.
- */
- SparseMatrix (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This function is initializes the
- * Trilinos Epetra matrix according to
- * the specified sparsity_pattern, and
- * also reassigns the matrix rows to
- * different processes according to a
- * user-supplied Epetra map. In
- * programs following the style of the
- * tutorial programs, this function
- * (and the respective call for a
- * rectangular matrix) are the natural
- * way to initialize the matrix size,
- * its distribution among the MPI
- * processes (if run in %parallel) as
- * well as the locatoin of non-zero
- * elements. Trilinos stores the
- * sparsity pattern internally, so it
- * won't be needed any more after this
- * call, in contrast to the deal.II own
- * object. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern. If
- * the flag is not set, each processor
- * just sets the elements in the
- * sparsity pattern that belong to its
- * rows.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const Epetra_Map &parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const bool exchange_data = false);
-
- /**
- * This function is similar to the
- * other initialization function
- * above, but now also reassigns the
- * matrix rows and columns according
- * to two user-supplied Epetra maps.
- * To be used for rectangular
- * matrices. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const bool exchange_data = false);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries
- * stored therein. It uses a
- * threshold to copy only elements
- * with modulus larger than the
- * threshold (so zeros in the deal.II
- * matrix can be filtered away). In
- * contrast to the other reinit
- * function with deal.II sparse
- * matrix argument, this function
- * takes a %parallel partitioning
- * specified by the user instead of
- * internally generating it.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const Epetra_Map &parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
-
- /**
- * This function is similar to the
- * other initialization function with
- * deal.II sparse matrix input above,
- * but now takes Epetra maps for both
- * the rows and the columns of the
- * matrix. Chosen for rectangular
- * matrices.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const Epetra_Map &row_parallel_partitioning,
- const Epetra_Map &col_parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
+ /**
+ * Constructor using an Epetra_Map to
+ * describe the %parallel
+ * partitioning. The parameter @p
+ * n_max_entries_per_row sets the
+ * number of nonzero entries in each
+ * row that will be allocated. Note
+ * that this number does not need to
+ * be exact, and it is even allowed
+ * that the actual matrix structure
+ * has more nonzero entries than
+ * specified in the
+ * constructor. However, it is still
+ * advantageous to provide good
+ * estimates here, since this will
+ * considerably increase the
+ * performance of the matrix
+ * setup. There is no effect
+ * on the performance of
+ * matrix-vector products, since
+ * Trilinos reorganizes the matrix
+ * memory prior to use (in the
+ * compress() step).
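+ *
+ * One way to set up such a map (a
+ * sketch only; @p n is a
+ * placeholder for the global number
+ * of rows, and the Epetra classes
+ * are the usual Trilinos ones):
+ * @code
+ * Epetra_MpiComm comm (MPI_COMM_WORLD);
+ * Epetra_Map map (n, 0, comm);
+ *
+ * TrilinosWrappers::SparseMatrix matrix (map, 5);
+ * @endcode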
+ */
+ SparseMatrix (const Epetra_Map &parallel_partitioning,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * Same as before, but now set the
+ * number of nonzeros for each matrix
+ * row. Since we know the number of
+ * elements in the matrix exactly in
+ * this case, we can already allocate
+ * the right amount of memory, which
+ * makes the creation process
+ * including the insertion of nonzero
+ * elements by the respective
+ * SparseMatrix::reinit call
+ * considerably faster.
+ */
+ SparseMatrix (const Epetra_Map &parallel_partitioning,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different Epetra maps for rows and
+ * columns. This interface is meant
+ * to be used for generating
+ * rectangular matrices, where one
+ * map describes the %parallel
+ * partitioning of the dofs
+ * associated with the matrix rows
+ * and the other one the partitioning
+ * of dofs in the matrix
+ * columns. Note that there is no
+ * real parallelism along the columns
+ * – the processor that owns a
+ * certain row always owns all the
+ * column elements, no matter how far
+ * they might be spread out. The
+ * second Epetra_Map is only used to
+ * specify the number of columns and
+ * for internal arrangements when
+ * doing matrix-vector products with
+ * vectors based on that column map.
+ *
+ * The integer input @p
+ * n_max_entries_per_row defines the
+ * number of column entries per row
+ * that will be allocated.
+ */
+ SparseMatrix (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different Epetra maps for rows and
+ * columns. This interface is meant
+ * to be used for generating
+ * rectangular matrices, where one
+ * map specifies the %parallel
+ * distribution of degrees of freedom
+ * associated with matrix rows and
+ * the second one specifies the
+ * %parallel distribution of the dofs
+ * associated with columns in the
+ * matrix. The second map also
+ * provides information for the
+ * internal arrangement in matrix
+ * vector products (i.e., the
+ * distribution of the vectors this matrix
+ * is to be multiplied with), but is
+ * not used for the distribution of
+ * the columns – rather, all
+ * column elements of a row are
+ * stored on the same processor in
+ * any case. The vector
+ * <tt>n_entries_per_row</tt>
+ * specifies the number of entries in
+ * each row of the newly generated
+ * matrix.
+ */
+ SparseMatrix (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This function initializes the
+ * Trilinos Epetra matrix according to
+ * the specified sparsity_pattern, and
+ * also reassigns the matrix rows to
+ * different processes according to a
+ * user-supplied Epetra map. In
+ * programs following the style of the
+ * tutorial programs, this function
+ * (and the respective call for a
+ * rectangular matrix) is the natural
+ * way to initialize the matrix size,
+ * its distribution among the MPI
+ * processes (if run in %parallel) as
+ * well as the location of non-zero
+ * elements. Trilinos stores the
+ * sparsity pattern internally, so it
+ * won't be needed any more after this
+ * call, in contrast to deal.II's own
+ * object. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern. If
+ * the flag is not set, each processor
+ * just sets the elements in the
+ * sparsity pattern that belong to its
+ * rows.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
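+ *
+ * A rough sketch (with illustrative
+ * names: @p map is the Epetra_Map
+ * of the rows, @p n the global
+ * size):
+ * @code
+ * TrilinosWrappers::SparseMatrix matrix;
+ *
+ * CompressedSimpleSparsityPattern csp (n, n);
+ * // ... add couplings on the locally
+ * // owned rows of csp ...
+ *
+ * matrix.reinit (map, csp);
+ * @endcode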
+ */
+ template<typename SparsityType>
+ void reinit (const Epetra_Map &parallel_partitioning,
+ const SparsityType &sparsity_pattern,
+ const bool exchange_data = false);
+
+ /**
+ * This function is similar to the
+ * other initialization function
+ * above, but now also reassigns the
+ * matrix rows and columns according
+ * to two user-supplied Epetra maps.
+ * To be used for rectangular
+ * matrices. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ template<typename SparsityType>
+ void reinit (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const SparsityType &sparsity_pattern,
+ const bool exchange_data = false);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries
+ * stored therein. It uses a
+ * threshold to copy only elements
+ * with modulus larger than the
+ * threshold (so zeros in the deal.II
+ * matrix can be filtered away). In
+ * contrast to the other reinit
+ * function with deal.II sparse
+ * matrix argument, this function
+ * takes a %parallel partitioning
+ * specified by the user instead of
+ * internally generating it.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ template <typename number>
+ void reinit (const Epetra_Map &parallel_partitioning,
+ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
+
+ /**
+ * This function is similar to the
+ * other initialization function with
+ * deal.II sparse matrix input above,
+ * but now takes Epetra maps for both
+ * the rows and the columns of the
+ * matrix. Chosen for rectangular
+ * matrices.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ template <typename number>
+ void reinit (const Epetra_Map &row_parallel_partitioning,
+ const Epetra_Map &col_parallel_partitioning,
+ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
//@}
- /**
- * @name Constructors and initialization using an IndexSet description
- */
+ /**
+ * @name Constructors and initialization using an IndexSet description
+ */
//@{
- /**
- * Constructor using an IndexSet and
- * an MPI communicator to describe
- * the %parallel partitioning. The
- * parameter @p n_max_entries_per_row
- * sets the number of nonzero entries
- * in each row that will be
- * allocated. Note that this number
- * does not need to be exact, and it
- * is even allowed that the actual
- * matrix structure has more nonzero
- * entries than specified in the
- * constructor. However it is still
- * advantageous to provide good
- * estimates here since this will
- * considerably increase the
- * performance of the matrix
- * setup. However, there is no effect
- * in the performance of
- * matrix-vector products, since
- * Trilinos reorganizes the matrix
- * memory prior to use (in the
- * compress() step).
- */
- SparseMatrix (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * Same as before, but now set the
- * number of nonzeros in each matrix
- * row separately. Since we know the
- * number of elements in the matrix
- * exactly in this case, we can
- * already allocate the right amount
- * of memory, which makes the
- * creation process including the
- * insertion of nonzero elements by
- * the respective
- * SparseMatrix::reinit call
- * considerably faster.
- */
- SparseMatrix (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different IndexSet partitions for
- * row and columns. This interface is
- * meant to be used for generating
- * rectangular matrices, where the
- * first index set describes the
- * %parallel partitioning of the
- * degrees of freedom associated with
- * the matrix rows and the second one
- * the partitioning of the matrix
- * columns. The second index set
- * specifies the partitioning of the
- * vectors this matrix is to be
- * multiplied with, not the
- * distribution of the elements that
- * actually appear in the matrix.
- *
- * The parameter @p
- * n_max_entries_per_row defines how
- * much memory will be allocated for
- * each row. This number does not
- * need to be accurate, as the
- * structure is reorganized in the
- * compress() call.
- */
- SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const unsigned int n_max_entries_per_row = 0);
-
- /**
- * This constructor is similar to the
- * one above, but it now takes two
- * different Epetra maps for rows and
- * columns. This interface is meant
- * to be used for generating
- * rectangular matrices, where one
- * map specifies the %parallel
- * distribution of degrees of freedom
- * associated with matrix rows and
- * the second one specifies the
- * %parallel distribution the dofs
- * associated with columns in the
- * matrix. The second map also
- * provides information for the
- * internal arrangement in matrix
- * vector products (i.e., the
- * distribution of vector this matrix
- * is to be multiplied with), but is
- * not used for the distribution of
- * the columns – rather, all
- * column elements of a row are
- * stored on the same processor in
- * any case. The vector
- * <tt>n_entries_per_row</tt>
- * specifies the number of entries in
- * each row of the newly generated
- * matrix.
- */
- SparseMatrix (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
- const std::vector<unsigned int> &n_entries_per_row);
-
- /**
- * This function is initializes the
- * Trilinos Epetra matrix according
- * to the specified sparsity_pattern,
- * and also reassigns the matrix rows
- * to different processes according
- * to a user-supplied index set and
- * %parallel communicator. In
- * programs following the style of
- * the tutorial programs, this
- * function (and the respective call
- * for a rectangular matrix) are the
- * natural way to initialize the
- * matrix size, its distribution
- * among the MPI processes (if run in
- * %parallel) as well as the locatoin
- * of non-zero elements. Trilinos
- * stores the sparsity pattern
- * internally, so it won't be needed
- * any more after this call, in
- * contrast to the deal.II own
- * object. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern. If
- * the flag is not set, each
- * processor just sets the elements
- * in the sparsity pattern that
- * belong to its rows.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const IndexSet &parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool exchange_data = false);
-
- /**
- * This function is similar to the
- * other initialization function
- * above, but now also reassigns the
- * matrix rows and columns according
- * to two user-supplied index sets.
- * To be used for rectangular
- * matrices. The optional argument @p
- * exchange_data can be used for
- * reinitialization with a sparsity
- * pattern that is not fully
- * constructed. This feature is only
- * implemented for input sparsity
- * patterns of type
- * CompressedSimpleSparsityPattern.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template<typename SparsityType>
- void reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const SparsityType &sparsity_pattern,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool exchange_data = false);
-
- /**
- * This function initializes the
- * Trilinos matrix using the deal.II
- * sparse matrix and the entries
- * stored therein. It uses a
- * threshold to copy only elements
- * with modulus larger than the
- * threshold (so zeros in the deal.II
- * matrix can be filtered away). In
- * contrast to the other reinit
- * function with deal.II sparse
- * matrix argument, this function
- * takes a %parallel partitioning
- * specified by the user instead of
- * internally generating it.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const IndexSet &parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
-
- /**
- * This function is similar to the
- * other initialization function with
- * deal.II sparse matrix input above,
- * but now takes index sets for both
- * the rows and the columns of the
- * matrix. Chosen for rectangular
- * matrices.
- *
- * The optional parameter
- * <tt>copy_values</tt> decides
- * whether only the sparsity
- * structure of the input matrix
- * should be used or the matrix
- * entries should be copied, too.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- template <typename number>
- void reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
- const double drop_tolerance=1e-13,
- const bool copy_values=true,
- const ::dealii::SparsityPattern *use_this_sparsity=0);
+ /**
+ * Constructor using an IndexSet and
+ * an MPI communicator to describe
+ * the %parallel partitioning. The
+ * parameter @p n_max_entries_per_row
+ * sets the number of nonzero entries
+ * in each row that will be
+ * allocated. Note that this number
+ * does not need to be exact, and it
+ * is even allowed that the actual
+ * matrix structure has more nonzero
+ * entries than specified in the
+ * constructor. However, it is still
+ * advantageous to provide good
+ * estimates here, since this will
+ * considerably increase the
+ * performance of the matrix
+ * setup. There is no effect
+ * on the performance of
+ * matrix-vector products, since
+ * Trilinos reorganizes the matrix
+ * memory prior to use (in the
+ * compress() step).
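+ *
+ * For illustration (with
+ * hypothetical bounds @p begin and
+ * @p end delimiting the locally
+ * owned rows of a matrix with
+ * @p n_rows global rows):
+ * @code
+ * IndexSet locally_owned (n_rows);
+ * locally_owned.add_range (begin, end);
+ *
+ * TrilinosWrappers::SparseMatrix matrix (locally_owned,
+ *                                        MPI_COMM_WORLD, 5);
+ * @endcode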
+ */
+ SparseMatrix (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * Same as before, but now set the
+ * number of nonzeros in each matrix
+ * row separately. Since we know the
+ * number of elements in the matrix
+ * exactly in this case, we can
+ * already allocate the right amount
+ * of memory, which makes the
+ * creation process including the
+ * insertion of nonzero elements by
+ * the respective
+ * SparseMatrix::reinit call
+ * considerably faster.
+ */
+ SparseMatrix (const IndexSet &parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different IndexSet partitions for
+ * row and columns. This interface is
+ * meant to be used for generating
+ * rectangular matrices, where the
+ * first index set describes the
+ * %parallel partitioning of the
+ * degrees of freedom associated with
+ * the matrix rows and the second one
+ * the partitioning of the matrix
+ * columns. The second index set
+ * specifies the partitioning of the
+ * vectors this matrix is to be
+ * multiplied with, not the
+ * distribution of the elements that
+ * actually appear in the matrix.
+ *
+ * The parameter @p
+ * n_max_entries_per_row defines how
+ * much memory will be allocated for
+ * each row. This number does not
+ * need to be accurate, as the
+ * structure is reorganized in the
+ * compress() call.
+ */
+ SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const unsigned int n_max_entries_per_row = 0);
+
+ /**
+ * This constructor is similar to the
+ * one above, but it now takes two
+ * different index sets for rows and
+ * columns. This interface is meant
+ * to be used for generating
+ * rectangular matrices, where one
+ * map specifies the %parallel
+ * distribution of degrees of freedom
+ * associated with matrix rows and
+ * the second one specifies the
+ * %parallel distribution of the dofs
+ * associated with columns in the
+ * matrix. The second index set also
+ * provides information for the
+ * internal arrangement in matrix
+ * vector products (i.e., the
+ * distribution of the vectors this matrix
+ * is to be multiplied with), but is
+ * not used for the distribution of
+ * the columns – rather, all
+ * column elements of a row are
+ * stored on the same processor in
+ * any case. The vector
+ * <tt>n_entries_per_row</tt>
+ * specifies the number of entries in
+ * each row of the newly generated
+ * matrix.
+ */
+ SparseMatrix (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const MPI_Comm &communicator,
+ const std::vector<unsigned int> &n_entries_per_row);
+
+ /**
+ * This function initializes the
+ * Trilinos Epetra matrix according
+ * to the specified sparsity_pattern,
+ * and also reassigns the matrix rows
+ * to different processes according
+ * to a user-supplied index set and
+ * %parallel communicator. In
+ * programs following the style of
+ * the tutorial programs, this
+ * function (and the respective call
+ * for a rectangular matrix) is the
+ * natural way to initialize the
+ * matrix size, its distribution
+ * among the MPI processes (if run in
+ * %parallel) as well as the location
+ * of non-zero elements. Trilinos
+ * stores the sparsity pattern
+ * internally, so it won't be needed
+ * any more after this call, in
+ * contrast to deal.II's own
+ * object. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern. If
+ * the flag is not set, each
+ * processor just sets the elements
+ * in the sparsity pattern that
+ * belong to its rows.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
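+ *
+ * In a program built on a
+ * distributed DoFHandler this might
+ * look as follows (a sketch with
+ * illustrative names only):
+ * @code
+ * IndexSet owned = dof_handler.locally_owned_dofs();
+ *
+ * CompressedSimpleSparsityPattern csp (dof_handler.n_dofs());
+ * DoFTools::make_sparsity_pattern (dof_handler, csp);
+ *
+ * TrilinosWrappers::SparseMatrix matrix;
+ * matrix.reinit (owned, csp, MPI_COMM_WORLD);
+ * @endcode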
+ */
+ template<typename SparsityType>
+ void reinit (const IndexSet &parallel_partitioning,
+ const SparsityType &sparsity_pattern,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+
+ /**
+ * This function is similar to the
+ * other initialization function
+ * above, but now also reassigns the
+ * matrix rows and columns according
+ * to two user-supplied index sets.
+ * To be used for rectangular
+ * matrices. The optional argument @p
+ * exchange_data can be used for
+ * reinitialization with a sparsity
+ * pattern that is not fully
+ * constructed. This feature is only
+ * implemented for input sparsity
+ * patterns of type
+ * CompressedSimpleSparsityPattern.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ template<typename SparsityType>
+ void reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const SparsityType &sparsity_pattern,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool exchange_data = false);
+
+ /**
+ * This function initializes the
+ * Trilinos matrix using the deal.II
+ * sparse matrix and the entries
+ * stored therein. It uses a
+ * threshold to copy only elements
+ * with modulus larger than the
+ * threshold (so zeros in the deal.II
+ * matrix can be filtered away). In
+ * contrast to the other reinit
+ * function with deal.II sparse
+ * matrix argument, this function
+ * takes a %parallel partitioning
+ * specified by the user instead of
+ * internally generating it.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ template <typename number>
+ void reinit (const IndexSet &parallel_partitioning,
+ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
+
+ /**
+ * This function is similar to the
+ * other initialization function with
+ * deal.II sparse matrix input above,
+ * but now takes index sets for both
+ * the rows and the columns of the
+ * matrix. Chosen for rectangular
+ * matrices.
+ *
+ * The optional parameter
+ * <tt>copy_values</tt> decides
+ * whether only the sparsity
+ * structure of the input matrix
+ * should be used or the matrix
+ * entries should be copied, too.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ template <typename number>
+ void reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
+ const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const double drop_tolerance=1e-13,
+ const bool copy_values=true,
+ const ::dealii::SparsityPattern *use_this_sparsity=0);
//@}
- /**
- * @name Information on the matrix
- */
+ /**
+ * @name Information on the matrix
+ */
//@{
- /**
- * Return the number of rows in
- * this matrix.
- */
- unsigned int m () const;
-
- /**
- * Return the number of columns
- * in this matrix.
- */
- unsigned int n () const;
-
- /**
- * Return the local dimension
- * of the matrix, i.e. the
- * number of rows stored on the
- * present MPI process. For
- * sequential matrices, this
- * number is the same as m(),
- * but for %parallel matrices it
- * may be smaller.
- *
- * To figure out which elements
- * exactly are stored locally,
- * use local_range().
- */
- unsigned int local_size () const;
-
- /**
- * Return a pair of indices
- * indicating which rows of
- * this matrix are stored
- * locally. The first number is
- * the index of the first row
- * stored, the second the index
- * of the one past the last one
- * that is stored locally. If
- * this is a sequential matrix,
- * then the result will be the
- * pair (0,m()), otherwise it
- * will be a pair (i,i+n),
- * where
- * <tt>n=local_size()</tt>.
- */
- std::pair<unsigned int, unsigned int>
- local_range () const;
-
- /**
- * Return whether @p index is
- * in the local range or not,
- * see also local_range().
- */
- bool in_local_range (const unsigned int index) const;
-
- /**
- * Return the number of nonzero
- * elements of this matrix.
- */
- unsigned int n_nonzero_elements () const;
-
- /**
- * Number of entries in a
- * specific row.
- */
- unsigned int row_length (const unsigned int row) const;
-
- /**
- * Returns the state of the matrix,
- * i.e., whether compress() needs to
- * be called after an operation
- * requiring data exchange. A call to
- * compress() is also needed when the
- * method set() has been called (even
- * when working in serial).
- */
- bool is_compressed () const;
-
- /**
- * Determine an estimate for the memory
- * consumption (in bytes) of this
- * object. Note that only the memory
- * reserved on the current processor is
- * returned in case this is called in
- * an MPI-based program.
- */
- std::size_t memory_consumption () const;
+ /**
+ * Return the number of rows in
+ * this matrix.
+ */
+ unsigned int m () const;
+
+ /**
+ * Return the number of columns
+ * in this matrix.
+ */
+ unsigned int n () const;
+
+ /**
+ * Return the local dimension
+ * of the matrix, i.e. the
+ * number of rows stored on the
+ * present MPI process. For
+ * sequential matrices, this
+ * number is the same as m(),
+ * but for %parallel matrices it
+ * may be smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which rows of
+ * this matrix are stored
+ * locally. The first number is
+ * the index of the first row
+ * stored, the second the index
+ * of the one past the last one
+ * that is stored locally. If
+ * this is a sequential matrix,
+ * then the result will be the
+ * pair (0,m()), otherwise it
+ * will be a pair (i,i+n),
+ * where
+ * <tt>n=local_size()</tt>.
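+ *
+ * For example (a sketch only), one
+ * could count the entries stored on
+ * the present process like this:
+ * @code
+ * const std::pair<unsigned int, unsigned int>
+ *   range = matrix.local_range();
+ *
+ * unsigned int n_local_entries = 0;
+ * for (unsigned int row=range.first; row<range.second; ++row)
+ *   n_local_entries += matrix.row_length (row);
+ * @endcode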
+ */
+ std::pair<unsigned int, unsigned int>
+ local_range () const;
+
+ /**
+ * Return whether @p index is
+ * in the local range or not,
+ * see also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
+
+ /**
+ * Return the number of nonzero
+ * elements of this matrix.
+ */
+ unsigned int n_nonzero_elements () const;
+
+ /**
+ * Number of entries in a
+ * specific row.
+ */
+ unsigned int row_length (const unsigned int row) const;
+
+ /**
+ * Returns the state of the matrix,
+ * i.e., whether compress() needs to
+ * be called after an operation
+ * requiring data exchange. A call to
+ * compress() is also needed when the
+ * method set() has been called (even
+ * when working in serial).
+ */
+ bool is_compressed () const;
+
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object. Note that only the memory
+ * reserved on the current processor is
+ * returned in case this is called in
+ * an MPI-based program.
+ */
+ std::size_t memory_consumption () const;
//@}
- /**
- * @name Modifying entries
- */
+ /**
+ * @name Modifying entries
+ */
//@{
- /**
- * This operator assigns a scalar to
- * a matrix. Since this does usually
- * not make much sense (should we set
- * all matrix entries to this value?
- * Only the nonzero entries of the
- * sparsity pattern?), this operation
- * is only allowed if the actual
- * value to be assigned is zero. This
- * operator only exists to allow for
- * the obvious notation
- * <tt>matrix=0</tt>, which sets all
- * elements of the matrix to zero,
- * but keeps the sparsity pattern
- * previously used.
- */
- SparseMatrix &
- operator = (const double d);
-
- /**
- * Release all memory and return to a
- * state just like after having
- * called the default constructor.
- *
- * This is a collective operation
- * that needs to be called on all
- * processors in order to avoid a
- * dead lock.
- */
- void clear ();
-
- /**
- * This command does two things:
- * <ul>
- * <li> If the matrix was initialized
- * without a sparsity pattern,
- * elements have been added manually
- * using the set() command. When this
- * process is completed, a call to
- * compress() reorganizes the
- * internal data structures (aparsity
- * pattern) so that a fast access to
- * data is possible in matrix-vector
- * products.
- * <li> If the matrix structure has
- * already been fixed (either by
- * initialization with a sparsity
- * pattern or by calling compress()
- * during the setup phase), this
- * command does the %parallel
- * exchange of data. This is
- * necessary when we perform assembly
- * on more than one (MPI) process,
- * because then some non-local row
- * data will accumulate on nodes that
- * belong to the current's processor
- * element, but are actually held by
- * another. This command is usually
- * called after all elements have
- * been traversed.
- * </ul>
- *
- * In both cases, this function
- * compresses the data structures and
- * allows the resulting matrix to be
- * used in all other operations like
- * matrix-vector products. This is a
- * collective operation, i.e., it
- * needs to be run on all processors
- * when used in %parallel.
- *
- * See @ref GlossCompress "Compressing distributed objects"
- * for more information.
- */
- void compress (::dealii::VectorOperation::values operation
- =::dealii::VectorOperation::unknown);
-
- /**
- * Set the element (<i>i,j</i>)
- * to @p value.
- *
- * This function is able to insert new
- * elements into the matrix as long as
- * compress() has not been called, so
- * the sparsity pattern will be
- * extended. When compress() is called
- * for the first time, then this is no
- * longer possible and an insertion of
- * elements at positions which have not
- * been initialized will throw an
- * exception. Note that in case
- * elements need to be inserted, it is
- * mandatory that elements are inserted
- * only once. Otherwise, the elements
- * will actually be added in the end
- * (since it is not possible to
- * efficiently find values to the same
- * entry before compress() has been
- * called). In the case that an element
- * is set more than once, initialize
- * the matrix with a sparsity pattern
- * first.
- */
- void set (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value);
-
- /**
- * Set all elements given in a
- * FullMatrix<double> into the sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function writes the elements
- * in <tt>full_matrix</tt> into the
- * calling matrix, using the
- * local-to-global indexing specified
- * by <tt>indices</tt> for both the
- * rows and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * This function is able to insert
- * new elements into the matrix as
- * long as compress() has not been
- * called, so the sparsity pattern
- * will be extended. When compress()
- * is called for the first time, then
- * this is no longer possible and an
- * insertion of elements at positions
- * which have not been initialized
- * will throw an exception.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const std::vector<unsigned int> &indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * This function is able to insert
- * new elements into the matrix as
- * long as compress() has not been
- * called, so the sparsity pattern
- * will be extended. When compress()
- * is called for the first time, then
- * this is no longer possible and an
- * insertion of elements at positions
- * which have not been initialized
- * will throw an exception.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values,
- const bool elide_zero_values = false);
-
- /**
- * Set several elements to values
- * given by <tt>values</tt> in a
- * given row in columns given by
- * col_indices into the sparse
- * matrix.
- *
- * This function is able to insert
- * new elements into the matrix as
- * long as compress() has not been
- * called, so the sparsity pattern
- * will be extended. When compress()
- * is called for the first time, then
- * this is no longer possible and an
- * insertion of elements at positions
- * which have not been initialized
- * will throw an exception.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be inserted anyway
- * or they should be filtered
- * away. The default value is
- * <tt>false</tt>, i.e., even zero
- * values are inserted/replaced.
- */
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values,
- const bool elide_zero_values = false);
-
- /**
- * Add @p value to the element
- * (<i>i,j</i>).
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number>
- * class (but in contrast to the
- * situation for PETSc based
- * matrices), this function
- * throws an exception if an
- * entry does not exist in the
- * sparsity pattern. Moreover, if
- * <tt>value</tt> is not a finite
- * number an exception is thrown.
- */
- void add (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value);
-
- /**
- * Add all elements given in a
- * FullMatrix<double> into sparse
- * matrix locations given by
- * <tt>indices</tt>. In other words,
- * this function adds the elements in
- * <tt>full_matrix</tt> to the
- * respective entries in calling
- * matrix, using the local-to-global
- * indexing specified by
- * <tt>indices</tt> for both the rows
- * and the columns of the
- * matrix. This function assumes a
- * quadratic sparse matrix and a
- * quadratic full_matrix, the usual
- * situation in FE calculations.
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number>
- * class (but in contrast to the
- * situation for PETSc based
- * matrices), this function
- * throws an exception if an
- * entry does not exist in the
- * sparsity pattern.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const std::vector<unsigned int> &indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Same function as before, but now
- * including the possibility to use
- * rectangular full_matrices and
- * different local-to-global indexing
- * on rows and columns, respectively.
- */
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &full_matrix,
- const bool elide_zero_values = true);
-
- /**
- * Set several elements in the
- * specified row of the matrix with
- * column indices as given by
- * <tt>col_indices</tt> to the
- * respective value.
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number>
- * class (but in contrast to the
- * situation for PETSc based
- * matrices), this function
- * throws an exception if an
- * entry does not exist in the
- * sparsity pattern.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values,
- const bool elide_zero_values = true);
-
- /**
- * Add an array of values given by
- * <tt>values</tt> in the given
- * global matrix row at columns
- * specified by col_indices in the
- * sparse matrix.
- *
- * Just as the respective call in
- * deal.II SparseMatrix<Number> class
- * (but in contrast to the situation
- * for PETSc based matrices), this
- * function throws an exception if an
- * entry does not exist in the
- * sparsity pattern.
- *
- * The optional parameter
- * <tt>elide_zero_values</tt> can be
- * used to specify whether zero
- * values should be added anyway or
- * these should be filtered away and
- * only non-zero data is added. The
- * default value is <tt>true</tt>,
- * i.e., zero values won't be added
- * into the matrix.
- */
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
-
- /**
- * Multiply the entire matrix
- * by a fixed factor.
- */
- SparseMatrix & operator *= (const TrilinosScalar factor);
-
- /**
- * Divide the entire matrix by
- * a fixed factor.
- */
- SparseMatrix & operator /= (const TrilinosScalar factor);
-
- /**
- * Copy the given (Trilinos) matrix
- * (sparsity pattern and entries).
- */
- void copy_from (const SparseMatrix &source);
-
- /**
- * Add <tt>matrix</tt> scaled by
- * <tt>factor</tt> to this matrix,
- * i.e. the matrix
- * <tt>factor*matrix</tt> is added to
- * <tt>this</tt>. If the sparsity
- * pattern of the calling matrix does
- * not contain all the elements in
- * the sparsity pattern of the input
- * matrix, this function will throw
- * an exception.
- */
- void add (const TrilinosScalar factor,
- const SparseMatrix &matrix);
-
- /**
- * Remove all elements from
- * this <tt>row</tt> by setting
- * them to zero. The function
- * does not modify the number
- * of allocated nonzero
- * entries, it only sets some
- * entries to zero. It may drop
- * them from the sparsity
- * pattern, though (but retains
- * the allocated memory in case
- * new entries are again added
- * later). Note that this is a
- * global operation, so this
- * needs to be done on all MPI
- * processes.
- *
- * This operation is used in
- * eliminating constraints
- * (e.g. due to hanging nodes)
- * and makes sure that we can
- * write this modification to
- * the matrix without having to
- * read entries (such as the
- * locations of non-zero
- * elements) from it —
- * without this operation,
- * removing constraints on
- * %parallel matrices is a
- * rather complicated
- * procedure.
- *
- * The second parameter can be
- * used to set the diagonal
- * entry of this row to a value
- * different from zero. The
- * default is to set it to
- * zero.
- */
- void clear_row (const unsigned int row,
- const TrilinosScalar new_diag_value = 0);
-
- /**
- * Same as clear_row(), except
- * that it works on a number of
- * rows at once.
- *
- * The second parameter can be
- * used to set the diagonal
- * entries of all cleared rows
- * to something different from
- * zero. Note that all of these
- * diagonal entries get the
- * same value -- if you want
- * different values for the
- * diagonal entries, you have
- * to set them by hand.
- */
- void clear_rows (const std::vector<unsigned int> &rows,
- const TrilinosScalar new_diag_value = 0);
-
- /**
- * Make an in-place transpose
- * of a matrix.
- */
- void transpose ();
+ /**
+ * This operator assigns a scalar to
+ * a matrix. Since this usually does
+ * not make much sense (should we set
+ * all matrix entries to this value?
+ * Only the nonzero entries of the
+ * sparsity pattern?), this operation
+ * is only allowed if the actual
+ * value to be assigned is zero. This
+ * operator only exists to allow for
+ * the obvious notation
+ * <tt>matrix=0</tt>, which sets all
+ * elements of the matrix to zero,
+ * but keeps the sparsity pattern
+ * previously used.
+ */
+ SparseMatrix &
+ operator = (const double d);
+
+ /**
+ * Release all memory and return to a
+ * state just like after having
+ * called the default constructor.
+ *
+ * This is a collective operation
+ * that needs to be called on all
+ * processors in order to avoid a
+ * deadlock.
+ */
+ void clear ();
+
+ /**
+ * This command does two things:
+ * <ul>
+ * <li> If the matrix was initialized
+ * without a sparsity pattern,
+ * elements have been added manually
+ * using the set() command. When this
+ * process is completed, a call to
+ * compress() reorganizes the
+ * internal data structures (sparsity
+ * pattern) so that a fast access to
+ * data is possible in matrix-vector
+ * products.
+ * <li> If the matrix structure has
+ * already been fixed (either by
+ * initialization with a sparsity
+ * pattern or by calling compress()
+ * during the setup phase), this
+ * command does the %parallel
+ * exchange of data. This is
+ * necessary when we perform assembly
+ * on more than one (MPI) process,
+ * because then some row data will
+ * accumulate on processes that do
+ * not own the respective rows and
+ * needs to be forwarded to the
+ * owner. This command is usually
+ * called after all elements have
+ * been traversed.
+ * </ul>
+ *
+ * In both cases, this function
+ * compresses the data structures and
+ * allows the resulting matrix to be
+ * used in all other operations like
+ * matrix-vector products. This is a
+ * collective operation, i.e., it
+ * needs to be run on all processors
+ * when used in %parallel.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
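+
+ /*
+ * A minimal sketch of the usual call sequence (names are
+ * illustrative; the matrix is assumed to have been initialized
+ * with a sparsity pattern):
+ *
+ * @code
+ * // each process adds its contributions ...
+ * matrix.add (i, j, local_contribution);
+ *
+ * // ... and the non-local data is exchanged once at the end
+ * matrix.compress (VectorOperation::add);
+ * @endcode
+ */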
+
+ /**
+ * Set the element (<i>i,j</i>)
+ * to @p value.
+ *
+ * This function is able to insert new
+ * elements into the matrix as long as
+ * compress() has not been called, so
+ * the sparsity pattern will be
+ * extended. When compress() is called
+ * for the first time, then this is no
+ * longer possible and an insertion of
+ * elements at positions which have not
+ * been initialized will throw an
+ * exception. Note that in case
+ * elements need to be inserted, it is
+ * mandatory that elements are inserted
+ * only once. Otherwise, the elements
+ * will actually be added in the end
+ * (since it is not possible to
+ * efficiently merge values written
+ * to the same entry before compress()
+ * has been called). In case an element
+ * is set more than once, initialize
+ * the matrix with a sparsity pattern
+ * first.
+ */
+ void set (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value);
+
+ /**
+ * Set all elements given in a
+ * FullMatrix<double> into the sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function writes the elements
+ * in <tt>full_matrix</tt> into the
+ * calling matrix, using the
+ * local-to-global indexing specified
+ * by <tt>indices</tt> for both the
+ * rows and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * This function is able to insert
+ * new elements into the matrix as
+ * long as compress() has not been
+ * called, so the sparsity pattern
+ * will be extended. When compress()
+ * is called for the first time, then
+ * this is no longer possible and an
+ * insertion of elements at positions
+ * which have not been initialized
+ * will throw an exception.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
- void set (const std::vector<unsigned int> &indices,
++ void set (const std::vector<unsigned int> &indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = false);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
- void set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
++ void set (const std::vector<unsigned int> &row_indices,
++ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = false);
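+
+ /*
+ * A hedged usage sketch for the FullMatrix variants above: write a
+ * small dense block into the positions given by two (made-up)
+ * index vectors.
+ *
+ * @code
+ * std::vector<unsigned int> rows (2), cols (3);
+ * // ... fill rows and cols with global indices ...
+ *
+ * FullMatrix<TrilinosScalar> block (2, 3);
+ * block (0, 0) = 1.;  // ... and so on for the other entries
+ *
+ * matrix.set (rows, cols, block);
+ * @endcode
+ */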
+
+ /**
+ * Set several elements in the
+ * specified row of the matrix with
+ * column indices as given by
+ * <tt>col_indices</tt> to the
+ * respective value.
+ *
+ * This function is able to insert
+ * new elements into the matrix as
+ * long as compress() has not been
+ * called, so the sparsity pattern
+ * will be extended. When compress()
+ * is called for the first time, then
+ * this is no longer possible and an
+ * insertion of elements at positions
+ * which have not been initialized
+ * will throw an exception.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Set several elements of the given
+ * row of the sparse matrix, at the
+ * columns given by col_indices, to
+ * the values given by
+ * <tt>values</tt>.
+ *
+ * This function is able to insert
+ * new elements into the matrix as
+ * long as compress() has not been
+ * called, so the sparsity pattern
+ * will be extended. When compress()
+ * is called for the first time, then
+ * this is no longer possible and an
+ * insertion of elements at positions
+ * which have not been initialized
+ * will throw an exception.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be inserted anyway
+ * or they should be filtered
+ * away. The default value is
+ * <tt>false</tt>, i.e., even zero
+ * values are inserted/replaced.
+ */
+ void set (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values,
+ const bool elide_zero_values = false);
+
+ /**
+ * Add @p value to the element
+ * (<i>i,j</i>).
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number>
+ * class (but in contrast to the
+ * situation for PETSc based
+ * matrices), this function
+ * throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern. Moreover, if
+ * <tt>value</tt> is not a finite
+ * number an exception is thrown.
+ */
+ void add (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value);
+
+ /**
+ * Add all elements given in a
+ * FullMatrix<double> into sparse
+ * matrix locations given by
+ * <tt>indices</tt>. In other words,
+ * this function adds the elements in
+ * <tt>full_matrix</tt> to the
+ * respective entries in calling
+ * matrix, using the local-to-global
+ * indexing specified by
+ * <tt>indices</tt> for both the rows
+ * and the columns of the
+ * matrix. This function assumes a
+ * quadratic sparse matrix and a
+ * quadratic full_matrix, the usual
+ * situation in FE calculations.
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number>
+ * class (but in contrast to the
+ * situation for PETSc based
+ * matrices), this function
+ * throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
- void add (const std::vector<unsigned int> &indices,
++ void add (const std::vector<unsigned int> &indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = true);
+
+ /**
+ * Same function as before, but now
+ * including the possibility to use
+ * rectangular full_matrices and
+ * different local-to-global indexing
+ * on rows and columns, respectively.
+ */
- void add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
++ void add (const std::vector<unsigned int> &row_indices,
++ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &full_matrix,
+ const bool elide_zero_values = true);
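+
+ /*
+ * A sketch of how the FullMatrix add() variants are typically used
+ * in a finite element assembly loop (cell_matrix and
+ * local_dof_indices are assumed to have been filled for the
+ * current cell, as usual):
+ *
+ * @code
+ * cell->get_dof_indices (local_dof_indices);
+ * matrix.add (local_dof_indices, cell_matrix);
+ * @endcode
+ *
+ * with a single compress(VectorOperation::add) call once all cells
+ * have been visited.
+ */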
+
+ /**
+ * Add the values given in
+ * <tt>values</tt> to the elements
+ * in the specified row of the
+ * matrix, at the column indices
+ * given by <tt>col_indices</tt>.
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number>
+ * class (but in contrast to the
+ * situation for PETSc based
+ * matrices), this function
+ * throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values,
+ const bool elide_zero_values = true);
+
+ /**
+ * Add an array of values given by
+ * <tt>values</tt> to the given
+ * global matrix row, at the columns
+ * specified by col_indices.
+ *
+ * Just as the respective call in
+ * deal.II SparseMatrix<Number> class
+ * (but in contrast to the situation
+ * for PETSc based matrices), this
+ * function throws an exception if an
+ * entry does not exist in the
+ * sparsity pattern.
+ *
+ * The optional parameter
+ * <tt>elide_zero_values</tt> can be
+ * used to specify whether zero
+ * values should be added anyway or
+ * these should be filtered away and
+ * only non-zero data is added. The
+ * default value is <tt>true</tt>,
+ * i.e., zero values won't be added
+ * into the matrix.
+ */
+ void add (const unsigned int row,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values,
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
+
+ /**
+ * Multiply the entire matrix
+ * by a fixed factor.
+ */
+ SparseMatrix &operator *= (const TrilinosScalar factor);
+
+ /**
+ * Divide the entire matrix by
+ * a fixed factor.
+ */
+ SparseMatrix &operator /= (const TrilinosScalar factor);
+
+ /**
+ * Copy the given (Trilinos) matrix
+ * (sparsity pattern and entries).
+ */
+ void copy_from (const SparseMatrix &source);
+
+ /**
+ * Add <tt>matrix</tt> scaled by
+ * <tt>factor</tt> to this matrix,
+ * i.e. the matrix
+ * <tt>factor*matrix</tt> is added to
+ * <tt>this</tt>. If the sparsity
+ * pattern of the calling matrix does
+ * not contain all the elements in
+ * the sparsity pattern of the input
+ * matrix, this function will throw
+ * an exception.
+ */
+ void add (const TrilinosScalar factor,
+ const SparseMatrix &matrix);
+
+ /**
+ * Remove all elements from
+ * this <tt>row</tt> by setting
+ * them to zero. The function
+ * does not modify the number
+ * of allocated nonzero
+ * entries, it only sets some
+ * entries to zero. It may drop
+ * them from the sparsity
+ * pattern, though (but retains
+ * the allocated memory in case
+ * new entries are again added
+ * later). Note that this is a
+ * global operation, so this
+ * needs to be done on all MPI
+ * processes.
+ *
+ * This operation is used in
+ * eliminating constraints
+ * (e.g. due to hanging nodes)
+ * and makes sure that we can
+ * write this modification to
+ * the matrix without having to
+ * read entries (such as the
+ * locations of non-zero
+ * elements) from it —
+ * without this operation,
+ * removing constraints on
+ * %parallel matrices is a
+ * rather complicated
+ * procedure.
+ *
+ * The second parameter can be
+ * used to set the diagonal
+ * entry of this row to a value
+ * different from zero. The
+ * default is to set it to
+ * zero.
+ */
+ void clear_row (const unsigned int row,
+ const TrilinosScalar new_diag_value = 0);
+
+ /**
+ * Same as clear_row(), except
+ * that it works on a number of
+ * rows at once.
+ *
+ * The second parameter can be
+ * used to set the diagonal
+ * entries of all cleared rows
+ * to something different from
+ * zero. Note that all of these
+ * diagonal entries get the
+ * same value -- if you want
+ * different values for the
+ * diagonal entries, you have
+ * to set them by hand.
+ */
+ void clear_rows (const std::vector<unsigned int> &rows,
+ const TrilinosScalar new_diag_value = 0);
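+
+ /*
+ * A hedged example: zero out the rows belonging to a set of
+ * constrained degrees of freedom while writing a unit value onto
+ * the diagonal (boundary_dofs is assumed to be a
+ * std::vector<unsigned int> collected elsewhere):
+ *
+ * @code
+ * matrix.clear_rows (boundary_dofs, 1.);
+ * @endcode
+ */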
+
+ /**
+ * Make an in-place transpose
+ * of a matrix.
+ */
+ void transpose ();
//@}
- /**
- * @name Entry Access
- */
+ /**
+ * @name Entry Access
+ */
//@{
- /**
- * Return the value of the
- * entry (<i>i,j</i>). This
- * may be an expensive
- * operation and you should
- * always take care where to
- * call this function. As in
- * the deal.II sparse matrix
- * class, we throw an exception
- * if the respective entry
- * doesn't exist in the
- * sparsity pattern of this
- * class, which is requested
- * from Trilinos. Moreover, an
- * exception will be thrown
- * when the requested element
- * is not saved on the calling
- * process.
- */
- TrilinosScalar operator () (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the value of the
- * matrix entry
- * (<i>i,j</i>). If this entry
- * does not exist in the
- * sparsity pattern, then zero
- * is returned. While this may
- * be convenient in some cases,
- * note that it is simple to
- * write algorithms that are
- * slow compared to an optimal
- * solution, since the sparsity
- * of the matrix is not used.
- * On the other hand, if you
- * want to be sure the entry
- * exists, you should use
- * operator() instead.
- *
- * The lack of error checking
- * in this function can also
- * yield surprising results if
- * you have a parallel
- * matrix. In that case, just
- * because you get a zero
- * result from this function
- * does not mean that either
- * the entry does not exist in
- * the sparsity pattern or that
- * it does but has a value of
- * zero. Rather, it could also
- * be that it simply isn't
- * stored on the current
- * processor; in that case, it
- * may be stored on a different
- * processor, and possibly so
- * with a nonzero value.
- */
- TrilinosScalar el (const unsigned int i,
- const unsigned int j) const;
-
- /**
- * Return the main diagonal
- * element in the <i>i</i>th
- * row. This function throws an
- * error if the matrix is not
- * quadratic and it also throws
- * an error if <i>(i,i)</i> is not
- * element of the local matrix.
- * See also the comment in
- * trilinos_sparse_matrix.cc.
- */
- TrilinosScalar diag_element (const unsigned int i) const;
+ /**
+ * Return the value of the
+ * entry (<i>i,j</i>). This
+ * may be an expensive
+ * operation and you should
+ * always take care where to
+ * call this function. As in
+ * the deal.II sparse matrix
+ * class, we throw an exception
+ * if the respective entry
+ * doesn't exist in the
+ * sparsity pattern of this
+ * class, which is requested
+ * from Trilinos. Moreover, an
+ * exception will be thrown
+ * when the requested element
+ * is not saved on the calling
+ * process.
+ */
+ TrilinosScalar operator () (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the value of the
+ * matrix entry
+ * (<i>i,j</i>). If this entry
+ * does not exist in the
+ * sparsity pattern, then zero
+ * is returned. While this may
+ * be convenient in some cases,
+ * note that it is simple to
+ * write algorithms that are
+ * slow compared to an optimal
+ * solution, since the sparsity
+ * of the matrix is not used.
+ * On the other hand, if you
+ * want to be sure the entry
+ * exists, you should use
+ * operator() instead.
+ *
+ * The lack of error checking
+ * in this function can also
+ * yield surprising results if
+ * you have a parallel
+ * matrix. In that case, just
+ * because you get a zero
+ * result from this function
+ * does not mean that either
+ * the entry does not exist in
+ * the sparsity pattern or that
+ * it does but has a value of
+ * zero. Rather, it could also
+ * be that it simply isn't
+ * stored on the current
+ * processor; in that case, it
+ * may be stored on a different
+ * processor, and possibly so
+ * with a nonzero value.
+ */
+ TrilinosScalar el (const unsigned int i,
+ const unsigned int j) const;
+
+ /**
+ * Return the main diagonal
+ * element in the <i>i</i>th
+ * row. This function throws an
+ * error if the matrix is not
+ * quadratic and it also throws
+ * an error if <i>(i,i)</i> is not
+ * element of the local matrix.
+ * See also the comment in
+ * trilinos_sparse_matrix.cc.
+ */
+ TrilinosScalar diag_element (const unsigned int i) const;
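+
+ /*
+ * A small sketch of the difference between the two access
+ * functions above (i and j are assumed to be indices of an entry
+ * stored on the calling process):
+ *
+ * @code
+ * // throws if (i,j) is not in the sparsity pattern
+ * const TrilinosScalar a = matrix (i, j);
+ *
+ * // returns zero instead of throwing in that case
+ * const TrilinosScalar b = matrix.el (i, j);
+ * @endcode
+ */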
//@}
- /**
- * @name Multiplications
- */
+ /**
+ * @name Multiplications
+ */
//@{
- /**
- * Matrix-vector multiplication:
- * let <i>dst = M*src</i> with
- * <i>M</i> being this matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void vmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Same as before, but working with
- * deal.II's own distributed vector
- * class.
- */
- void vmult (parallel::distributed::Vector<TrilinosScalar> &dst,
- const parallel::distributed::Vector<TrilinosScalar> &src) const;
-
- /**
- * Matrix-vector multiplication:
- * let <i>dst =
- * M<sup>T</sup>*src</i> with
- * <i>M</i> being this
- * matrix. This function does the
- * same as vmult() but takes the
- * transposed matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void Tvmult (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Same as before, but working with
- * deal.II's own distributed vector
- * class.
- */
- void Tvmult (parallel::distributed::Vector<TrilinosScalar> &dst,
- const parallel::distributed::Vector<TrilinosScalar> &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M*src</i> on <i>dst</i>
- * with <i>M</i> being this
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void vmult_add (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Adding Matrix-vector
- * multiplication. Add
- * <i>M<sup>T</sup>*src</i> to
- * <i>dst</i> with <i>M</i> being
- * this matrix. This function
- * does the same as vmult_add()
- * but takes the transposed
- * matrix.
- *
- * Source and destination must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- void Tvmult_add (VectorBase &dst,
- const VectorBase &src) const;
-
- /**
- * Return the square of the norm
- * of the vector $v$ with respect
- * to the norm induced by this
- * matrix, i.e.,
- * $\left(v,Mv\right)$. This is
- * useful, e.g. in the finite
- * element context, where the
- * $L_2$ norm of a function
- * equals the matrix norm with
- * respect to the mass matrix of
- * the vector representing the
- * nodal values of the finite
- * element function.
- *
- * Obviously, the matrix needs to
- * be quadratic for this
- * operation.
- *
- * The implementation of this
- * function is not as efficient
- * as the one in the @p
- * SparseMatrix class used in
- * deal.II (i.e. the original
- * one, not the Trilinos wrapper
- * class) since Trilinos doesn't
- * support this operation and
- * needs a temporary vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- TrilinosScalar matrix_norm_square (const VectorBase &v) const;
-
- /**
- * Compute the matrix scalar
- * product $\left(u,Mv\right)$.
- *
- * The implementation of this
- * function is not as efficient
- * as the one in the @p
- * SparseMatrix class used in
- * deal.II (i.e. the original
- * one, not the Trilinos
- * wrapper class) since
- * Trilinos doesn't support
- * this operation and needs a
- * temporary vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- TrilinosScalar matrix_scalar_product (const VectorBase &u,
- const VectorBase &v) const;
-
- /**
- * Compute the residual of an
- * equation <i>Mx=b</i>, where
- * the residual is defined to
- * be <i>r=b-Mx</i>. Write the
- * residual into @p dst. The
- * <i>l<sub>2</sub></i> norm of
- * the residual vector is
- * returned.
- *
- * Source <i>x</i> and
- * destination <i>dst</i> must
- * not be the same vector.
- *
- * Note that both vectors have to
- * be distributed vectors
- * generated using the same Map
- * as was used for the matrix in
- * case you work on a distributed
- * memory architecture, using the
- * interface in the
- * TrilinosWrappers::VectorBase
- * class (or one of the two
- * derived classes Vector and
- * MPI::Vector).
- *
- * In case of a localized Vector,
- * this function will only work
- * when running on one processor,
- * since the matrix object is
- * inherently
- * distributed. Otherwise, and
- * exception will be thrown.
- */
- TrilinosScalar residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const;
-
- /**
- * Perform the matrix-matrix
- * multiplication <tt>C = A * B</tt>,
- * or, if an optional vector argument
- * is given, <tt>C = A * diag(V) *
- * B</tt>, where <tt>diag(V)</tt>
- * defines a diagonal matrix with the
- * vector entries.
- *
- * This function assumes that the
- * calling matrix <tt>A</tt> and
- * <tt>B</tt> have compatible
- * sizes. The size of <tt>C</tt> will
- * be set within this function.
- *
- * The content as well as the sparsity
- * pattern of the matrix C will be
- * changed by this function, so make
- * sure that the sparsity pattern is
- * not used somewhere else in your
- * program. This is an expensive
- * operation, so think twice before you
- * use this function.
- */
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst = M*src</i> with
+ * <i>M</i> being this matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void vmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Same as before, but working with
+ * deal.II's own distributed vector
+ * class.
+ */
+ void vmult (parallel::distributed::Vector<TrilinosScalar> &dst,
+ const parallel::distributed::Vector<TrilinosScalar> &src) const;
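+
+ /*
+ * Illustration of a matrix-vector product (a sketch; dst and src
+ * are assumed to have been reinitialized with the row and column
+ * partitioning of the matrix, respectively):
+ *
+ * @code
+ * TrilinosWrappers::MPI::Vector dst, src;
+ * // ... reinit dst and src to match the matrix partitioning ...
+ * matrix.vmult (dst, src);
+ * @endcode
+ */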
+
+ /**
+ * Matrix-vector multiplication:
+ * let <i>dst =
+ * M<sup>T</sup>*src</i> with
+ * <i>M</i> being this
+ * matrix. This function does the
+ * same as vmult() but takes the
+ * transposed matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void Tvmult (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Same as before, but working with
+ * deal.II's own distributed vector
+ * class.
+ */
+ void Tvmult (parallel::distributed::Vector<TrilinosScalar> &dst,
+ const parallel::distributed::Vector<TrilinosScalar> &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M*src</i> on <i>dst</i>
+ * with <i>M</i> being this
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void vmult_add (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Adding Matrix-vector
+ * multiplication. Add
+ * <i>M<sup>T</sup>*src</i> to
+ * <i>dst</i> with <i>M</i> being
+ * this matrix. This function
+ * does the same as vmult_add()
+ * but takes the transposed
+ * matrix.
+ *
+ * Source and destination must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ void Tvmult_add (VectorBase &dst,
+ const VectorBase &src) const;
+
+ /**
+ * Return the square of the norm
+ * of the vector $v$ with respect
+ * to the norm induced by this
+ * matrix, i.e.,
+ * $\left(v,Mv\right)$. This is
+ * useful, e.g. in the finite
+ * element context, where the
+ * $L_2$ norm of a function
+ * equals the matrix norm with
+ * respect to the mass matrix of
+ * the vector representing the
+ * nodal values of the finite
+ * element function.
+ *
+ * Obviously, the matrix needs to
+ * be quadratic for this
+ * operation.
+ *
+ * The implementation of this
+ * function is not as efficient
+ * as the one in the @p
+ * SparseMatrix class used in
+ * deal.II (i.e. the original
+ * one, not the Trilinos wrapper
+ * class) since Trilinos doesn't
+ * support this operation and
+ * needs a temporary vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ TrilinosScalar matrix_norm_square (const VectorBase &v) const;
+
+ /**
+ * Compute the matrix scalar
+ * product $\left(u,Mv\right)$.
+ *
+ * The implementation of this
+ * function is not as efficient
+ * as the one in the @p
+ * SparseMatrix class used in
+ * deal.II (i.e. the original
+ * one, not the Trilinos
+ * wrapper class) since
+ * Trilinos doesn't support
+ * this operation and needs a
+ * temporary vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ TrilinosScalar matrix_scalar_product (const VectorBase &u,
+ const VectorBase &v) const;
+
+ /**
+ * Compute the residual of an
+ * equation <i>Mx=b</i>, where
+ * the residual is defined to
+ * be <i>r=b-Mx</i>. Write the
+ * residual into @p dst. The
+ * <i>l<sub>2</sub></i> norm of
+ * the residual vector is
+ * returned.
+ *
+ * Source <i>x</i> and
+ * destination <i>dst</i> must
+ * not be the same vector.
+ *
+ * Note that both vectors have to
+ * be distributed vectors
+ * generated using the same Map
+ * as was used for the matrix in
+ * case you work on a distributed
+ * memory architecture, using the
+ * interface in the
+ * TrilinosWrappers::VectorBase
+ * class (or one of the two
+ * derived classes Vector and
+ * MPI::Vector).
+ *
+ * In case of a localized Vector,
+ * this function will only work
+ * when running on one processor,
+ * since the matrix object is
+ * inherently
+ * distributed. Otherwise, an
+ * exception will be thrown.
+ */
+ TrilinosScalar residual (VectorBase &dst,
+ const VectorBase &x,
+ const VectorBase &b) const;
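+
+ /*
+ * Sketch: monitor a linear solve through the residual norm (x and
+ * b are assumed to be partitioned like the matrix, r is an
+ * auxiliary vector with the same layout as b):
+ *
+ * @code
+ * const TrilinosScalar res_norm = matrix.residual (r, x, b);
+ * // res_norm now holds the l2 norm of b - M*x, and r the residual
+ * @endcode
+ */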
+
+ /**
+ * Perform the matrix-matrix
+ * multiplication <tt>C = A * B</tt>,
+ * or, if an optional vector argument
+ * is given, <tt>C = A * diag(V) *
+ * B</tt>, where <tt>diag(V)</tt>
+ * defines a diagonal matrix with the
+ * vector entries.
+ *
+ * This function assumes that the
+ * calling matrix <tt>A</tt> and
+ * <tt>B</tt> have compatible
+ * sizes. The size of <tt>C</tt> will
+ * be set within this function.
+ *
+ * The content as well as the sparsity
+ * pattern of the matrix C will be
+ * changed by this function, so make
+ * sure that the sparsity pattern is
+ * not used somewhere else in your
+ * program. This is an expensive
+ * operation, so think twice before you
+ * use this function.
+ */
void mmult (SparseMatrix &C,
const SparseMatrix &B,
const VectorBase &V = VectorBase()) const;
<< arg2 << " through " << arg3
<< " are stored locally and can be accessed.");
-
private:
- /**
- * Trilinos doesn't allow to
- * mix additions to matrix
- * entries and overwriting them
- * (to make synchronisation of
- * parallel computations
- * simpler). The way we do it
- * is to, for each access
- * operation, store whether it
- * is an insertion or an
- * addition. If the previous
- * one was of different type,
- * then we first have to flush
- * the Trilinos buffers;
- * otherwise, we can simply go
- * on. Luckily, Trilinos has
- * an object for this which
- * does already all the
- * parallel communications in
- * such a case, so we simply
- * use their model, which
- * stores whether the last
- * operation was an addition or
- * an insertion.
- */
- Epetra_CombineMode last_action;
-
- /**
- * A boolean variable to hold
- * information on whether the
- * vector is compressed or not.
- */
- bool compressed;
+ /**
+ * The vector this reference
+ * points into.
+ */
+ VectorBase &vector;
/**
- * Whether this vector has ghost elements. This is true
- * on all processors even if only one of them has any
- * ghost elements.
+ * Index of the referenced element
+ * of the vector.
*/
- bool has_ghosts;
-
- /**
- * An Epetra distibuted vector
- * type. Requires an existing
- * Epetra_Map for storing data.
- */
- std_cxx1x::shared_ptr<Epetra_FEVector> vector;
-
-
- /**
- * Make the reference class a
- * friend.
- */
- friend class internal::VectorReference;
- friend class Vector;
- friend class MPI::Vector;
+ const unsigned int index;
+
+ /**
+ * Make the vector class a
+ * friend, so that it can
+ * create objects of the
+ * present type.
+ */
+ friend class ::dealii::TrilinosWrappers::VectorBase;
+ };
+ }
+ /**
+ * @endcond
+ */
+
+
+ /**
+ * Base class for the two types of Trilinos vectors, the distributed
+ * memory vector MPI::Vector and a localized vector Vector. The latter
+ * is designed for use either in serial implementations or as a
+ * localized copy on each processor. The implementation of this class
+ * is based on the Trilinos vector class Epetra_FEVector, the (parallel)
+ * partitioning of which is governed by an Epetra_Map. This means that
+ * the vector operations are generic and can be implemented in this base class, while
+ * the definition of the partition map (and hence, the constructor and
+ * reinit function) will have to be done in the derived classes. The
+ * Epetra_FEVector is precisely the kind of vector we deal with all the
+ * time - we probably get it from some assembly process, where also
+ * entries not locally owned might need to be written and hence need to be
+ * forwarded to the owner. The only requirement for this class to work
+ * is that Trilinos is installed with the same compiler as is used for
+ * compilation of deal.II.
+ *
+ * The interface of this class is modeled after the existing Vector
+ * class in deal.II. It has almost the same member functions, and is
+ * often exchangeable. However, since Trilinos only supports a single
+ * scalar type (double), it is not templated, and only works with that
+ * type.
+ *
+ * Note that Trilinos only guarantees that operations do what you expect
+ * if the function @p GlobalAssemble has been called after vector
+ * assembly in order to distribute the data. Therefore, you need to call
+ * Vector::compress() before you actually use the vectors.
+ *
+ * @ingroup TrilinosWrappers
+ * @ingroup Vectors
+ * @author Martin Kronbichler, 2008
+ */
+ class VectorBase : public Subscriptor
+ {
+ public:
+ /**
+ * Declare some of the standard
+ * types used in all
+ * containers. These types
+ * parallel those in the
+ * <tt>C</tt> standard libraries
+ * <tt>vector<...></tt> class.
+ */
+ typedef TrilinosScalar value_type;
+ typedef TrilinosScalar real_type;
+ typedef std::size_t size_type;
+ typedef internal::VectorReference reference;
+ typedef const internal::VectorReference const_reference;
+
+ /**
+ * @name 1: Basic Object-handling
+ */
+ //@{
+
+ /**
+ * Default constructor that
+ * generates an empty (zero size)
+ * vector. The function
+ * <tt>reinit()</tt> will have to
+ * give the vector the correct
+ * size and distribution among
+ * processes in case of an MPI
+ * run.
+ */
+ VectorBase ();
+
+ /**
+ * Copy constructor. Sets the
+ * dimension to that of the given
+ * vector, and copies all the
+ * elements.
+ */
+ VectorBase (const VectorBase &v);
+
+ /**
+ * Destructor
+ */
+ virtual ~VectorBase ();
+
+ /**
+ * Release all memory and return
+ * to a state just like after
+ * having called the default
+ * constructor.
+ */
+ void clear ();
+
+ /**
+ * Reinit functionality, sets the
+ * dimension and possibly the
+ * parallel partitioning (Epetra_Map)
+ * of the calling vector to the
+ * settings of the input vector.
+ */
+ void reinit (const VectorBase &v,
+ const bool fast = false);
+
+ /**
+ * Compress the underlying
+ * representation of the Trilinos
+ * object, i.e. flush the buffers
+ * of the vector object if it has
+ * any. This function is
+ * necessary after writing into a
+ * vector element-by-element and
+ * before anything else can be
+ * done on it.
+ *
+ * The (defaulted) argument can
+ * be used to specify the
+ * compress mode
+ * (<code>Add</code> or
+ * <code>Insert</code>) in case
+ * the vector has not been
+ * written to since the last
+ * time this function was
+ * called. The argument is
+ * ignored if the vector has
+ * been added or written to
+ * since the last time
+ * compress() was called.
+ *
+ * See @ref GlossCompress "Compressing distributed objects"
+ * for more information.
+ */
+ void compress (::dealii::VectorOperation::values operation
+ =::dealii::VectorOperation::unknown);
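+
+ /*
+ * Typical call sequence, sketched (i is a global index; if it is
+ * not locally owned, the datum is shipped to the owning process
+ * during compress()):
+ *
+ * @code
+ * v (i) = 1.;                            // write an element ...
+ * v.compress (VectorOperation::insert);  // ... then synchronize
+ * @endcode
+ */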
+
+ /**
+ * @deprecated
+ */
+ void compress (const Epetra_CombineMode last_action);
+
+ /**
+ * Returns the state of the
+ * vector, i.e., whether
+ * compress() has already been
+ * called after an operation
+ * requiring data exchange.
+ */
+ bool is_compressed () const;
+
+ /**
+ * Set all components of the
+ * vector to the given number @p
+ * s. Simply pass this down to
+ * the Trilinos Epetra object,
+ * but we still need to declare
+ * this function to make the
+ * example given in the
+ * discussion about making the
+ * constructor explicit work.
+ *
+ * Since the semantics of
+ * assigning a scalar to a vector
+ * are not immediately clear,
+ * this operator should really
+ * only be used if you want to
+ * set the entire vector to
+ * zero. This allows the
+ * intuitive notation
+ * <tt>v=0</tt>. Assigning other
+ * values is deprecated and may
+ * be disallowed in the future.
+ */
+ VectorBase &
+ operator = (const TrilinosScalar s);
+
+ /**
+ * Copy function. This function takes
+ * a VectorBase vector and copies all
+ * the elements. The target vector
+ * will have the same parallel
+ * distribution as the calling
+ * vector.
+ */
+ VectorBase &
+ operator = (const VectorBase &v);
+
+ /**
+ * Another copy function. This
+ * one takes a deal.II vector and
+ * copies it into a
+ * TrilinosWrapper vector. Note
+ * that since we do not provide
+ * any Epetra_Map that tells
+ * about the partitioning of the
+ * vector among the MPI
+ * processes, the size of the
+ * TrilinosWrapper vector has to
+ * be the same as the size of the
+ * input vector. In order to
+ * change the map, use the
+ * reinit(const Epetra_Map
+ * &input_map) function.
+ */
+ template <typename Number>
+ VectorBase &
+ operator = (const ::dealii::Vector<Number> &v);
+
+ /**
+ * Test for equality. This
+ * function assumes that the
+ * present vector and the one to
+ * compare with have the same
+ * size already, since comparing
+ * vectors of different sizes
+ * does not make much sense anyway.
+ */
+ bool operator == (const VectorBase &v) const;
+
+ /**
+ * Test for inequality. This
+ * function assumes that the
+ * present vector and the one to
+ * compare with have the same
+ * size already, since comparing
+ * vectors of different sizes
+ * does not make much sense anyway.
+ */
+ bool operator != (const VectorBase &v) const;
+
+ /**
+ * Return the global dimension of
+ * the vector.
+ */
+ unsigned int size () const;
+
+ /**
+ * Return the local dimension of
+ * the vector, i.e. the number of
+ * elements stored on the present
+ * MPI process. For sequential
+ * vectors, this number is the
+ * same as size(), but for
+ * parallel vectors it may be
+ * smaller.
+ *
+ * To figure out which elements
+ * exactly are stored locally,
+ * use local_range().
+ *
+ * If the vector contains ghost
+ * elements, they are included in
+ * this number.
+ */
+ unsigned int local_size () const;
+
+ /**
+ * Return a pair of indices
+ * indicating which elements of
+ * this vector are stored
+ * locally. The first number is
+ * the index of the first element
+ * stored, the second the index
+ * of the one past the last one
+ * that is stored locally. If
+ * this is a sequential vector,
+ * then the result will be the
+ * pair (0,N), otherwise it will
+ * be a pair (i,i+n), where
+ * <tt>n=local_size()</tt>.
+ */
+ std::pair<unsigned int, unsigned int> local_range () const;
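+
+ /*
+ * For example, one might loop over the locally stored range like
+ * this (a sketch; v is assumed to have no ghost elements, and a
+ * compress() call would follow the writes):
+ *
+ * @code
+ * const std::pair<unsigned int, unsigned int>
+ *   range = v.local_range ();
+ * for (unsigned int i = range.first; i < range.second; ++i)
+ *   v (i) *= 2.;
+ * @endcode
+ */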
+
+ /**
+ * Return whether @p index is in
+ * the local range or not, see
+ * also local_range().
+ */
+ bool in_local_range (const unsigned int index) const;
+
+ /**
+ * Return if the vector contains ghost
+ * elements. This answer is true if there
+ * are ghost elements on at least one
+ * process.
+ */
+ bool has_ghost_elements() const;
+
+ /**
+ * Return the scalar (inner)
+ * product of two vectors. The
+ * vectors must have the same
+ * size.
+ */
+ TrilinosScalar operator * (const VectorBase &vec) const;
+
+ /**
+ * Return square of the
+ * $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Mean value of the elements of
+ * this vector.
+ */
+ TrilinosScalar mean_value () const;
+
+ /**
+ * Compute the minimal value of
+ * the elements of this vector.
+ */
+ TrilinosScalar minimal_value () const;
+
+ /**
+ * $l_1$-norm of the vector. The
+ * sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * $l_2$-norm of the vector. The
+ * square root of the sum of the
+ * squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * $l_p$-norm of the vector. The
+ * <i>p</i>th root of the sum of
+ * the <i>p</i>th powers of the
+ * absolute values of the
+ * elements.
+ */
+ real_type lp_norm (const TrilinosScalar p) const;
+
+ /**
+ * Maximum absolute value of the
+ * elements.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return whether the vector
+ * contains only elements with
+ * value zero. This function is
+ * mainly for internal
+ * consistency checks and should
+ * seldom be used when not in
+ * debug mode since it takes quite
+ * some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector
+ * has no negative entries,
+ * i.e. all entries are zero or
+ * positive. This function is
+ * used, for example, to check
+ * whether refinement indicators
+ * are really all positive (or
+ * zero).
+ */
+ bool is_non_negative () const;
+ //@}
+
+
+ /**
+ * @name 2: Data-Access
+ */
+ //@{
+
+ /**
+ * Provide access to a given
+ * element, both read and write.
+ */
+ reference
+ operator () (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element. This is equivalent to
+ * the <code>el()</code> command.
+ */
+ TrilinosScalar
+ operator () (const unsigned int index) const;
+
+ /**
+ * Provide access to a given
+ * element, both read and write.
+ *
+ * Exactly the same as operator().
+ */
+ reference
+ operator [] (const unsigned int index);
+
+ /**
+ * Provide read-only access to an
+ * element. This is equivalent to
+ * the <code>el()</code> command.
+ *
+ * Exactly the same as operator().
+ */
+ TrilinosScalar
+ operator [] (const unsigned int index) const;
+
+ /**
+ * Return the value of the vector
+ * entry <i>i</i>. Note that this
+ * function only works properly
+ * when we request data stored on
+ * the local processor. The
+ * function will throw an
+ * exception in case the element
+ * sits on another process.
+ */
+ TrilinosScalar el (const unsigned int index) const;
+
+ /**
+ * A collective set operation:
+ * instead of setting individual
+ * elements of a vector, this
+ * function allows to set a whole
+ * set of elements at once. The
+ * indices of the elements to be
+ * set are stated in the first
+ * argument, the corresponding
+ * values in the second.
+ */
+ void set (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values);
++ const std::vector<TrilinosScalar> &values);
+
+ /**
+ * This is a second collective
+ * set operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ void set (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<TrilinosScalar> &values);
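+
+ /*
+ * A short sketch of a collective set operation (made-up data; the
+ * two arrays must have the same length):
+ *
+ * @code
+ * std::vector<unsigned int>   indices (2);
+ * std::vector<TrilinosScalar> values  (2);
+ * indices[0] = 3;  values[0] = 1.;
+ * indices[1] = 7;  values[1] = 2.;
+ *
+ * v.set (indices, values);
+ * v.compress (VectorOperation::insert);
+ * @endcode
+ */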
+ //@}
+
+
+ /**
+ * @name 3: Modification of vectors
+ */
+ //@{
+
+ /**
+ * This collective set operation
+ * is of lower level and can
+ * handle anything else —
+ * the only thing you have to
+ * provide is an address where
+ * all the indices are stored and
+ * the number of elements to be
+ * set.
+ */
+ void set (const unsigned int n_elements,
+ const unsigned int *indices,
+ const TrilinosScalar *values);
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const std::vector<TrilinosScalar> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ void add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<TrilinosScalar> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
+ const TrilinosScalar *values);
+
+ /**
+ * Multiply the entire vector by
+ * a fixed factor.
+ */
+ VectorBase &operator *= (const TrilinosScalar factor);
+
+ /**
+ * Divide the entire vector by a
+ * fixed factor.
+ */
+ VectorBase &operator /= (const TrilinosScalar factor);
+
+ /**
+ * Add the given vector to the
+ * present one.
+ */
+ VectorBase &operator += (const VectorBase &V);
+
+ /**
+ * Subtract the given vector from
+ * the present one.
+ */
+ VectorBase &operator -= (const VectorBase &V);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is
+ * a scalar and not a vector.
+ */
+ void add (const TrilinosScalar s);
+
+ /**
+ * Simple vector addition, equal
+ * to the <tt>operator
+ * +=</tt>.
+ *
+ * However, if the second argument
+ * <tt>allow_different_maps</tt>
+ * is set, then it is possible to
+ * add data from a different map.
+ */
+ void add (const VectorBase &V,
+ const bool allow_different_maps = false);
+
+ /**
+ * Simple addition of a multiple
+ * of a vector, i.e. <tt>*this =
+ * a*V</tt>.
+ */
+ void add (const TrilinosScalar a,
+ const VectorBase &V);
+
+ /**
+ * Multiple addition of scaled
+ * vectors, i.e. <tt>*this = a*V +
+ * b*W</tt>.
+ */
+ void add (const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W);
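+
+ /*
+ * The update functions above and below follow the usual BLAS-like
+ * conventions; a sketch of a conjugate-gradient style update with
+ * made-up vector names:
+ *
+ * @code
+ * x.add  (alpha, p);       // x += alpha * p
+ * r.add  (-alpha, q);      // r -= alpha * q
+ * p.sadd (beta, 1., r);    // p  = beta * p + r
+ * @endcode
+ */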
+
+ /**
+ * Scaling and simple vector
+ * addition, i.e. <tt>*this =
+ * s*(*this) + V</tt>.
+ */
+ void sadd (const TrilinosScalar s,
+ const VectorBase &V);
+
+ /**
+ * Scaling and simple addition,
+ * i.e. <tt>*this = s*(*this) +
+ * a*V</tt>.
+ */
+ void sadd (const TrilinosScalar s,
+ const TrilinosScalar a,
+ const VectorBase &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const TrilinosScalar s,
+ const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this) + a*V +
+ * b*W + c*X</tt>.
+ */
+ void sadd (const TrilinosScalar s,
+ const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W,
+ const TrilinosScalar c,
+ const VectorBase &X);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const VectorBase &scaling_factors);
+
+ /**
+ * Assignment <tt>*this =
+ * a*V</tt>.
+ */
+ void equ (const TrilinosScalar a,
+ const VectorBase &V);
+
+ /**
+ * Assignment <tt>*this = a*V +
+ * b*W</tt>.
+ */
+ void equ (const TrilinosScalar a,
+ const VectorBase &V,
+ const TrilinosScalar b,
+ const VectorBase &W);
+
+ /**
+ * Compute the elementwise ratio
+ * of the two given vectors, that
+ * is let <tt>this[i] =
+ * a[i]/b[i]</tt>. This is useful
+ * for example if you want to
+ * compute the cellwise ratio of
+ * true to estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const VectorBase &a,
+ const VectorBase &b);
+ //@}
+
+
+ /**
+ * @name 4: Mixed stuff
+ */
+ //@{
+
+ /**
+ * Return a const reference to the
+ * underlying Trilinos
+ * Epetra_MultiVector class.
+ */
+ const Epetra_MultiVector &trilinos_vector () const;
+
+ /**
+ * Return a (modifiable) reference to
+ * the underlying Trilinos
+ * Epetra_FEVector class.
+ */
+ Epetra_FEVector &trilinos_vector ();
+
+ /**
+ * Return a const reference to the
+ * underlying Trilinos Epetra_Map
+ * that sets the parallel
+ * partitioning of the vector.
+ */
+ const Epetra_Map &vector_partitioner () const;
+
+ /**
+ * Output of vector in
+ * user-defined format in analogy
+ * to the dealii::Vector<number>
+ * class.
+ */
+ void print (const char *format = 0) const;
+
+ /**
+ * Print to a stream. @p
+ * precision denotes the desired
+ * precision with which values
+ * shall be printed, @p
+ * scientific whether scientific
+ * notation shall be used. If @p
+ * across is @p true then the
+ * vector is printed in a line,
+ * while if @p false then the
+ * elements are printed on a
+ * separate line each.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Swap the contents of this
+ * vector and the other vector @p
+ * v. One could do this operation
+ * with a temporary variable and
+ * copying over the data
+ * elements, but this function is
+ * significantly more efficient
+ * since it only swaps the
+ * pointers to the data of the
+ * two vectors and therefore does
+ * not need to allocate temporary
+ * storage and move data
+ * around. Note that the vectors
+ * need to be of the same size
+ * and be based on the same map.
+ *
+ * This function is analogous to
+ * the @p swap function of all C
+ * standard containers. Also,
+ * there is a global function
+ * <tt>swap(u,v)</tt> that simply
+ * calls <tt>u.swap(v)</tt>,
+ * again in analogy to standard
+ * functions.
+ */
+ void swap (VectorBase &v);
+
+ /**
+ * Estimate for the memory
+ * consumption in bytes.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcGhostsPresent);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcDifferentParallelPartitioning);
+
+ /**
+ * Exception
+ */
+ DeclException1 (ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ /**
+ * Exception
+ */
+ DeclException3 (ExcAccessToNonlocalElement,
+ int, int, int,
+ << "You tried to access element " << arg1
+ << " of a distributed vector, but only entries "
+ << arg2 << " through " << arg3
+ << " are stored locally and can be accessed.");
+
+
+ private:
+ /**
+ * Trilinos doesn't allow to
+ * mix additions to matrix
+ * entries and overwriting them
+ * (to make synchronisation of
+ * parallel computations
+ * simpler). The way we do it
+ * is to, for each access
+ * operation, store whether it
+ * is an insertion or an
+ * addition. If the previous
+ * one was of different type,
+ * then we first have to flush
+ * the Trilinos buffers;
+ * otherwise, we can simply go
+ * on. Luckily, Trilinos has
+ * an object for this which
+ * does already all the
+ * parallel communications in
+ * such a case, so we simply
+ * use their model, which
+ * stores whether the last
+ * operation was an addition or
+ * an insertion.
+ */
+ Epetra_CombineMode last_action;
+
+ /**
+ * A boolean variable to hold
+ * information on whether the
+ * vector is compressed or not.
+ */
+ bool compressed;
+
+ /**
+ * Whether this vector has ghost elements. This is true
+ * on all processors even if only one of them has any
+ * ghost elements.
+ */
+ bool has_ghosts;
+
+ /**
+ * An Epetra distributed vector
+ * type. Requires an existing
+ * Epetra_Map for storing data.
+ */
+ std_cxx1x::shared_ptr<Epetra_FEVector> vector;
+
+
+ /**
+ * Make the reference class a
+ * friend.
+ */
+ friend class internal::VectorReference;
+ friend class Vector;
+ friend class MPI::Vector;
};
inline
void
VectorBase::set (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values)
+ const std::vector<TrilinosScalar> &values)
{
- // if we have ghost values, do not allow
- // writing to this vector at all.
+ // if we have ghost values, do not allow
+ // writing to this vector at all.
Assert (!has_ghost_elements(), ExcGhostsPresent());
Assert (indices.size() == values.size(),
inline
void
VectorBase::add (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values)
+ const std::vector<TrilinosScalar> &values)
{
- // if we have ghost values, do not allow
- // writing to this vector at all.
+ // if we have ghost values, do not allow
+ // writing to this vector at all.
Assert (!has_ghost_elements(), ExcGhostsPresent());
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(),values.size()));
#ifdef DEAL_II_USE_TRILINOS
- /**
- * Another copy operator: copy
- * the values from a (sequential
- * or parallel, depending on the
- * underlying compiler) Trilinos
- * wrapper vector class. This
- * operator is only available if
- * Trilinos was detected during
- * configuration time.
- *
- * Note that due to the
- * communication model used in MPI,
- * this operation can only succeed
- * if all processes do it at the
- * same time. I.e., it is not
- * possible for only one process to
- * obtain a copy of a parallel
- * vector while the other jobs do
- * something else.
- */
- Vector<Number> &
- operator = (const TrilinosWrappers::MPI::Vector &v);
-
- /**
- * Another copy operator: copy the
- * values from a sequential
- * Trilinos wrapper vector
- * class. This operator is only
- * available if Trilinos was
- * detected during configuration
- * time.
- */
- Vector<Number> &
- operator = (const TrilinosWrappers::Vector &v);
+ /**
+ * Another copy operator: copy
+ * the values from a (sequential
+ * or parallel, depending on the
+ * underlying compiler) Trilinos
+ * wrapper vector class. This
+ * operator is only available if
+ * Trilinos was detected during
+ * configuration time.
+ *
+ * Note that due to the
+ * communication model used in MPI,
+ * this operation can only succeed
+ * if all processes do it at the
+ * same time. I.e., it is not
+ * possible for only one process to
+ * obtain a copy of a parallel
+ * vector while the other jobs do
+ * something else.
+ */
+ Vector<Number> &
+ operator = (const TrilinosWrappers::MPI::Vector &v);
+
+ /**
+ * Another copy operator: copy the
+ * values from a sequential
+ * Trilinos wrapper vector
+ * class. This operator is only
+ * available if Trilinos was
+ * detected during configuration
+ * time.
+ */
+ Vector<Number> &
+ operator = (const TrilinosWrappers::Vector &v);
#endif
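As the comment above stresses, assigning from a parallel Trilinos vector is a collective operation: every process has to execute it at the same time. A hedged usage sketch, where trilinos_vec stands for an existing TrilinosWrappers::MPI::Vector (whether the left-hand side must be sized beforehand may depend on the library version):

// executed by all MPI processes together
Vector<double> local_copy (trilinos_vec.size ());
local_copy = trilinos_vec;   // gathers the distributed entries into a local vector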
- /**
- * Test for equality. This function
- * assumes that the present vector
- * and the one to compare with have
- * the same size already, since
- * comparing vectors of different
- * sizes makes not much sense
- * anyway.
- */
- template <typename Number2>
- bool operator == (const Vector<Number2> &v) const;
-
- /**
- * Test for inequality. This function
- * assumes that the present vector and
- * the one to compare with have the same
- * size already, since comparing vectors
- * of different sizes makes not much
- * sense anyway.
- */
- template <typename Number2>
- bool operator != (const Vector<Number2> &v) const;
-
- /**
- * Return the scalar product of
- * two vectors. The return type
- * is the underlying type of
- * @p this vector, so the return
- * type and the accuracy with
- * which it the result is
- * computed depend on the order
- * of the arguments of this
- * vector.
- *
- * For complex vectors, the
- * scalar product is implemented
- * as $\left<v,w\right>=\sum_i
- * v_i \bar{w_i}$.
- */
- template <typename Number2>
- Number operator * (const Vector<Number2> &V) const;
-
- /**
- * Return square of the $l_2$-norm.
- */
- real_type norm_sqr () const;
-
- /**
- * Mean value of the elements of
- * this vector.
- */
- Number mean_value () const;
-
- /**
- * $l_1$-norm of the vector.
- * The sum of the absolute values.
- */
- real_type l1_norm () const;
-
- /**
- * $l_2$-norm of the vector. The
- * square root of the sum of the
- * squares of the elements.
- */
- real_type l2_norm () const;
-
- /**
- * $l_p$-norm of the vector. The
- * pth root of the sum of the pth
- * powers of the absolute values
- * of the elements.
- */
- real_type lp_norm (const real_type p) const;
-
- /**
- * Maximum absolute value of the
- * elements.
- */
- real_type linfty_norm () const;
-
- /**
- * Return dimension of the vector.
- */
- unsigned int size () const;
-
- /**
- * Return whether the vector contains only
- * elements with value zero. This function
- * is mainly for internal consistency
- * checks and should seldom be used when
- * not in debug mode since it uses quite
- * some time.
- */
- bool all_zero () const;
-
- /**
- * Return @p true if the vector has no
- * negative entries, i.e. all entries are
- * zero or positive. This function is
- * used, for example, to check whether
- * refinement indicators are really all
- * positive (or zero).
- *
- * The function obviously only makes
- * sense if the template argument of this
- * class is a real type. If it is a
- * complex type, then an exception is
- * thrown.
- */
- bool is_non_negative () const;
-
- /**
- * Make the @p Vector class a bit like
- * the <tt>vector<></tt> class of the C++
- * standard library by returning
- * iterators to the start and end of the
- * elements of this vector.
- */
- iterator begin ();
-
- /**
- * Return constant iterator to the start of
- * the vectors.
- */
- const_iterator begin () const;
-
- /**
- * Return an iterator pointing to the
- * element past the end of the array.
- */
- iterator end ();
-
- /**
- * Return a constant iterator pointing to
- * the element past the end of the array.
- */
- const_iterator end () const;
- //@}
-
-
- /**
- * @name 2: Data-Access
- */
- //@{
- /**
- * Access the value of the @p ith
- * component.
- */
- Number operator() (const unsigned int i) const;
-
- /**
- * Access the @p ith component
- * as a writeable reference.
- */
- Number& operator() (const unsigned int i);
-
- /**
- * Access the value of the @p ith
- * component.
- *
- * Exactly the same as operator().
- */
- Number operator[] (const unsigned int i) const;
-
- /**
- * Access the @p ith component
- * as a writeable reference.
- *
- * Exactly the same as operator().
- */
- Number& operator[] (const unsigned int i);
- //@}
-
-
- /**
- * @name 3: Modification of vectors
- */
- //@{
-
- /**
- * Add the given vector to the present
- * one.
- */
- Vector<Number> & operator += (const Vector<Number> &V);
-
- /**
- * Subtract the given vector from the
- * present one.
- */
- Vector<Number> & operator -= (const Vector<Number> &V);
-
- /**
- * A collective add operation:
- * This funnction adds a whole
- * set of values stored in @p
- * values to the vector
- * components specified by @p
- * indices.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const std::vector<OtherNumber> &values);
-
- /**
- * This is a second collective
- * add operation. As a
- * difference, this function
- * takes a deal.II vector of
- * values.
- */
- template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
- const Vector<OtherNumber> &values);
-
- /**
- * Take an address where
- * <tt>n_elements</tt> are stored
- * contiguously and add them into
- * the vector. Handles all cases
- * which are not covered by the
- * other two <tt>add()</tt>
- * functions above.
- */
- template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
- const OtherNumber *values);
-
- /**
- * Addition of @p s to all
- * components. Note that @p s is a
- * scalar and not a vector.
- */
- void add (const Number s);
-
- /**
- * Simple vector addition, equal to the
- * <tt>operator +=</tt>.
- */
- void add (const Vector<Number> &V);
-
- /**
- * Simple addition of a multiple of a
- * vector, i.e. <tt>*this += a*V</tt>.
- */
- void add (const Number a, const Vector<Number> &V);
-
- /**
- * Multiple addition of scaled vectors,
- * i.e. <tt>*this += a*V+b*W</tt>.
- */
- void add (const Number a, const Vector<Number> &V,
- const Number b, const Vector<Number> &W);
-
- /**
- * Scaling and simple vector addition,
- * i.e.
- * <tt>*this = s*(*this)+V</tt>.
- */
- void sadd (const Number s,
- const Vector<Number> &V);
-
- /**
- * Scaling and simple addition, i.e.
- * <tt>*this = s*(*this)+a*V</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V);
-
- /**
- * Scaling and multiple addition.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W);
-
- /**
- * Scaling and multiple addition.
- * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
- */
- void sadd (const Number s,
- const Number a,
- const Vector<Number> &V,
- const Number b,
- const Vector<Number> &W,
- const Number c,
- const Vector<Number> &X);
-
- /**
- * Scale each element of the
- * vector by the given factor.
- *
- * This function is deprecated
- * and will be removed in a
- * future version. Use
- * <tt>operator *=</tt> and
- * <tt>operator /=</tt> instead.
- */
- void scale (const Number factor);
-
-
- /**
- * Scale each element of the
- * vector by a constant
- * value.
- */
- Vector<Number> & operator *= (const Number factor);
-
- /**
- * Scale each element of the
- * vector by the inverse of the
- * given value.
- */
- Vector<Number> & operator /= (const Number factor);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- void scale (const Vector<Number> &scaling_factors);
-
- /**
- * Scale each element of this
- * vector by the corresponding
- * element in the argument. This
- * function is mostly meant to
- * simulate multiplication (and
- * immediate re-assignment) by a
- * diagonal scaling matrix.
- */
- template <typename Number2>
- void scale (const Vector<Number2> &scaling_factors);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- void equ (const Number a, const Vector<Number>& u);
-
- /**
- * Assignment <tt>*this = a*u</tt>.
- */
- template <typename Number2>
- void equ (const Number a, const Vector<Number2>& u);
-
- /**
- * Assignment <tt>*this = a*u + b*v</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v);
-
- /**
- * Assignment <tt>*this = a*u + b*v + b*w</tt>.
- */
- void equ (const Number a, const Vector<Number>& u,
- const Number b, const Vector<Number>& v,
- const Number c, const Vector<Number>& w);
-
- /**
- * Compute the elementwise ratio of the
- * two given vectors, that is let
- * <tt>this[i] = a[i]/b[i]</tt>. This is
- * useful for example if you want to
- * compute the cellwise ratio of true to
- * estimated error.
- *
- * This vector is appropriately
- * scaled to hold the result.
- *
- * If any of the <tt>b[i]</tt> is
- * zero, the result is
- * undefined. No attempt is made
- * to catch such situations.
- */
- void ratio (const Vector<Number> &a,
- const Vector<Number> &b);
-
- /**
- * This function does nothing but is
- * there for compatibility with the
- * @p PETScWrappers::Vector class.
- *
- * For the PETSc vector wrapper class,
- * this function updates the ghost
- * values of the PETSc vector. This
- * is necessary after any modification
- * before reading ghost values.
- *
- * However, for the implementation of
- * this class, it is immaterial and thus
- * an empty function.
- */
- void update_ghost_values () const;
- //@}
-
-
- /**
- * @name 4: Mixed stuff
- */
- //@{
- /**
- * Output of vector in user-defined
- * format. For complex-valued vectors,
- * the format should include specifiers
- * for both the real and imaginary
- * parts.
- */
- void print (const char* format = 0) const;
-
- /**
- * Print to a
- * stream. @p precision denotes
- * the desired precision with
- * which values shall be printed,
- * @p scientific whether
- * scientific notation shall be
- * used. If @p across is
- * @p true then the vector is
- * printed in a line, while if
- * @p false then the elements
- * are printed on a separate line
- * each.
- */
- void print (std::ostream& out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const bool across = true) const;
-
- /**
- * Print to a
- * LogStream. <tt>width</tt> is
- * used as argument to the
- * std::setw manipulator, if
- * printing across. If @p
- * across is @p true then the
- * vector is printed in a line,
- * while if @p false then the
- * elements are printed on a
- * separate line each.
- */
- void print (LogStream& out,
- const unsigned int width = 6,
- const bool across = true) const;
-
- /**
- * Write the vector en bloc to a
- * file. This is done in a binary
- * mode, so the output is neither
- * readable by humans nor
- * (probably) by other computers
- * using a different operating
- * system or number format.
- */
- void block_write (std::ostream &out) const;
-
- /**
- * Read a vector en block from a
- * file. This is done using the
- * inverse operations to the
- * above function, so it is
- * reasonably fast because the
- * bitstream is not interpreted.
- *
- * The vector is resized if
- * necessary.
- *
- * A primitive form of error
- * checking is performed which
- * will recognize the bluntest
- * attempts to interpret some
- * data as a vector stored
- * bitwise to a file, but not
- * more.
- */
- void block_read (std::istream &in);
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
- std::size_t memory_consumption () const;
- //@}
-
- /**
- * Write the data of this object to
- * a stream for the purpose of serialization.
- */
- template <class Archive>
- void save (Archive & ar, const unsigned int version) const;
-
- /**
- * Read the data of this object
- * from a stream for the purpose of serialization.
- */
- template <class Archive>
- void load (Archive & ar, const unsigned int version);
-
- BOOST_SERIALIZATION_SPLIT_MEMBER()
-
- protected:
-
- /**
- * Dimension. Actual number of
- * components contained in the
- * vector. Get this number by
- * calling <tt>size()</tt>.
- */
- unsigned int vec_size;
-
- /**
- * Amount of memory actually
- * reserved for this vector. This
- * number may be greater than
- * @p vec_size if a @p reinit was
- * called with less memory
- * requirements than the vector
- * needed last time. At present
- * @p reinit does not free
- * memory when the number of
- * needed elements is reduced.
- */
- unsigned int max_vec_size;
-
- /**
- * Pointer to the array of
- * elements of this vector.
- */
- Number *val;
-
- /**
- * Make all other vector types
- * friends.
- */
- template <typename Number2> friend class Vector;
-
- /**
- * LAPACK matrices need access to
- * the data.
- */
- friend class LAPACKFullMatrix<Number>;
-
- /**
- * VectorView will access the
- * pointer.
- */
- friend class VectorView<Number>;
+ /**
+ * Test for equality. This function
+ * assumes that the present vector
+ * and the one to compare with have
+ * the same size already, since
+ * comparing vectors of different
+ * sizes does not make much sense
+ * anyway.
+ */
+ template <typename Number2>
+ bool operator == (const Vector<Number2> &v) const;
+
+ /**
+ * Test for inequality. This function
+ * assumes that the present vector and
+ * the one to compare with have the same
+ * size already, since comparing vectors
+ * of different sizes does not make
+ * much sense anyway.
+ */
+ template <typename Number2>
+ bool operator != (const Vector<Number2> &v) const;
+
+ /**
+ * Return the scalar product of
+ * two vectors. The return type
+ * is the underlying type of
+ * @p this vector, so the return
+ * type and the accuracy with
+ * which the result is
+ * computed depend on the order
+ * of the arguments of this
+ * vector.
+ *
+ * For complex vectors, the
+ * scalar product is implemented
+ * as $\left<v,w\right>=\sum_i
+ * v_i \bar{w_i}$.
+ */
+ template <typename Number2>
+ Number operator * (const Vector<Number2> &V) const;
+
+ /**
+ * Return square of the $l_2$-norm.
+ */
+ real_type norm_sqr () const;
+
+ /**
+ * Mean value of the elements of
+ * this vector.
+ */
+ Number mean_value () const;
+
+ /**
+ * $l_1$-norm of the vector.
+ * The sum of the absolute values.
+ */
+ real_type l1_norm () const;
+
+ /**
+ * $l_2$-norm of the vector. The
+ * square root of the sum of the
+ * squares of the elements.
+ */
+ real_type l2_norm () const;
+
+ /**
+ * $l_p$-norm of the vector. The
+ * pth root of the sum of the pth
+ * powers of the absolute values
+ * of the elements.
+ */
+ real_type lp_norm (const real_type p) const;
+
+ /**
+ * Maximum absolute value of the
+ * elements.
+ */
+ real_type linfty_norm () const;
+
+ /**
+ * Return dimension of the vector.
+ */
+ unsigned int size () const;
+
+ /**
+ * Return whether the vector contains only
+ * elements with value zero. This function
+ * is mainly for internal consistency
+ * checks and should seldom be used when
+ * not in debug mode since it takes quite
+ * some time.
+ */
+ bool all_zero () const;
+
+ /**
+ * Return @p true if the vector has no
+ * negative entries, i.e. all entries are
+ * zero or positive. This function is
+ * used, for example, to check whether
+ * refinement indicators are really all
+ * positive (or zero).
+ *
+ * The function obviously only makes
+ * sense if the template argument of this
+ * class is a real type. If it is a
+ * complex type, then an exception is
+ * thrown.
+ */
+ bool is_non_negative () const;
+
+ /**
+ * Make the @p Vector class a bit like
+ * the <tt>vector<></tt> class of the C++
+ * standard library by returning
+ * iterators to the start and end of the
+ * elements of this vector.
+ */
+ iterator begin ();
+
+ /**
+ * Return constant iterator to the start of
+ * the vector.
+ */
+ const_iterator begin () const;
+
+ /**
+ * Return an iterator pointing to the
+ * element past the end of the array.
+ */
+ iterator end ();
+
+ /**
+ * Return a constant iterator pointing to
+ * the element past the end of the array.
+ */
+ const_iterator end () const;
+ //@}
+
+
+ /**
+ * @name 2: Data-Access
+ */
+ //@{
+ /**
+ * Access the value of the @p ith
+ * component.
+ */
+ Number operator() (const unsigned int i) const;
+
+ /**
+ * Access the @p ith component
+ * as a writeable reference.
+ */
+ Number &operator() (const unsigned int i);
+
+ /**
+ * Access the value of the @p ith
+ * component.
+ *
+ * Exactly the same as operator().
+ */
+ Number operator[] (const unsigned int i) const;
+
+ /**
+ * Access the @p ith component
+ * as a writeable reference.
+ *
+ * Exactly the same as operator().
+ */
+ Number &operator[] (const unsigned int i);
+ //@}
+
+
+ /**
+ * @name 3: Modification of vectors
+ */
+ //@{
+
+ /**
+ * Add the given vector to the present
+ * one.
+ */
+ Vector<Number> &operator += (const Vector<Number> &V);
+
+ /**
+ * Subtract the given vector from the
+ * present one.
+ */
+ Vector<Number> &operator -= (const Vector<Number> &V);
+
+ /**
+ * A collective add operation:
+ * This function adds a whole
+ * set of values stored in @p
+ * values to the vector
+ * components specified by @p
+ * indices.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
- const std::vector<OtherNumber> &values);
++ const std::vector<OtherNumber> &values);
+
+ /**
+ * This is a second collective
+ * add operation. As a
+ * difference, this function
+ * takes a deal.II vector of
+ * values.
+ */
+ template <typename OtherNumber>
+ void add (const std::vector<unsigned int> &indices,
+ const Vector<OtherNumber> &values);
+
+ /**
+ * Take an address where
+ * <tt>n_elements</tt> are stored
+ * contiguously and add them into
+ * the vector. Handles all cases
+ * which are not covered by the
+ * other two <tt>add()</tt>
+ * functions above.
+ */
+ template <typename OtherNumber>
+ void add (const unsigned int n_elements,
+ const unsigned int *indices,
- const OtherNumber *values);
++ const OtherNumber *values);
+
+ /**
+ * Addition of @p s to all
+ * components. Note that @p s is a
+ * scalar and not a vector.
+ */
+ void add (const Number s);
+
+ /**
+ * Simple vector addition, equal to the
+ * <tt>operator +=</tt>.
+ */
+ void add (const Vector<Number> &V);
+
+ /**
+ * Simple addition of a multiple of a
+ * vector, i.e. <tt>*this += a*V</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V);
+
+ /**
+ * Multiple addition of scaled vectors,
+ * i.e. <tt>*this += a*V+b*W</tt>.
+ */
+ void add (const Number a, const Vector<Number> &V,
+ const Number b, const Vector<Number> &W);
+
+ /**
+ * Scaling and simple vector addition,
+ * i.e.
+ * <tt>*this = s*(*this)+V</tt>.
+ */
+ void sadd (const Number s,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and simple addition, i.e.
+ * <tt>*this = s*(*this)+a*V</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V);
+
+ /**
+ * Scaling and multiple addition.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W);
+
+ /**
+ * Scaling and multiple addition.
+ * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+ */
+ void sadd (const Number s,
+ const Number a,
+ const Vector<Number> &V,
+ const Number b,
+ const Vector<Number> &W,
+ const Number c,
+ const Vector<Number> &X);
+
+ /**
+ * Scale each element of the
+ * vector by the given factor.
+ *
+ * This function is deprecated
+ * and will be removed in a
+ * future version. Use
+ * <tt>operator *=</tt> and
+ * <tt>operator /=</tt> instead.
+ */
+ void scale (const Number factor);
+
+
+ /**
+ * Scale each element of the
+ * vector by a constant
+ * value.
+ */
+ Vector<Number> &operator *= (const Number factor);
+
+ /**
+ * Scale each element of the
+ * vector by the inverse of the
+ * given value.
+ */
+ Vector<Number> &operator /= (const Number factor);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ void scale (const Vector<Number> &scaling_factors);
+
+ /**
+ * Scale each element of this
+ * vector by the corresponding
+ * element in the argument. This
+ * function is mostly meant to
+ * simulate multiplication (and
+ * immediate re-assignment) by a
+ * diagonal scaling matrix.
+ */
+ template <typename Number2>
+ void scale (const Vector<Number2> &scaling_factors);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u);
+
+ /**
+ * Assignment <tt>*this = a*u</tt>.
+ */
+ template <typename Number2>
+ void equ (const Number a, const Vector<Number2> &u);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v);
+
+ /**
+ * Assignment <tt>*this = a*u + b*v + c*w</tt>.
+ */
+ void equ (const Number a, const Vector<Number> &u,
+ const Number b, const Vector<Number> &v,
+ const Number c, const Vector<Number> &w);
+
+ /**
+ * Compute the elementwise ratio of the
+ * two given vectors, that is let
+ * <tt>this[i] = a[i]/b[i]</tt>. This is
+ * useful for example if you want to
+ * compute the cellwise ratio of true to
+ * estimated error.
+ *
+ * This vector is appropriately
+ * scaled to hold the result.
+ *
+ * If any of the <tt>b[i]</tt> is
+ * zero, the result is
+ * undefined. No attempt is made
+ * to catch such situations.
+ */
+ void ratio (const Vector<Number> &a,
+ const Vector<Number> &b);
+
+ /**
+ * This function does nothing but is
+ * there for compatibility with the
+ * @p PETScWrappers::Vector class.
+ *
+ * For the PETSc vector wrapper class,
+ * this function updates the ghost
+ * values of the PETSc vector. This
+ * is necessary after any modification
+ * before reading ghost values.
+ *
+ * However, for the implementation of
+ * this class, it is immaterial and thus
+ * an empty function.
+ */
+ void update_ghost_values () const;
+ //@}
+
+
+ /**
+ * @name 4: Mixed stuff
+ */
+ //@{
+ /**
+ * Output of vector in user-defined
+ * format. For complex-valued vectors,
+ * the format should include specifiers
+ * for both the real and imaginary
+ * parts.
+ */
+ void print (const char *format = 0) const;
+
+ /**
+ * Print to a
+ * stream. @p precision denotes
+ * the desired precision with
+ * which values shall be printed,
+ * @p scientific whether
+ * scientific notation shall be
+ * used. If @p across is
+ * @p true then the vector is
+ * printed in a line, while if
+ * @p false then the elements
+ * are printed on a separate line
+ * each.
+ */
+ void print (std::ostream &out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const;
+
+ /**
+ * Print to a
+ * LogStream. <tt>width</tt> is
+ * used as argument to the
+ * std::setw manipulator, if
+ * printing across. If @p
+ * across is @p true then the
+ * vector is printed in a line,
+ * while if @p false then the
+ * elements are printed on a
+ * separate line each.
+ */
+ void print (LogStream &out,
+ const unsigned int width = 6,
+ const bool across = true) const;
+
+ /**
+ * Write the vector en bloc to a
+ * file. This is done in a binary
+ * mode, so the output is neither
+ * readable by humans nor
+ * (probably) by other computers
+ * using a different operating
+ * system or number format.
+ */
+ void block_write (std::ostream &out) const;
+
+ /**
+ * Read a vector en bloc from a
+ * file. This is done using the
+ * inverse operations to the
+ * above function, so it is
+ * reasonably fast because the
+ * bitstream is not interpreted.
+ *
+ * The vector is resized if
+ * necessary.
+ *
+ * A primitive form of error
+ * checking is performed which
+ * will recognize the bluntest
+ * attempts to interpret some
+ * data as a vector stored
+ * bitwise to a file, but not
+ * more.
+ */
+ void block_read (std::istream &in);
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+ //@}
+
+ /**
+ * Write the data of this object to
+ * a stream for the purpose of serialization.
+ */
+ template <class Archive>
+ void save (Archive &ar, const unsigned int version) const;
+
+ /**
+ * Read the data of this object
+ * from a stream for the purpose of serialization.
+ */
+ template <class Archive>
+ void load (Archive &ar, const unsigned int version);
+
+ BOOST_SERIALIZATION_SPLIT_MEMBER()
+
+ protected:
+
+ /**
+ * Dimension. Actual number of
+ * components contained in the
+ * vector. Get this number by
+ * calling <tt>size()</tt>.
+ */
+ unsigned int vec_size;
+
+ /**
+ * Amount of memory actually
+ * reserved for this vector. This
+ * number may be greater than
+ * @p vec_size if a @p reinit was
+ * called with less memory
+ * requirements than the vector
+ * needed last time. At present
+ * @p reinit does not free
+ * memory when the number of
+ * needed elements is reduced.
+ */
+ unsigned int max_vec_size;
+
+ /**
+ * Pointer to the array of
+ * elements of this vector.
+ */
+ Number *val;
+
+ /**
+ * Make all other vector types
+ * friends.
+ */
+ template <typename Number2> friend class Vector;
+
+ /**
+ * LAPACK matrices need access to
+ * the data.
+ */
+ friend class LAPACKFullMatrix<Number>;
+
+ /**
+ * VectorView will access the
+ * pointer.
+ */
+ friend class VectorView<Number>;
};
/*@}*/
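To make the interface listed above concrete, here is a small usage sketch that is restricted to members declared in this block, plus the size constructor and the scalar assignment of Vector, which are declared elsewhere in this header:

Vector<double> u (10), v (10), w (10);
u = 1.;                     // every entry set to one
v = 2.;

u.add (0.5, v);             // u += 0.5 * v
u.sadd (2.0, 3.0, v);       // u  = 2*u + 3*v
w.equ (1.0, u, -1.0, v);    // w  = u - v
w.ratio (u, v);             // overwrites w with w[i] = u[i] / v[i]

const double norm = u.l2_norm ();
const double mean = u.mean_value ();
u.print (std::cout, 3, true, true);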
struct InnerProd
{
Number
- operator() (const Number*&X, const Number2*&Y, const Number &) const
- operator() (const Number *&X, const Number2 *&Y, const Number &) const
++ operator() (const Number *&X, const Number2 *&Y, const Number &) const
{
return *X++ * Number(numbers::NumberTraits<Number2>::conjugate(*Y++));
}
struct Norm2
{
RealType
- operator() (const Number*&X, const Number* &, const RealType &) const
- operator() (const Number *&X, const Number *&, const RealType &) const
++ operator() (const Number *&X, const Number *&, const RealType &) const
{
return numbers::NumberTraits<Number>::abs_square(*X++);
}
struct Norm1
{
RealType
- operator() (const Number*&X, const Number* &, const RealType &) const
- operator() (const Number *&X, const Number *&, const RealType &) const
++ operator() (const Number *&X, const Number *&, const RealType &) const
{
return numbers::NumberTraits<Number>::abs(*X++);
}
struct NormP
{
RealType
- operator() (const Number*&X, const Number* &, const RealType &p) const
- operator() (const Number *&X, const Number *&, const RealType &p) const
++ operator() (const Number *&X, const Number *&, const RealType &p) const
{
return std::pow(numbers::NumberTraits<Number>::abs(*X++), p);
}
struct MeanValue
{
Number
- operator() (const Number*&X, const Number* &, const Number &) const
- operator() (const Number *&X, const Number *&, const Number &) const
++ operator() (const Number *&X, const Number *&, const Number &) const
{
return *X++;
}
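These function objects are the element-wise kernels of a generic reduction: each call consumes one entry through the advancing pointer and returns that entry's contribution. A purely illustrative driver of the kind they are written for (accumulate is not part of the library, and both pointer arguments are assumed to have the same value type):

template <typename Operation, typename Number, typename ResultType>
ResultType accumulate (const Operation   &op,
                       const Number      *X,
                       const Number      *Y,
                       const unsigned int n,
                       const ResultType  &data)
{
  ResultType sum = ResultType();
  for (unsigned int i=0; i<n; ++i)
    sum += op (X, Y, data);    // the functor advances X (and possibly Y) itself
  return sum;
}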
inline
void
FEEvaluationBase<dim,dofs_per_cell_,n_q_points_,n_components_,Number>
- ::distribute_local_to_global (std::vector<VectorType*> &dst,
-::distribute_local_to_global (std::vector<VectorType *> &dst,
++::distribute_local_to_global (std::vector<VectorType *> &dst,
const unsigned int first_index) const
{
AssertIndexRange (first_index, dst.size());
inline
void
FEEvaluationBase<dim,dofs_per_cell_,n_q_points_,n_components_,Number>
- ::set_dof_values (std::vector<VectorType*> &dst,
-::set_dof_values (std::vector<VectorType *> &dst,
++::set_dof_values (std::vector<VectorType *> &dst,
const unsigned int first_index) const
{
AssertIndexRange (first_index, dst.size());
namespace internal
{
- namespace MatrixFreeFunctions
- {
- /**
- * The class that stores all geometry-dependent data related with cell
- * interiors for use in the matrix-free class.
- *
- * @author Katharina Kormann and Martin Kronbichler, 2010, 2011
- */
- template <int dim, typename Number>
- struct MappingInfo
+ namespace MatrixFreeFunctions
{
- /**
- * Determines how many bits of an unsigned int
- * are used to distinguish the cell types
- * (Cartesian, with constant Jacobian, or
- * general)
- */
- static const std::size_t n_cell_type_bits = 2;
-
- /**
- * Determines how many types of different
- * cells can be detected at most. Corresponds
- * to the number of bits we reserved for it.
- */
- static const unsigned int n_cell_types = 1U<<n_cell_type_bits;
-
- /**
- * Empty constructor.
- */
- MappingInfo();
-
- /**
- * Computes the information in the given
- * cells. The cells are specified by the level
- * and the index within the level (as given by
- * CellIterator::level() and
- * CellIterator::index(), in order to allow
- * for different kinds of iterators,
- * e.g. standard DoFHandler, multigrid, etc.)
- * on a fixed Triangulation. In addition, a
- * mapping and several quadrature formulas are
- * given.
- */
- void initialize (const dealii::Triangulation<dim> &tria,
- const std::vector<std::pair<unsigned int,unsigned int> > &cells,
- const std::vector<unsigned int> &active_fe_index,
- const Mapping<dim> &mapping,
- const std::vector<dealii::hp::QCollection<1> > &quad,
- const UpdateFlags update_flags);
-
- /**
- * Helper function to determine which update
- * flags must be set in the internal functions
- * to initialize all data as requested by the
- * user.
- */
- UpdateFlags
- compute_update_flags (const UpdateFlags update_flags,
- const std::vector<dealii::hp::QCollection<1> > &quad) const;
-
- /**
- * Returns the type of a given cell as
- * detected during initialization.
- */
- CellType get_cell_type (const unsigned int cell_chunk_no) const;
-
- /**
- * Returns the type of a given cell as
- * detected during initialization.
- */
- unsigned int get_cell_data_index (const unsigned int cell_chunk_no) const;
-
- /**
- * Clears all data fields in this class.
- */
- void clear ();
-
- /**
- * Returns the memory consumption of this
- * class in bytes.
- */
- std::size_t memory_consumption() const;
-
- /**
- * Prints a detailed summary of memory
- * consumption in the different structures of
- * this class to the given output stream.
- */
- template <typename STREAM>
- void print_memory_consumption(STREAM &out,
- const SizeInfo &size_info) const;
-
- /**
- * Stores whether a cell is Cartesian, has
- * constant transform data (Jacobians) or is
- * general. cell_type % 4 gives this
- * information (0: Cartesian, 1: constant
- * Jacobian throughout cell, 2: general cell),
- * and cell_type / 4 gives the index in the
- * data field of where to find the information
- * in the fields Jacobian and JxW values
- * (except for quadrature points, for which
- * the index runs as usual).
- */
- std::vector<unsigned int> cell_type;
-
- /**
- * The first field stores the inverse Jacobian
- * for Cartesian cells: There, it is a
- * diagonal rank-2 tensor, so we actually just
- * store a rank-1 tensor. It is the same on
- * all cells, therefore we only store it once
- * per cell, and use similarities from one
- * cell to another, too (on structured meshes,
- * there are usually many cells with the same
- * Jacobian).
- *
- * The second field stores the Jacobian
- * determinant for Cartesian cells (without
- * the quadrature weight, which depends on the
- * quadrature point, whereas the determinant
- * is the same on each quadrature point).
- */
- AlignedVector<std::pair<Tensor<1,dim,VectorizedArray<Number> >,
- VectorizedArray<Number> > > cartesian_data;
-
- /**
- * The first field stores the Jacobian for
- * non-Cartesian cells where all the Jacobians
- * on the cell are the same (i.e., constant,
- * which comes from a linear transformation
- * from unit to real cell). Also use
- * similarities from one cell to another (on
- * structured meshes, there are usually many
- * cells with the same Jacobian).
- *
- * The second field stores the Jacobian
- * determinant for non-Cartesian cells with
- * constant Jacobian throughout the cell
- * (without the quadrature weight, which
- * depends on the quadrature point, whereas
- * the determinant is the same on each
- * quadrature point).
- */
- AlignedVector<std::pair<Tensor<2,dim,VectorizedArray<Number> >,
- VectorizedArray<Number> > > affine_data;
-
- /**
- * Definition of a structure that stores data
- * that depends on the quadrature formula (if
- * we have more than one quadrature formula on
- * a given problem, these fields will be
- * different)
- */
- struct MappingInfoDependent
+ /**
+ * The class that stores all geometry-dependent data related to cell
+ * interiors for use in the matrix-free class.
+ *
+ * @author Katharina Kormann and Martin Kronbichler, 2010, 2011
+ */
+ template <int dim, typename Number>
+ struct MappingInfo
{
- /**
- * This field stores the row starts for the
- * inverse Jacobian transformations,
- * quadrature weights and second derivatives.
- */
- std::vector<unsigned int> rowstart_jacobians;
-
- /**
- * This field stores the inverse Jacobian
- * transformation from unit to real cell,
- * which is needed for most gradient
- * transformations (corresponds to
- * FEValues::inverse_jacobian) for general
- * cells.
- */
- AlignedVector<Tensor<2,dim,VectorizedArray<Number> > > jacobians;
-
- /**
- * This field stores the Jacobian
- * determinant times the quadrature weights
- * (JxW in deal.II speak) for general cells.
- */
- AlignedVector<VectorizedArray<Number> > JxW_values;
-
- /**
- * Stores the diagonal part of the gradient of
- * the inverse Jacobian transformation. The
- * first index runs over the derivatives
- * $\partial^2/\partial x_i^2$, the second
- * over the space coordinate. Needed for
- * computing the Laplacian of FE functions on
- * the real cell. Uses a separate storage from
- * the off-diagonal part $\partial^2/\partial
- * x_i \partial x_j, i\neq j$ because that is
- * only needed for computing a full Hessian.
- */
- AlignedVector<Tensor<2,dim,VectorizedArray<Number> > > jacobians_grad_diag;
-
- /**
- * Stores the off-diagonal part of the
- * gradient of the inverse Jacobian
- * transformation. Because of symmetry, only
- * the upper diagonal part is needed. The
- * first index runs through the derivatives
- * row-wise, i.e., $\partial^2/\partial x_1
- * \partial x_2$ first, then
- * $\partial^2/\partial x_1 \partial x_3$, and
- * so on. The second index is the spatial
- * coordinate. Not filled currently.
- */
- AlignedVector<Tensor<1,(dim>1?dim*(dim-1)/2:1),
- Tensor<1,dim,VectorizedArray<Number> > > > jacobians_grad_upper;
-
- /**
- * Stores the row start for quadrature points
- * in real coordinates for both types of
- * cells. Note that Cartesian cells will have
- * shorter fields (length is @p n_q_points_1d)
- * than non-Cartesian cells (length is @p
- * n_q_points).
- */
- std::vector<unsigned int> rowstart_q_points;
-
- /**
- * Stores the quadrature points in real
- * coordinates for Cartesian cells (does not
- * need to store the full data on all points)
- */
- AlignedVector<Point<dim,VectorizedArray<Number> > > quadrature_points;
-
- /**
- * The dim-dimensional quadrature formula
- * underlying the problem (constructed from a
- * 1D tensor product quadrature formula).
- */
- dealii::hp::QCollection<dim> quadrature;
-
- /**
- * The (dim-1)-dimensional quadrature formula
- * corresponding to face evaluation
- * (constructed from a 1D tensor product
- * quadrature formula).
- */
- dealii::hp::QCollection<dim-1> face_quadrature;
-
- /**
- * The number of quadrature points for the
- * current quadrature formula.
- */
- std::vector<unsigned int> n_q_points;
-
- /**
- * The number of quadrature points for the
- * current quadrature formula when applied to
- * a face. Only set if the quadrature formula
- * is derived from a tensor product, since it
- * is not defined from the full quadrature
- * formula otherwise.
- */
- std::vector<unsigned int> n_q_points_face;
-
- /**
- * The quadrature weights (vectorized data
- * format) on the unit cell.
- */
- std::vector<AlignedVector<VectorizedArray<Number> > > quadrature_weights;
-
- /**
- * This variable stores the number of
- * quadrature points for all quadrature
- * indices in the underlying element for
- * easier access to data in the hp case.
- */
- std::vector<unsigned int> quad_index_conversion;
-
- /**
- * Returns the quadrature index for a given
- * number of quadrature points. If not in hp
- * mode or if the index is not found, this
- * function always returns index 0. Hence,
- * this function does not check whether the
- * given degree is actually present.
- */
- unsigned int
- quad_index_from_n_q_points (const unsigned int n_q_points) const;
-
-
- /**
- * Prints a detailed summary of memory
- * consumption in the different structures of
- * this class to the given output stream.
- */
+ /**
+ * Determines how many bits of an unsigned int
+ * are used to distinguish the cell types
+ * (Cartesian, with constant Jacobian, or
+ * general).
+ */
+ static const std::size_t n_cell_type_bits = 2;
+
+ /**
+ * Determines how many different types of
+ * cells can be detected at most. Corresponds
+ * to the number of bits we reserved for it.
+ */
+ static const unsigned int n_cell_types = 1U<<n_cell_type_bits;
+
+ /**
+ * Empty constructor.
+ */
+ MappingInfo();
+
+ /**
+ * Computes the information in the given
+ * cells. The cells are specified by the level
+ * and the index within the level (as given by
+ * CellIterator::level() and
+ * CellIterator::index(), in order to allow
+ * for different kinds of iterators,
+ * e.g. standard DoFHandler, multigrid, etc.)
+ * on a fixed Triangulation. In addition, a
+ * mapping and several quadrature formulas are
+ * given.
+ */
+ void initialize (const dealii::Triangulation<dim> &tria,
+ const std::vector<std::pair<unsigned int,unsigned int> > &cells,
+ const std::vector<unsigned int> &active_fe_index,
+ const Mapping<dim> &mapping,
- const std::vector<dealii::hp::QCollection<1> > &quad,
++ const std::vector<dealii::hp::QCollection<1> > &quad,
+ const UpdateFlags update_flags);
+
+ /**
+ * Helper function to determine which update
+ * flags must be set in the internal functions
+ * to initialize all data as requested by the
+ * user.
+ */
+ UpdateFlags
+ compute_update_flags (const UpdateFlags update_flags,
- const std::vector<dealii::hp::QCollection<1> > &quad) const;
++ const std::vector<dealii::hp::QCollection<1> > &quad) const;
+
+ /**
+ * Returns the type of a given cell as
+ * detected during initialization.
+ */
+ CellType get_cell_type (const unsigned int cell_chunk_no) const;
+
+ /**
+ * Returns the data index of a given cell as
+ * detected during initialization.
+ */
+ unsigned int get_cell_data_index (const unsigned int cell_chunk_no) const;
+
+ /**
+ * Clears all data fields in this class.
+ */
+ void clear ();
+
+ /**
+ * Returns the memory consumption of this
+ * class in bytes.
+ */
+ std::size_t memory_consumption() const;
+
+ /**
+ * Prints a detailed summary of memory
+ * consumption in the different structures of
+ * this class to the given output stream.
+ */
template <typename STREAM>
void print_memory_consumption(STREAM &out,
const SizeInfo &size_info) const;
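As a worked example of the cell_type encoding used by this class (two bits for the type, hence a modulus of 1U<<n_cell_type_bits == 4), a stored value of 9 decodes as follows; the variable names are illustrative only:

const unsigned int cell_type_entry = 9;
const unsigned int type  = cell_type_entry % 4;   // == 1: constant Jacobian throughout the cell
const unsigned int index = cell_type_entry / 4;   // == 2: position in the Jacobian/JxW data fields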
template <typename DH, typename Quad>
void MatrixFree<dim,Number>::
reinit(const Mapping<dim> &mapping,
- const std::vector<const DH *> &dof_handler,
- const std::vector<const ConstraintMatrix*> &constraint,
- const std::vector<const DH *> &dof_handler,
++ const std::vector<const DH *> &dof_handler,
+ const std::vector<const ConstraintMatrix *> &constraint,
const std::vector<Quad> &quad,
const MatrixFree<dim,Number>::AdditionalData additional_data)
{
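For orientation, a hedged sketch of calling this reinit() overload with a single DoFHandler; dof_handler and constraint_matrix are placeholders for objects set up elsewhere, and the AdditionalData argument is assumed to be default-constructible:

MappingQ1<2>                          mapping;
std::vector<const DoFHandler<2>*>     dof_handlers (1, &dof_handler);
std::vector<const ConstraintMatrix*>  constraints  (1, &constraint_matrix);
std::vector<QGauss<1> >               quadratures  (1, QGauss<1>(3));

MatrixFree<2,double> matrix_free;
matrix_free.reinit (mapping, dof_handlers, constraints, quadratures,
                    MatrixFree<2,double>::AdditionalData ());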
template <typename VectorStruct>
inline
- void update_ghost_values_start (const std::vector<VectorStruct> &src)
+ void update_ghost_values_start (const std::vector<VectorStruct> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_start(src[comp], comp);
}
template <typename VectorStruct>
inline
- void update_ghost_values_start (const std::vector<VectorStruct*> &src)
- void update_ghost_values_start (const std::vector<VectorStruct *> &src)
++ void update_ghost_values_start (const std::vector<VectorStruct *> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_start(*src[comp], comp);
}
template <typename VectorStruct>
inline
- void update_ghost_values_finish (const std::vector<VectorStruct> &src)
+ void update_ghost_values_finish (const std::vector<VectorStruct> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_finish(src[comp]);
}
template <typename VectorStruct>
inline
- void update_ghost_values_finish (const std::vector<VectorStruct*> &src)
- void update_ghost_values_finish (const std::vector<VectorStruct *> &src)
++ void update_ghost_values_finish (const std::vector<VectorStruct *> &src)
{
- for(unsigned int comp=0;comp<src.size();comp++)
+ for (unsigned int comp=0; comp<src.size(); comp++)
update_ghost_values_finish(*src[comp]);
}
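The reason for splitting the update into a start and a finish call is that the ghost exchange can overlap with local work. A purely illustrative calling pattern, where src is one of the vector collections accepted above and do_local_work() stands for any computation that does not read ghost entries:

update_ghost_values_start  (src);   // post the transfers
do_local_work ();                   // work that needs no ghost data
update_ghost_values_finish (src);   // wait until the ghost values have arrived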
return NULL;
}
- tbb::empty_task* dummy;
+ tbb::empty_task *dummy;
private:
- const Worker &function;
+ const Worker &function;
const unsigned int partition;
const internal::MatrixFreeFunctions::TaskInfo &task_info;
};
MatrixFree<dim, Number>::cell_loop
(const std_cxx1x::function<void (const MatrixFree<dim,Number> &,
OutVector &,
- const InVector&,
+ const InVector &,
const std::pair<unsigned int,
- unsigned int> &)> &cell_operation,
+ unsigned int> &)> &cell_operation,
OutVector &dst,
- const InVector &src) const
+ const InVector &src) const
{
#if DEAL_II_USE_MT==1
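A hedged sketch of how this cell_loop() overload is typically invoked: local_apply is a placeholder free function whose signature matches the std_cxx1x::function argument above, so it converts implicitly when passed in; matrix_free, dst and src are assumed to exist already:

void local_apply (const MatrixFree<2,double>                 &data,
                  Vector<double>                             &dst,
                  const Vector<double>                       &src,
                  const std::pair<unsigned int,unsigned int> &cell_range);

// ...
matrix_free.cell_loop (&local_apply, dst, src);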
template <class VECTOR>
class ResidualLocalBlocksToGlobalBlocks
{
- public:
- /**
- * Copy the BlockInfo and the
- * matrix pointers into local
- * variables.
- */
- void initialize(const BlockInfo* block_info,
- NamedData<VECTOR*>& residuals);
- /**
- * Initialize the constraints.
- */
- void initialize(const ConstraintMatrix& constraints);
- /**
- * Initialize the local data
- * in the
- * DoFInfo
- * object used later for
- * assembling.
- *
- * The info object refers to
- * a cell if
- * <code>!face</code>, or
- * else to an interior or
- * boundary face.
- */
- template <class DOFINFO>
- void initialize_info(DOFINFO& info, bool face) const;
-
-
- /**
- * Assemble the local residuals
- * into the global residuals.
- */
- template<class DOFINFO>
- void assemble(const DOFINFO& info);
-
- /**
- * Assemble both local residuals
- * into the global residuals.
- */
- template<class DOFINFO>
- void assemble(const DOFINFO& info1,
- const DOFINFO& info2);
- private:
- /**
- * Assemble a single local
- * residual into the global.
- */
- void assemble(VECTOR& global,
- const BlockVector<double>& local,
- const std::vector<types::global_dof_index>& dof);
-
- /**
- * The global matrices,
- * stored as a vector of
- * pointers.
- */
- NamedData<SmartPointer<VECTOR,
- ResidualLocalBlocksToGlobalBlocks<VECTOR> > > residuals;
+ public:
+ /**
+ * Copy the BlockInfo and the
+ * residual vector pointers into
+ * local variables.
+ */
+ void initialize(const BlockInfo *block_info,
+ NamedData<VECTOR *> &residuals);
+ /**
+ * Initialize the constraints.
+ */
+ void initialize(const ConstraintMatrix &constraints);
+ /**
+ * Initialize the local data
+ * in the
+ * DoFInfo
+ * object used later for
+ * assembling.
+ *
+ * The info object refers to
+ * a cell if
+ * <code>!face</code>, or
+ * else to an interior or
+ * boundary face.
+ */
+ template <class DOFINFO>
+ void initialize_info(DOFINFO &info, bool face) const;
+
+
+ /**
+ * Assemble the local residuals
+ * into the global residuals.
+ */
+ template<class DOFINFO>
+ void assemble(const DOFINFO &info);
+
+ /**
+ * Assemble both local residuals
+ * into the global residuals.
+ */
+ template<class DOFINFO>
+ void assemble(const DOFINFO &info1,
+ const DOFINFO &info2);
+ private:
+ /**
+ * Assemble a single local
+ * residual into the global.
+ */
+ void assemble(VECTOR &global,
+ const BlockVector<double> &local,
- const std::vector<unsigned int> &dof);
++ const std::vector<types::global_dof_index> &dof);
+
+ /**
+ * The global residual vectors,
+ * stored as a vector of
+ * pointers.
+ */
+ NamedData<SmartPointer<VECTOR,
+ ResidualLocalBlocksToGlobalBlocks<VECTOR> > > residuals;
/**
* A pointer to the object containing the block structure.
template <class MATRIX, typename number = double>
class MatrixLocalBlocksToGlobalBlocks
{
- public:
- /**
- * Constructor, initializing
- * the #threshold, which
- * limits how small numbers
- * may be to be entered into
- * the matrix.
- */
- MatrixLocalBlocksToGlobalBlocks(double threshold = 1.e-12);
-
- /**
- * Copy the BlockInfo and the
- * matrix pointers into local
- * variables and initialize
- * cell matrix vectors.
- */
- void initialize(const BlockInfo* block_info,
- MatrixBlockVector<MATRIX>& matrices);
-
- /**
- * Initialize the constraints.
- */
- void initialize(const ConstraintMatrix& constraints);
- /**
- * Initialize the local data
- * in the
- * DoFInfo
- * object used later for
- * assembling.
- *
- * The info object refers to
- * a cell if
- * <code>!face</code>, or
- * else to an interior or
- * boundary face.
- */
- template <class DOFINFO>
- void initialize_info(DOFINFO& info, bool face) const;
-
-
- /**
- * Assemble the local matrices
- * into the global matrices.
- */
- template<class DOFINFO>
- void assemble(const DOFINFO& info);
-
- /**
- * Assemble all local matrices
- * into the global matrices.
- */
- template<class DOFINFO>
- void assemble(const DOFINFO& info1,
- const DOFINFO& info2);
-
- private:
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble(
- MatrixBlock<MATRIX>& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2);
-
- /**
- * The global matrices,
- * stored as a vector of
- * pointers.
- */
- SmartPointer<MatrixBlockVector<MATRIX>,
- MatrixLocalBlocksToGlobalBlocks<MATRIX, number> > matrices;
+ public:
+ /**
+ * Constructor, initializing
+ * the #threshold, which
+ * determines how small a number
+ * may be and still be entered into
+ * the matrix.
+ */
+ MatrixLocalBlocksToGlobalBlocks(double threshold = 1.e-12);
+
+ /**
+ * Copy the BlockInfo and the
+ * matrix pointers into local
+ * variables and initialize
+ * cell matrix vectors.
+ */
+ void initialize(const BlockInfo *block_info,
+ MatrixBlockVector<MATRIX> &matrices);
+
+ /**
+ * Initialize the constraints.
+ */
+ void initialize(const ConstraintMatrix &constraints);
+ /**
+ * Initialize the local data
+ * in the
+ * DoFInfo
+ * object used later for
+ * assembling.
+ *
+ * The info object refers to
+ * a cell if
+ * <code>!face</code>, or
+ * else to an interior or
+ * boundary face.
+ */
+ template <class DOFINFO>
+ void initialize_info(DOFINFO &info, bool face) const;
+
+
+ /**
+ * Assemble the local matrices
+ * into the global matrices.
+ */
+ template<class DOFINFO>
+ void assemble(const DOFINFO &info);
+
+ /**
+ * Assemble all local matrices
+ * into the global matrices.
+ */
+ template<class DOFINFO>
+ void assemble(const DOFINFO &info1,
+ const DOFINFO &info2);
+
+ private:
+ /**
+ * Assemble a single local
+ * matrix into a global one.
+ */
+ void assemble(
+ MatrixBlock<MATRIX> &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2);
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2);
+
+ /**
+ * The global matrices,
+ * stored as a vector of
+ * pointers.
+ */
+ SmartPointer<MatrixBlockVector<MATRIX>,
+ MatrixLocalBlocksToGlobalBlocks<MATRIX, number> > matrices;
/**
* A pointer to the object containing the block structure.
template <class MATRIX, typename number = double>
class MGMatrixLocalBlocksToGlobalBlocks
{
- public:
- typedef MGMatrixBlockVector<MATRIX> MatrixPtrVector;
- typedef SmartPointer<MatrixPtrVector, MGMatrixLocalBlocksToGlobalBlocks<MATRIX,number> >
- MatrixPtrVectorPtr;
-
- /**
- * Constructor, initializing
- * the #threshold, which
- * limits how small numbers
- * may be to be entered into
- * the matrix.
- */
- MGMatrixLocalBlocksToGlobalBlocks(double threshold = 1.e-12);
-
- /**
- * Copy the BlockInfo and the
- * matrix pointers into local
- * variables and initialize
- * cell matrix vectors.
- */
- void initialize(const BlockInfo* block_info,
- MatrixPtrVector& matrices);
-
- /**
- * Initialize the multilevel
- * constraints.
- */
- void initialize(const MGConstrainedDoFs& mg_constrained_dofs);
-
- /**
- * Multigrid methods on
- * locally refined meshes
- * need additional
- * matrices. For
- * discontinuous Galerkin
- * methods, these are two
- * flux matrices across the
- * refinement edge, which are
- * set by this method.
- */
- void initialize_edge_flux(MatrixPtrVector& up, MatrixPtrVector& down);
-
- /**
- * Multigrid methods on
- * locally refined meshes
- * need additional
- * matrices. For
- * discontinuous Galerkin
- * methods, these are two
- * flux matrices across the
- * refinement edge, which are
- * set by this method.
- */
- void initialize_interfaces (MatrixPtrVector& interface_in, MatrixPtrVector& interface_out);
- /**
- * Initialize the local data
- * in the
- * DoFInfo
- * object used later for
- * assembling.
- *
- * The info object refers to
- * a cell if
- * <code>!face</code>, or
- * else to an interior or
- * boundary face.
- */
- template <class DOFINFO>
- void initialize_info(DOFINFO& info, bool face) const;
-
-
- /**
- * Assemble the local matrices
- * into the global matrices.
- */
- template<class DOFINFO>
- void assemble(const DOFINFO& info);
-
- /**
- * Assemble all local matrices
- * into the global matrices.
- */
- template<class DOFINFO>
- void assemble(const DOFINFO& info1,
- const DOFINFO& info2);
-
- private:
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble(
- MATRIX& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
- const unsigned int level1,
- const unsigned int level2,
- bool transpose = false);
-
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble_fluxes(
- MATRIX& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
- const unsigned int level1,
- const unsigned int level2);
-
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble_up(
- MATRIX& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
- const unsigned int level1,
- const unsigned int level2);
-
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble_down(
- MATRIX& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
- const unsigned int level1,
- const unsigned int level2);
-
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble_in(
- MATRIX& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
- const unsigned int level1,
- const unsigned int level2);
-
- /**
- * Assemble a single local
- * matrix into a global one.
- */
- void assemble_out(
- MATRIX& global,
- const FullMatrix<number>& local,
- const unsigned int block_row,
- const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
- const unsigned int level1,
- const unsigned int level2);
-
- /**
- * The level matrices,
- * stored as a vector of
- * pointers.
- */
- MatrixPtrVectorPtr matrices;
-
- /**
- * The flux matrix between
- * the fine and the coarse
- * level at refinement edges.
- */
- MatrixPtrVectorPtr flux_down;
-
- /**
- * The flux matrix between
- * the coarse and the fine
- * level at refinement edges.
- */
- MatrixPtrVectorPtr flux_up;
-
- /**
- * The interface matrix between
- * the fine and the coarse
- * level at refinement edges.
- */
- MatrixPtrVectorPtr interface_out;
-
- /**
- * The interface matrix between
- * the coarse and the fine
- * level at refinement edges.
- */
- MatrixPtrVectorPtr interface_in;
+ public:
+ typedef MGMatrixBlockVector<MATRIX> MatrixPtrVector;
+ typedef SmartPointer<MatrixPtrVector, MGMatrixLocalBlocksToGlobalBlocks<MATRIX,number> >
+ MatrixPtrVectorPtr;
+
+ /**
+ * Constructor, initializing
+ * the #threshold, which
+ * limits how small numbers
+ * may be to be entered into
+ * the matrix.
+ */
+ MGMatrixLocalBlocksToGlobalBlocks(double threshold = 1.e-12);
+
+ /**
+ * Copy the BlockInfo and the
+ * matrix pointers into local
+ * variables and initialize
+ * cell matrix vectors.
+ */
+ void initialize(const BlockInfo *block_info,
+ MatrixPtrVector &matrices);
+
+ /**
+ * Initialize the multilevel
+ * constraints.
+ */
+ void initialize(const MGConstrainedDoFs &mg_constrained_dofs);
+
+ /**
+ * Multigrid methods on
+ * locally refined meshes
+ * need additional
+ * matrices. For
+ * discontinuous Galerkin
+ * methods, these are two
+ * flux matrices across the
+ * refinement edge, which are
+ * set by this method.
+ */
+ void initialize_edge_flux(MatrixPtrVector &up, MatrixPtrVector &down);
+
+ /**
+ * Multigrid methods on
+ * locally refined meshes
+ * need additional
+ * matrices. For
+ * discontinuous Galerkin
+ * methods, these are two
+ * flux matrices across the
+ * refinement edge, which are
+ * set by this method.
+ */
+ void initialize_interfaces (MatrixPtrVector &interface_in, MatrixPtrVector &interface_out);
+ /**
+ * Initialize the local data
+ * in the
+ * DoFInfo
+ * object used later for
+ * assembling.
+ *
+ * The info object refers to
+ * a cell if
+ * <code>!face</code>, or
+ * else to an interior or
+ * boundary face.
+ */
+ template <class DOFINFO>
+ void initialize_info(DOFINFO &info, bool face) const;
+
+
+ /**
+ * Assemble the local matrices
+ * into the global matrices.
+ */
+ template<class DOFINFO>
+ void assemble(const DOFINFO &info);
+
+ /**
+ * Assemble all local matrices
+ * into the global matrices.
+ */
+ template<class DOFINFO>
+ void assemble(const DOFINFO &info1,
+ const DOFINFO &info2);
+
+ private:
+ /**
+ * Assemble a single local
+ * matrix into a global one.
+ */
+ void assemble(
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2,
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
++ const unsigned int level1,
++ const unsigned int level2,
+ bool transpose = false);
+
+ /**
+ * Assemble a single local
+ * matrix into a global one.
+ */
+ void assemble_fluxes(
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2);
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
++ const unsigned int level1,
++ const unsigned int level2);
+
+ /**
+      * Assemble a single local
+      * matrix into the global
+      * flux matrix #flux_up at
+      * the refinement edge.
+ */
+ void assemble_up(
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2);
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
++ const unsigned int level1,
++ const unsigned int level2);
+
+ /**
+      * Assemble a single local
+      * matrix into the global
+      * flux matrix #flux_down at
+      * the refinement edge.
+ */
+ void assemble_down(
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2);
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
++ const unsigned int level1,
++ const unsigned int level2);
+
+ /**
+      * Assemble a single local
+      * matrix into the global
+      * interface matrix
+      * #interface_in.
+ */
+ void assemble_in(
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2);
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
++ const unsigned int level1,
++ const unsigned int level2);
+
+ /**
+      * Assemble a single local
+      * matrix into the global
+      * interface matrix
+      * #interface_out.
+ */
+ void assemble_out(
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2);
++ const unsigned int block_row,
++ const unsigned int block_col,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
++ const unsigned int level1,
++ const unsigned int level2);
+
+ /**
+ * The level matrices,
+ * stored as a vector of
+ * pointers.
+ */
+ MatrixPtrVectorPtr matrices;
+
+ /**
+ * The flux matrix between
+ * the fine and the coarse
+ * level at refinement edges.
+ */
+ MatrixPtrVectorPtr flux_down;
+
+ /**
+ * The flux matrix between
+ * the coarse and the fine
+ * level at refinement edges.
+ */
+ MatrixPtrVectorPtr flux_up;
+
+ /**
+ * The interface matrix between
+ * the fine and the coarse
+ * level at refinement edges.
+ */
+ MatrixPtrVectorPtr interface_out;
+
+ /**
+ * The interface matrix between
+ * the coarse and the fine
+ * level at refinement edges.
+ */
+ MatrixPtrVectorPtr interface_in;
/**
* A pointer to the object containing the block structure.
template <class VECTOR>
inline void
ResidualLocalBlocksToGlobalBlocks<VECTOR>::assemble(
- VECTOR& global,
- const BlockVector<double>& local,
- const std::vector<types::global_dof_index>& dof)
+ VECTOR &global,
+ const BlockVector<double> &local,
- const std::vector<unsigned int> &dof)
++ const std::vector<types::global_dof_index> &dof)
{
- if(constraints == 0)
+ if (constraints == 0)
{
- for (unsigned int b=0;b<local.n_blocks();++b)
- for (unsigned int j=0;j<local.block(b).size();++j)
- {
- // The coordinates of
- // the current entry in
- // DoFHandler
- // numbering, which
- // differs from the
- // block-wise local
- // numbering we use in
- // our local vectors
- const unsigned int jcell = this->block_info->local().local_to_global(b, j);
- global(dof[jcell]) += local.block(b)(j);
- }
+ for (unsigned int b=0; b<local.n_blocks(); ++b)
+ for (unsigned int j=0; j<local.block(b).size(); ++j)
+ {
+ // The coordinates of
+ // the current entry in
+ // DoFHandler
+ // numbering, which
+ // differs from the
+ // block-wise local
+ // numbering we use in
+ // our local vectors
+ const unsigned int jcell = this->block_info->local().local_to_global(b, j);
+ global(dof[jcell]) += local.block(b)(j);
+ }
}
- else
- constraints->distribute_local_to_global(local, dof, global);
+ else
+ constraints->distribute_local_to_global(local, dof, global);
}
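+// Illustration of the index translation above (hypothetical block sizes,
+// not part of this patch): BlockIndices::local_to_global(b, j) returns the
+// start index of block b plus j. With local blocks of sizes {4, 4, 3},
+//
+//   block_info->local().local_to_global(1, 2)   // == 4 + 2 == 6
+//
+// so the entry local.block(1)(2) is added to global(dof[6]).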
template <class MATRIX, typename number>
inline void
MatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble(
- MatrixBlock<MATRIX>& global,
- const FullMatrix<number>& local,
+ MatrixBlock<MATRIX> &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2)
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2)
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2)
{
- if(constraints == 0)
- {
- for (unsigned int j=0;j<local.n_rows();++j)
- for (unsigned int k=0;k<local.n_cols();++k)
- if (std::fabs(local(j,k)) >= threshold)
- {
- // The coordinates of
- // the current entry in
- // DoFHandler
- // numbering, which
- // differs from the
- // block-wise local
- // numbering we use in
- // our local matrices
- const unsigned int jcell = this->block_info->local().local_to_global(block_row, j);
- const unsigned int kcell = this->block_info->local().local_to_global(block_col, k);
-
- global.add(dof1[jcell], dof2[kcell], local(j,k));
- }
- }
+ if (constraints == 0)
+ {
+ for (unsigned int j=0; j<local.n_rows(); ++j)
+ for (unsigned int k=0; k<local.n_cols(); ++k)
+ if (std::fabs(local(j,k)) >= threshold)
+ {
+ // The coordinates of
+ // the current entry in
+ // DoFHandler
+ // numbering, which
+ // differs from the
+ // block-wise local
+ // numbering we use in
+ // our local matrices
+ const unsigned int jcell = this->block_info->local().local_to_global(block_row, j);
+ const unsigned int kcell = this->block_info->local().local_to_global(block_col, k);
+
+ global.add(dof1[jcell], dof2[kcell], local(j,k));
+ }
+ }
else
- {
- const BlockIndices &bi = this->block_info->local();
- std::vector<types::global_dof_index> sliced_row_indices (bi.block_size(block_row));
- for(unsigned int i=0; i<sliced_row_indices.size(); ++i)
- sliced_row_indices[i] = dof1[bi.block_start(block_row)+i];
-
- std::vector<types::global_dof_index> sliced_col_indices (bi.block_size(block_col));
- for(unsigned int i=0; i<sliced_col_indices.size(); ++i)
- sliced_col_indices[i] = dof2[bi.block_start(block_col)+i];
-
- constraints->distribute_local_to_global(local,
- sliced_row_indices, sliced_col_indices, global);
- }
+ {
+ const BlockIndices &bi = this->block_info->local();
- std::vector<unsigned int> sliced_row_indices (bi.block_size(block_row));
++ std::vector<types::global_dof_index> sliced_row_indices (bi.block_size(block_row));
+ for (unsigned int i=0; i<sliced_row_indices.size(); ++i)
+ sliced_row_indices[i] = dof1[bi.block_start(block_row)+i];
+
- std::vector<unsigned int> sliced_col_indices (bi.block_size(block_col));
++ std::vector<types::global_dof_index> sliced_col_indices (bi.block_size(block_col));
+ for (unsigned int i=0; i<sliced_col_indices.size(); ++i)
+ sliced_col_indices[i] = dof2[bi.block_start(block_col)+i];
+
+ constraints->distribute_local_to_global(local,
+ sliced_row_indices, sliced_col_indices, global);
+ }
}
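+// Sketch of the constrained branch above (hypothetical numbers): if the
+// local block (block_row, block_col) has 4x3 entries and block_row starts
+// at position 8 of the cell-wide index vector, then
+//
+//   sliced_row_indices == { dof1[8], dof1[9], dof1[10], dof1[11] }
+//
+// i.e. exactly the global rows of that local block, so the ConstraintMatrix
+// can distribute the 4x3 matrix with
+// distribute_local_to_global(local, sliced_row_indices, sliced_col_indices, global).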
template <class DOFINFO>
inline void
MatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble(
- const DOFINFO& info)
+ const DOFINFO &info)
{
- for (unsigned int i=0;i<matrices->size();++i)
+ for (unsigned int i=0; i<matrices->size(); ++i)
{
- // Row and column index of
- // the block we are dealing with
+ // Row and column index of
+ // the block we are dealing with
- const unsigned int row = matrices->block(i).row;
- const unsigned int col = matrices->block(i).column;
+ const types::global_dof_index row = matrices->block(i).row;
+ const types::global_dof_index col = matrices->block(i).column;
assemble(matrices->block(i), info.matrix(i,false).matrix, row, col, info.indices, info.indices);
}
template <class DOFINFO>
inline void
MatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble(
- const DOFINFO& info1,
- const DOFINFO& info2)
+ const DOFINFO &info1,
+ const DOFINFO &info2)
{
- for (unsigned int i=0;i<matrices->size();++i)
+ for (unsigned int i=0; i<matrices->size(); ++i)
{
- // Row and column index of
- // the block we are dealing with
+ // Row and column index of
+ // the block we are dealing with
- const unsigned int row = matrices->block(i).row;
- const unsigned int col = matrices->block(i).column;
+ const types::global_dof_index row = matrices->block(i).row;
+ const types::global_dof_index col = matrices->block(i).column;
assemble(matrices->block(i), info1.matrix(i,false).matrix, row, col, info1.indices, info1.indices);
assemble(matrices->block(i), info1.matrix(i,true).matrix, row, col, info1.indices, info2.indices);
template <class MATRIX, typename number>
inline void
MGMatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble(
- MATRIX& global,
- const FullMatrix<number>& local,
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2,
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
+ const unsigned int level1,
+ const unsigned int level2,
bool transpose)
{
- for (unsigned int j=0;j<local.n_rows();++j)
- for (unsigned int k=0;k<local.n_cols();++k)
+ for (unsigned int j=0; j<local.n_rows(); ++j)
+ for (unsigned int k=0; k<local.n_cols(); ++k)
if (std::fabs(local(j,k)) >= threshold)
{
- // The coordinates of
- // the current entry in
- // DoFHandler
- // numbering, which
- // differs from the
- // block-wise local
- // numbering we use in
- // our local matrices
+ // The coordinates of
+ // the current entry in
+ // DoFHandler
+ // numbering, which
+ // differs from the
+ // block-wise local
+ // numbering we use in
+ // our local matrices
const unsigned int jcell = this->block_info->local().local_to_global(block_row, j);
const unsigned int kcell = this->block_info->local().local_to_global(block_col, k);
template <class MATRIX, typename number>
inline void
MGMatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble_fluxes(
- MATRIX& global,
- const FullMatrix<number>& local,
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2)
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
+ const unsigned int level1,
+ const unsigned int level2)
{
- for (unsigned int j=0;j<local.n_rows();++j)
- for (unsigned int k=0;k<local.n_cols();++k)
+ for (unsigned int j=0; j<local.n_rows(); ++j)
+ for (unsigned int k=0; k<local.n_cols(); ++k)
if (std::fabs(local(j,k)) >= threshold)
{
- // The coordinates of
- // the current entry in
- // DoFHandler
- // numbering, which
- // differs from the
- // block-wise local
- // numbering we use in
- // our local matrices
+ // The coordinates of
+ // the current entry in
+ // DoFHandler
+ // numbering, which
+ // differs from the
+ // block-wise local
+ // numbering we use in
+ // our local matrices
const unsigned int jcell = this->block_info->local().local_to_global(block_row, j);
const unsigned int kcell = this->block_info->local().local_to_global(block_col, k);
template <class MATRIX, typename number>
inline void
MGMatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble_up(
- MATRIX& global,
- const FullMatrix<number>& local,
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2)
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
+ const unsigned int level1,
+ const unsigned int level2)
{
- for (unsigned int j=0;j<local.n_rows();++j)
- for (unsigned int k=0;k<local.n_cols();++k)
+ for (unsigned int j=0; j<local.n_rows(); ++j)
+ for (unsigned int k=0; k<local.n_cols(); ++k)
if (std::fabs(local(j,k)) >= threshold)
{
- // The coordinates of
- // the current entry in
- // DoFHandler
- // numbering, which
- // differs from the
- // block-wise local
- // numbering we use in
- // our local matrices
+ // The coordinates of
+ // the current entry in
+ // DoFHandler
+ // numbering, which
+ // differs from the
+ // block-wise local
+ // numbering we use in
+ // our local matrices
const unsigned int jcell = this->block_info->local().local_to_global(block_row, j);
const unsigned int kcell = this->block_info->local().local_to_global(block_col, k);
template <class MATRIX, typename number>
inline void
MGMatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble_down(
- MATRIX& global,
- const FullMatrix<number>& local,
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2)
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
+ const unsigned int level1,
+ const unsigned int level2)
{
- for (unsigned int j=0;j<local.n_rows();++j)
- for (unsigned int k=0;k<local.n_cols();++k)
+ for (unsigned int j=0; j<local.n_rows(); ++j)
+ for (unsigned int k=0; k<local.n_cols(); ++k)
if (std::fabs(local(k,j)) >= threshold)
{
- // The coordinates of
- // the current entry in
- // DoFHandler
- // numbering, which
- // differs from the
- // block-wise local
- // numbering we use in
- // our local matrices
+ // The coordinates of
+ // the current entry in
+ // DoFHandler
+ // numbering, which
+ // differs from the
+ // block-wise local
+ // numbering we use in
+ // our local matrices
const unsigned int jcell = this->block_info->local().local_to_global(block_row, j);
const unsigned int kcell = this->block_info->local().local_to_global(block_col, k);
template <class MATRIX, typename number>
inline void
MGMatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble_in(
- MATRIX& global,
- const FullMatrix<number>& local,
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2)
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
+ const unsigned int level1,
+ const unsigned int level2)
{
// AssertDimension(local.n(), dof1.size());
// AssertDimension(local.m(), dof2.size());
template <class MATRIX, typename number>
inline void
MGMatrixLocalBlocksToGlobalBlocks<MATRIX, number>::assemble_out(
- MATRIX& global,
- const FullMatrix<number>& local,
+ MATRIX &global,
+ const FullMatrix<number> &local,
- unsigned int block_row,
- unsigned int block_col,
- const std::vector<unsigned int> &dof1,
- const std::vector<unsigned int> &dof2,
- unsigned int level1,
- unsigned int level2)
+ const unsigned int block_row,
+ const unsigned int block_col,
- const std::vector<types::global_dof_index>& dof1,
- const std::vector<types::global_dof_index>& dof2,
++ const std::vector<types::global_dof_index> &dof1,
++ const std::vector<types::global_dof_index> &dof2,
+ const unsigned int level1,
+ const unsigned int level2)
{
// AssertDimension(local.n(), dof1.size());
// AssertDimension(local.m(), dof2.size());
template<int dim, int spacedim = dim, typename number = double>
class DoFInfo : public LocalResults<number>
{
- public:
- /// The current cell
- typename Triangulation<dim, spacedim>::cell_iterator cell;
-
- /// The current face
- typename Triangulation<dim, spacedim>::face_iterator face;
-
- /**
- * The number of the current
- * face on the current cell.
- *
- * This number is
- * deal_II_numbers::invalid_unsigned_int
- * if the info object was
- * initialized with a cell.
- */
-
- unsigned int face_number;
- /**
- * The number of the current
- * subface on the current
- * face
- *
- * This number is
- * deal_II_numbers::invalid_unsigned_int
- * if the info object was not
- * initialized with a subface.
- */
- unsigned int sub_number;
-
- /*
- * The DoF indices of the
- * current cell
- */
- std::vector<types::global_dof_index> indices;
-
- /**
- * The DoF indices on the
- * current cell, organized by
- * local blocks
- */
- std::vector<std::vector<types::global_dof_index> > indices_by_block;
-
- /**
- * Constructor setting the
- * #block_info pointer.
- */
- DoFInfo(const BlockInfo& block_info);
-
- /**
- * Constructor
- * leaving the #block_info
- * pointer empty, but setting
- * the #aux_local_indices.
- */
- template <class DH>
- DoFInfo (const DH& dof_handler);
-
- /**
- * Set the current cell and
- * fill @p indices.
- */
- template <class DHCellIterator>
- void reinit(const DHCellIterator& c);
-
- /**
- * Set the current face and
- * fill @p indices if the #cell
- * changed.
- */
- template <class DHCellIterator, class DHFaceIterator>
- void reinit(const DHCellIterator& c,
- const DHFaceIterator& f,
- const unsigned int face_no);
-
- /**
- * Set the current subface
- * and fill @p indices if the
- * #cell changed.
- */
- template <class DHCellIterator, class DHFaceIterator>
- void reinit(const DHCellIterator& c,
- const DHFaceIterator& f,
- const unsigned int face_no,
- const unsigned int subface_no);
-
- /**
- * Switch to a new face of the
- * same cell. Does not change
- * @p indices and does not reset
- * data in LocalResults.
- */
- template <class DHFaceIterator>
- void set_face (const DHFaceIterator& f,
- const unsigned int face_no);
- /**
- * Switch to a new subface of the
- * same cell. Does not change
- * @p indices and does not reset
- * data in LocalResults.
- */
- template <class DHFaceIterator>
- void set_subface (const DHFaceIterator& f,
- const unsigned int face_no,
- const unsigned int subface_no);
-
- const BlockIndices& local_indices() const;
-
-
- /// The block structure of the system
- SmartPointer<const BlockInfo,DoFInfo<dim,spacedim> > block_info;
-
- bool level_cell;
- private:
- /**
- * Standard constructor, not
- * setting any block
- * indices. Use of this
- * constructor is not
- * recommended, but it is
- * needed for the arrays in
- * DoFInfoBox.
- */
- DoFInfo ();
-
- /// Set up local block indices
- void set_block_indices ();
- /// Fill index vector with active indices
- void get_indices(const typename DoFHandler<dim, spacedim>::cell_iterator& c);
-
- /// Fill index vector with level indices
- void get_indices(const typename MGDoFHandler<dim, spacedim>::cell_iterator& c);
-
- /// Auxiliary vector
- std::vector<types::global_dof_index> indices_org;
-
- /**
- * An auxiliary local
- * BlockIndices object created
- * if #block_info is not set.
- * It contains just a single
- * block of the size of
- * degrees of freedom per cell.
- */
- BlockIndices aux_local_indices;
-
- friend class DoFInfoBox<dim, DoFInfo<dim, spacedim, number> >;
+ public:
+ /// The current cell
+ typename Triangulation<dim, spacedim>::cell_iterator cell;
+
+ /// The current face
+ typename Triangulation<dim, spacedim>::face_iterator face;
+
+ /**
+ * The number of the current
+ * face on the current cell.
+ *
+ * This number is
+ * deal_II_numbers::invalid_unsigned_int
+ * if the info object was
+ * initialized with a cell.
+ */
+
+ unsigned int face_number;
+ /**
+ * The number of the current
+ * subface on the current
+ * face
+ *
+ * This number is
+ * deal_II_numbers::invalid_unsigned_int
+ * if the info object was not
+ * initialized with a subface.
+ */
+ unsigned int sub_number;
+
+     /**
+ * The DoF indices of the
+ * current cell
+ */
- std::vector<unsigned int> indices;
++ std::vector<types::global_dof_index> indices;
+
+ /**
+ * The DoF indices on the
+ * current cell, organized by
+ * local blocks
+ */
- std::vector<std::vector<unsigned int> > indices_by_block;
++ std::vector<std::vector<types::global_dof_index> > indices_by_block;
+
+ /**
+ * Constructor setting the
+ * #block_info pointer.
+ */
+ DoFInfo(const BlockInfo &block_info);
+
+ /**
+ * Constructor
+ * leaving the #block_info
+ * pointer empty, but setting
+ * the #aux_local_indices.
+ */
+ template <class DH>
+ DoFInfo (const DH &dof_handler);
+
+ /**
+ * Set the current cell and
+ * fill @p indices.
+ */
+ template <class DHCellIterator>
+ void reinit(const DHCellIterator &c);
+
+ /**
+ * Set the current face and
+ * fill @p indices if the #cell
+ * changed.
+ */
+ template <class DHCellIterator, class DHFaceIterator>
+ void reinit(const DHCellIterator &c,
+ const DHFaceIterator &f,
- const unsigned int n);
++ const unsigned int face_no);
+
+ /**
+ * Set the current subface
+ * and fill @p indices if the
+ * #cell changed.
+ */
+ template <class DHCellIterator, class DHFaceIterator>
+ void reinit(const DHCellIterator &c,
+ const DHFaceIterator &f,
- const unsigned int n,
- const unsigned int s);
++ const unsigned int face_no,
++ const unsigned int subface_no);
+
+ /**
+ * Switch to a new face of the
+ * same cell. Does not change
+ * @p indices and does not reset
+ * data in LocalResults.
+ */
+ template <class DHFaceIterator>
+ void set_face (const DHFaceIterator &f,
- const unsigned int n);
++ const unsigned int face_no);
+ /**
+ * Switch to a new subface of the
+ * same cell. Does not change
+ * @p indices and does not reset
+ * data in LocalResults.
+ */
+ template <class DHFaceIterator>
+ void set_subface (const DHFaceIterator &f,
- const unsigned int n,
- const unsigned int s);
++ const unsigned int face_no,
++ const unsigned int subface_no);
+
+ const BlockIndices &local_indices() const;
+
+
+ /// The block structure of the system
+ SmartPointer<const BlockInfo,DoFInfo<dim,spacedim> > block_info;
+
+ bool level_cell;
+ private:
+ /**
+ * Standard constructor, not
+ * setting any block
+ * indices. Use of this
+ * constructor is not
+ * recommended, but it is
+ * needed for the arrays in
+ * DoFInfoBox.
+ */
+ DoFInfo ();
+
+ /// Set up local block indices
+ void set_block_indices ();
+ /// Fill index vector with active indices
+ void get_indices(const typename DoFHandler<dim, spacedim>::cell_iterator &c);
+
+ /// Fill index vector with level indices
+ void get_indices(const typename MGDoFHandler<dim, spacedim>::cell_iterator &c);
+
+ /// Auxiliary vector
- std::vector<unsigned int> indices_org;
++ std::vector<types::global_dof_index> indices_org;
+
+ /**
+ * An auxiliary local
+ * BlockIndices object created
+ * if #block_info is not set.
+ * It contains just a single
+ * block of the size of
+ * degrees of freedom per cell.
+ */
+ BlockIndices aux_local_indices;
+
+ friend class DoFInfoBox<dim, DoFInfo<dim, spacedim, number> >;
};
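+// A minimal usage sketch (assumed typical use; `dof_handler` and the
+// template parameter dim are placeholders): a DoFInfo object is created
+// once and re-initialized for every cell, usually by a MeshWorker loop
+// rather than by hand.
+//
+//   DoFInfo<dim> dof_info (dof_handler);
+//   typename DoFHandler<dim>::active_cell_iterator
+//     cell = dof_handler.begin_active();
+//   for (; cell != dof_handler.end(); ++cell)
+//     {
+//       dof_info.reinit (cell);        // fills dof_info.indices
+//       // ... compute local data, then pass dof_info to an assembler
+//     }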
template <class DHFaceIterator>
inline void
DoFInfo<dim,spacedim,number>::set_face(
- const DHFaceIterator& f,
+ const DHFaceIterator &f,
- unsigned int n)
+ const unsigned int face_no)
{
face = static_cast<typename Triangulation<dim>::face_iterator> (f);
- face_number = n;
+ face_number = face_no;
sub_number = deal_II_numbers::invalid_unsigned_int;
}
template <class DHCellIterator, class DHFaceIterator>
inline void
DoFInfo<dim,spacedim,number>::reinit(
- const DHCellIterator& c,
- const DHFaceIterator& f,
+ const DHCellIterator &c,
+ const DHFaceIterator &f,
- unsigned int n)
+ const unsigned int face_no)
{
if ((cell.state() != IteratorState::valid)
|| cell != static_cast<typename Triangulation<dim>::cell_iterator> (c))
template <class DHFaceIterator>
inline void
DoFInfo<dim,spacedim,number>::set_subface(
- const DHFaceIterator& f,
+ const DHFaceIterator &f,
- unsigned int n,
- unsigned int s)
+ const unsigned int face_no,
+ const unsigned int subface_no)
{
face = static_cast<typename Triangulation<dim>::face_iterator> (f);
- face_number = n;
- sub_number = s;
+ face_number = face_no;
+ sub_number = subface_no;
}
template <class DHCellIterator, class DHFaceIterator>
inline void
DoFInfo<dim,spacedim,number>::reinit(
- const DHCellIterator& c,
- const DHFaceIterator& f,
+ const DHCellIterator &c,
+ const DHFaceIterator &f,
- unsigned int n,
- unsigned int s)
+ const unsigned int face_no,
+ const unsigned int subface_no)
{
if (cell.state() != IteratorState::valid
|| cell != static_cast<typename Triangulation<dim>::cell_iterator> (c))
template<class SOLVER, class VECTOR>
template<class MATRIX, class PRECOND>
MGCoarseGridLACIteration<SOLVER, VECTOR>
- ::MGCoarseGridLACIteration(SOLVER& s,
+ ::MGCoarseGridLACIteration(SOLVER &s,
- const MATRIX &m,
+ const MATRIX &m,
const PRECOND &p)
- :
- solver(&s, typeid(*this).name())
+ :
+ solver(&s, typeid(*this).name())
{
matrix = new PointerMatrix<MATRIX, VECTOR>(&m);
precondition = new PointerMatrix<PRECOND, VECTOR>(&p);
template<class MATRIX, class PRECOND>
void
MGCoarseGridLACIteration<SOLVER, VECTOR>
- ::initialize(SOLVER& s,
+ ::initialize(SOLVER &s,
- const MATRIX &m,
+ const MATRIX &m,
const PRECOND &p)
{
solver = &s;
*/
class MGConstrainedDoFs : public Subscriptor
{
- public:
- /**
- * Fill the internal data
- * structures with values
- * extracted from the dof
- * handler.
- *
- * This function leaves
- * #boundary_indices empty, since
- * no boundary values are
- * provided.
- */
- template <int dim, int spacedim>
- void initialize(const MGDoFHandler<dim,spacedim>& dof);
-
- /**
- * Fill the internal data
- * structures with values
- * extracted from the dof
- * handler, applying the boundary
- * values provided.
- */
- template <int dim, int spacedim>
- void initialize(const MGDoFHandler<dim,spacedim>& dof,
- const typename FunctionMap<dim>::type& function_map,
- const ComponentMask &component_mask = ComponentMask());
-
- template <int dim, int spacedim>
- void initialize(const DoFHandler<dim,spacedim>& dof,
- const typename FunctionMap<dim>::type& function_map,
- const std::vector<bool>& component_mask = std::vector<bool>());
-
- /**
- * Reset the data structures.
- */
- void clear();
-
- /**
- * Determine whether a dof index
- * is subject to a boundary
- * constraint.
- */
- bool is_boundary_index (const unsigned int level,
- const unsigned int index) const;
-
- /**
- * Determine whether a dof index
- * is at an edge that is not
- * a refinement edge.
- */
- bool non_refinement_edge_index (const unsigned int level,
+ public:
+ /**
+ * Fill the internal data
+ * structures with values
+ * extracted from the dof
+ * handler.
+ *
+ * This function leaves
+ * #boundary_indices empty, since
+ * no boundary values are
+ * provided.
+ */
+ template <int dim, int spacedim>
+ void initialize(const MGDoFHandler<dim,spacedim> &dof);
+
+ /**
+ * Fill the internal data
+ * structures with values
+ * extracted from the dof
+ * handler, applying the boundary
+ * values provided.
+ */
+ template <int dim, int spacedim>
+ void initialize(const MGDoFHandler<dim,spacedim> &dof,
+ const typename FunctionMap<dim>::type &function_map,
+ const ComponentMask &component_mask = ComponentMask());
+
++ template <int dim, int spacedim>
++ void initialize(const DoFHandler<dim,spacedim> &dof,
++ const typename FunctionMap<dim>::type &function_map,
++ const std::vector<bool> &component_mask = std::vector<bool>());
++
+ /**
+ * Reset the data structures.
+ */
+ void clear();
+
+ /**
+ * Determine whether a dof index
+ * is subject to a boundary
+ * constraint.
+ */
+ bool is_boundary_index (const unsigned int level,
+ const unsigned int index) const;
+
+ /**
+ * Determine whether a dof index
+ * is at an edge that is not
+ * a refinement edge.
+ */
+ bool non_refinement_edge_index (const unsigned int level,
+ const unsigned int index) const;
+
+ /**
+ * Determine whether a dof index
+ * is at the refinement edge.
+ */
+ bool at_refinement_edge (const unsigned int level,
+ const unsigned int index) const;
+
+ /**
+ * Determine whether a dof index
+ * is at the refinement edge and
+ * subject to a boundary
+        * constraint.
+ */
+ bool at_refinement_edge_boundary (const unsigned int level,
const unsigned int index) const;
- /**
- * Determine whether a dof index
- * is at the refinement edge.
- */
- bool at_refinement_edge (const unsigned int level,
- const unsigned int index) const;
-
- /**
- * Determine whether a dof index
- * is at the refinement edge and
- * subject to a boundary
- * constraint .
- */
- bool at_refinement_edge_boundary (const unsigned int level,
- const unsigned int index) const;
-
- /**
- * Return the indices of dofs for each
- * level that lie on the boundary of the
- * domain.
- */
- const std::vector<std::set<unsigned int> > &
- get_boundary_indices () const;
-
- /**
- * Return the indices of dofs for each
- * level that lie on the boundary of the
- * domain.
- */
- const std::vector<std::set<unsigned int> > &
- get_non_refinement_edge_indices () const;
-
- /**
- * Return the indices of dofs for each
- * level that lie on the refinement edge
- * (i.e. are on faces between cells of
- * this level and cells on the level
- * below).
- */
- const std::vector<std::vector<bool> > &
- get_refinement_edge_indices () const;
-
- /**
- * Return the indices of dofs for each
- * level that are in the intersection of
- * the sets returned by
- * get_boundary_indices() and
- * get_refinement_edge_indices().
- */
- const std::vector<std::vector<bool> > &
- get_refinement_edge_boundary_indices () const;
-
- /**
- * Return if boundary_indices need to
- * be set or not.
- */
-
- bool set_boundary_values () const;
-
- /**
- * Return if the finite element requires
- * continuity across refinement edges.
- */
- bool continuity_across_refinement_edges () const;
- private:
-
- /**
- * The indices of boundary dofs
- * for each level.
- */
- std::vector<std::set<unsigned int> > boundary_indices;
-
- /**
- * The degrees of freedom on egdges
- * that are not a
- * refinement edge between a
- * level and coarser cells.
- */
- std::vector<std::set<unsigned int> > non_refinement_edge_indices;
-
- /**
- * The degrees of freedom on the
- * refinement edge between a
- * level and coarser cells.
- */
- std::vector<std::vector<bool> > refinement_edge_indices;
-
- /**
- * The degrees of freedom on the
- * refinement edge between a
- * level and coarser cells, which
- * are also on the boundary.
- *
- * This is a subset of
- * #refinement_edge_indices.
- */
- std::vector<std::vector<bool> > refinement_edge_boundary_indices;
+ /**
+ * Return the indices of dofs for each
+ * level that lie on the boundary of the
+ * domain.
+ */
+ const std::vector<std::set<unsigned int> > &
+ get_boundary_indices () const;
+
+ /**
+      * Return the indices of dofs for each
+      * level that lie on edges which are
+      * not refinement edges towards coarser
+      * cells.
+ */
+ const std::vector<std::set<unsigned int> > &
+ get_non_refinement_edge_indices () const;
+
+ /**
+ * Return the indices of dofs for each
+ * level that lie on the refinement edge
+ * (i.e. are on faces between cells of
+ * this level and cells on the level
+ * below).
+ */
+ const std::vector<std::vector<bool> > &
+ get_refinement_edge_indices () const;
+
+ /**
+ * Return the indices of dofs for each
+ * level that are in the intersection of
+ * the sets returned by
+ * get_boundary_indices() and
+ * get_refinement_edge_indices().
+ */
+ const std::vector<std::vector<bool> > &
+ get_refinement_edge_boundary_indices () const;
+
+ /**
+      * Return whether
+      * #boundary_indices needs to
+      * be set.
+      */
+     bool set_boundary_values () const;
+
+ /**
+      * Return whether the finite
+      * element requires continuity
+      * across refinement edges.
+ */
+ bool continuity_across_refinement_edges () const;
+ private:
+
+ /**
+ * The indices of boundary dofs
+ * for each level.
+ */
+ std::vector<std::set<unsigned int> > boundary_indices;
+
+ /**
+      * The degrees of freedom on edges
+ * that are not a
+ * refinement edge between a
+ * level and coarser cells.
+ */
+ std::vector<std::set<unsigned int> > non_refinement_edge_indices;
+
+ /**
+ * The degrees of freedom on the
+ * refinement edge between a
+ * level and coarser cells.
+ */
+ std::vector<std::vector<bool> > refinement_edge_indices;
+
+ /**
+ * The degrees of freedom on the
+ * refinement edge between a
+ * level and coarser cells, which
+ * are also on the boundary.
+ *
+ * This is a subset of
+ * #refinement_edge_indices.
+ */
+ std::vector<std::vector<bool> > refinement_edge_boundary_indices;
};
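+// A minimal usage sketch (assumed typical use; `mg_dof_handler` and the
+// boundary description are placeholders, not part of this patch):
+//
+//   MGConstrainedDoFs mg_constrained_dofs;
+//   typename FunctionMap<dim>::type dirichlet_boundary;
+//   ZeroFunction<dim> homogeneous_bc;
+//   dirichlet_boundary[0] = &homogeneous_bc;
+//   mg_constrained_dofs.clear ();
+//   mg_constrained_dofs.initialize (mg_dof_handler, dirichlet_boundary);
+//   // e.g. test whether a level dof sits on the refinement edge:
+//   // mg_constrained_dofs.at_refinement_edge (level, index);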
MGTools::make_boundary_list (dof, function_map, boundary_indices, component_mask);
MGTools::extract_inner_interface_dofs (dof, refinement_edge_indices,
- refinement_edge_boundary_indices);
+ refinement_edge_boundary_indices);
MGTools::extract_non_interface_dofs (dof, non_refinement_edge_indices);
- const DoFHandler<dim,spacedim>& dof,
- const typename FunctionMap<dim>::type& function_map,
- const std::vector<bool>& component_mask)
+}
+
+
+template <int dim, int spacedim>
+inline
+void
+MGConstrainedDoFs::initialize(
- for(unsigned int l=0; l<nlevels; ++l)
++ const DoFHandler<dim,spacedim> &dof,
++ const typename FunctionMap<dim>::type &function_map,
++ const std::vector<bool> &component_mask)
+{
+ const unsigned int nlevels = dof.get_tria().n_levels();
+ boundary_indices.resize(nlevels);
+ refinement_edge_indices.resize(nlevels);
+ refinement_edge_boundary_indices.resize(nlevels);
+
- refinement_edge_boundary_indices);
++ for (unsigned int l=0; l<nlevels; ++l)
+ {
+ boundary_indices[l].clear();
+ refinement_edge_indices[l].resize(dof.n_dofs(l));
+ refinement_edge_boundary_indices[l].resize(dof.n_dofs(l));
+ }
+
+ MGTools::make_boundary_list (dof, function_map, boundary_indices, component_mask);
+ MGTools::extract_inner_interface_dofs (dof, refinement_edge_indices,
++ refinement_edge_boundary_indices);
}
template <int dim, int spacedim>
inline
-unsigned int
+types::global_dof_index
MGDoFHandler<dim,spacedim>::MGVertexDoFs::get_index (const unsigned int level,
- const unsigned int dof_number,
- const unsigned int dofs_per_vertex) const {
+ const unsigned int dof_number,
+ const unsigned int dofs_per_vertex) const
+ {
Assert ((level >= coarsest_level) && (level <= finest_level),
ExcInvalidLevel(level));
Assert (dof_number < dofs_per_vertex,
*/
namespace MGTools
{
- /**
- * Compute row length vector for
- * multilevel methods.
- */
+ /**
+ * Compute row length vector for
+ * multilevel methods.
+ */
template <int dim, int spacedim>
void
- compute_row_length_vector(const MGDoFHandler<dim,spacedim>& dofs,
+ compute_row_length_vector(const MGDoFHandler<dim,spacedim> &dofs,
const unsigned int level,
- std::vector<unsigned int>& row_lengths,
+ std::vector<unsigned int> &row_lengths,
const DoFTools::Coupling flux_couplings = DoFTools::none);
- /**
- * Compute row length vector for
- * multilevel methods with
- * optimization for block
- * couplings.
- */
+ /**
+ * Compute row length vector for
+ * multilevel methods with
+ * optimization for block
+ * couplings.
+ */
template <int dim, int spacedim>
void
- compute_row_length_vector(const MGDoFHandler<dim,spacedim>& dofs,
+ compute_row_length_vector(const MGDoFHandler<dim,spacedim> &dofs,
const unsigned int level,
- std::vector<unsigned int>& row_lengths,
- const Table<2,DoFTools::Coupling>& couplings,
- const Table<2,DoFTools::Coupling>& flux_couplings);
+ std::vector<unsigned int> &row_lengths,
+ const Table<2,DoFTools::Coupling> &couplings,
+ const Table<2,DoFTools::Coupling> &flux_couplings);
- /**
- * Write the sparsity structure
- * of the matrix belonging to the
- * specified @p level. The sparsity pattern
- * is not compressed, so before
- * creating the actual matrix
- * you have to compress the
- * matrix yourself, using
- * <tt>SparseMatrixStruct::compress()</tt>.
- *
- * There is no need to consider
- * hanging nodes here, since only
- * one level is considered.
- */
+ /**
+ * Write the sparsity structure
+ * of the matrix belonging to the
+ * specified @p level. The sparsity pattern
+ * is not compressed, so before
+ * creating the actual matrix
+ * you have to compress the
+ * matrix yourself, using
+      * <tt>SparsityPattern::compress()</tt>.
+ *
+ * There is no need to consider
+ * hanging nodes here, since only
+ * one level is considered.
+ */
- template <int dim, class SparsityPattern, int spacedim>
+ template <class DH, class SparsityPattern>
void
- make_sparsity_pattern (const MGDoFHandler<dim,spacedim> &dof_handler,
+ make_sparsity_pattern (const DH &dof_handler,
SparsityPattern &sparsity,
const unsigned int level);
const unsigned int level,
const Table<2,DoFTools::Coupling> &flux_mask);
- /**
- * Count the dofs block-wise
- * on each level.
- *
- * Result is a vector containing
- * for each level a vector
- * containing the number of dofs
- * for each block (access is
- * <tt>result[level][block]</tt>).
- */
+ /**
+ * Count the dofs block-wise
+ * on each level.
+ *
+ * Result is a vector containing
+ * for each level a vector
+ * containing the number of dofs
+ * for each block (access is
+ * <tt>result[level][block]</tt>).
+ */
- template <int dim, int spacedim>
+ template <class DH>
void
- count_dofs_per_block (const DH& dof_handler,
- std::vector<std::vector<unsigned int> >& dofs_per_block,
- count_dofs_per_block (const MGDoFHandler<dim,spacedim> &mg_dof,
- std::vector<std::vector<unsigned int> > &result,
- std::vector<unsigned int> target_block = std::vector<unsigned int>());
++ count_dofs_per_block (const DH &dof_handler,
++ std::vector<std::vector<unsigned int> > &dofs_per_block,
+ std::vector<unsigned int> target_block = std::vector<unsigned int>());
- /**
- * Count the dofs component-wise
- * on each level.
- *
- * Result is a vector containing
- * for each level a vector
- * containing the number of dofs
- * for each component (access is
- * <tt>result[level][component]</tt>).
- */
+ /**
+ * Count the dofs component-wise
+ * on each level.
+ *
+ * Result is a vector containing
+ * for each level a vector
+ * containing the number of dofs
+ * for each component (access is
+ * <tt>result[level][component]</tt>).
+ */
template <int dim, int spacedim>
void
count_dofs_per_component (const MGDoFHandler<dim,spacedim> &mg_dof,
void
make_boundary_list (const MGDoFHandler<dim,spacedim> &mg_dof,
const typename FunctionMap<dim>::type &function_map,
- std::vector<std::set<unsigned int> > &boundary_indices,
+ std::vector<std::set<unsigned int> > &boundary_indices,
const ComponentMask &component_mask = ComponentMask());
- /**
- * The same function as above, but return
- * an IndexSet rather than a
- * std::set<unsigned int> on each level.
- */
+ /**
+      * The same function as above, but returns
+ * an IndexSet rather than a
+ * std::set<unsigned int> on each level.
+ */
template <int dim, int spacedim>
void
make_boundary_list (const MGDoFHandler<dim,spacedim> &mg_dof,
template <int dim, int spacedim>
void
extract_inner_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::vector<bool> > &interface_dofs,
- std::vector<std::vector<bool> > &boundary_interface_dofs);
+ std::vector<std::vector<bool> > &interface_dofs,
+ std::vector<std::vector<bool> > &boundary_interface_dofs);
- /**
- * Does the same as the function above,
- * but fills only the interface_dofs.
- */
+ /**
+ * Does the same as the function above,
+ * but fills only the interface_dofs.
+ */
template <int dim, int spacedim>
void
extract_inner_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
template <int dim, int spacedim>
void
extract_non_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::set<unsigned int> > &non_interface_dofs);
- std::vector<std::set<unsigned int> > &non_interface_dofs);
++ std::vector<std::set<unsigned int> > &non_interface_dofs);
}
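+// Sketch of typical calls into MGTools (assumed usage; `mg_dof_handler`,
+// `level`, `triangulation` and `fe` are placeholders):
+//
+//   CompressedSparsityPattern csp (mg_dof_handler.n_dofs(level),
+//                                  mg_dof_handler.n_dofs(level));
+//   MGTools::make_sparsity_pattern (mg_dof_handler, csp, level);
+//   // compress into the final SparsityPattern before building matrices
+//
+//   std::vector<std::vector<unsigned int> >
+//     dofs_per_block (triangulation.n_levels(),
+//                     std::vector<unsigned int> (fe.n_blocks()));
+//   MGTools::count_dofs_per_block (mg_dof_handler, dofs_per_block);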
/* @} */
template <class VECTOR>
class MGTransferPrebuilt : public MGTransferBase<VECTOR>
{
- public:
- /**
- * Constructor without constraint
- * matrices. Use this constructor
- * only with discontinuous finite
- * elements or with no local
- * refinement.
- */
- MGTransferPrebuilt ();
- /**
- * Constructor with constraint matrices as well as mg_constrained_dofs.
- */
- MGTransferPrebuilt (const ConstraintMatrix& constraints,
- const MGConstrainedDoFs& mg_constrained_dofs);
- /**
- * Destructor.
- */
- virtual ~MGTransferPrebuilt ();
- /**
- * Actually build the prolongation
- * matrices for each level.
- */
- template <int dim, int spacedim>
- void build_matrices (const MGDoFHandler<dim,spacedim> &mg_dof);
-
- template <int dim, int spacedim>
- void build_matrices (const DoFHandler<dim,spacedim> &dof_handler);
-
- virtual void prolongate (const unsigned int to_level,
- VECTOR &dst,
- const VECTOR &src) const;
-
- virtual void restrict_and_add (const unsigned int from_level,
- VECTOR &dst,
- const VECTOR &src) const;
-
- /**
- * Transfer from a vector on the
- * global grid to vectors defined
- * on each of the levels
- * separately, i.a. an @p MGVector.
- */
- template <int dim, class InVector, int spacedim>
- void
- copy_to_mg (const MGDoFHandler<dim,spacedim>& mg_dof,
- MGLevelObject<VECTOR>& dst,
- const InVector& src) const;
-
- template <int dim, class InVector, int spacedim>
- void
- copy_to_mg (const DoFHandler<dim,spacedim>& dof_handler,
- MGLevelObject<VECTOR>& dst,
- const InVector& src) const;
-
- /**
- * Transfer from multi-level vector to
- * normal vector.
- *
- * Copies data from active
- * portions of an MGVector into
- * the respective positions of a
- * <tt>Vector<number></tt>. In order to
- * keep the result consistent,
- * constrained degrees of freedom
- * are set to zero.
- */
- template <int dim, class OutVector, int spacedim>
- void
- copy_from_mg (const MGDoFHandler<dim,spacedim>& mg_dof,
- OutVector& dst,
- const MGLevelObject<VECTOR> &src) const;
-
- template <int dim, class OutVector, int spacedim>
- void
- copy_from_mg (const DoFHandler<dim,spacedim>& dof_handler,
- OutVector& dst,
- const MGLevelObject<VECTOR> &src) const;
-
- /**
- * Add a multi-level vector to a
- * normal vector.
- *
- * Works as the previous
- * function, but probably not for
- * continuous elements.
- */
- template <int dim, class OutVector, int spacedim>
- void
- copy_from_mg_add (const MGDoFHandler<dim,spacedim>& mg_dof,
- OutVector& dst,
- const MGLevelObject<VECTOR>& src) const;
-
- template <int dim, class OutVector, int spacedim>
- void
- copy_from_mg_add (const DoFHandler<dim,spacedim>& dof_handler,
- OutVector& dst,
- const MGLevelObject<VECTOR>& src) const;
-
- /**
- * If this object operates on
- * BlockVector objects, we need
- * to describe how the individual
- * vector components are mapped
- * to the blocks of a vector. For
- * example, for a Stokes system,
- * we have dim+1 vector
- * components for velocity and
- * pressure, but we may want to
- * use block vectors with only
- * two blocks for all velocities
- * in one block, and the pressure
- * variables in the other.
- *
- * By default, if this function
- * is not called, block vectors
- * have as many blocks as the
- * finite element has vector
- * components. However, this can
- * be changed by calling this
- * function with an array that
- * describes how vector
- * components are to be grouped
- * into blocks. The meaning of
- * the argument is the same as
- * the one given to the
- * DoFTools::count_dofs_per_component
- * function.
- */
- void
- set_component_to_block_map (const std::vector<unsigned int> &map);
-
- /**
- * Finite element does not
- * provide prolongation matrices.
- */
- DeclException0(ExcNoProlongation);
-
- /**
- * Call @p build_matrices
- * function first.
- */
- DeclException0(ExcMatricesNotBuilt);
-
- /**
- * Memory used by this object.
- */
- std::size_t memory_consumption () const;
-
-
- private:
-
- /**
- * Sizes of the multi-level vectors.
- */
- std::vector<unsigned int> sizes;
-
- /**
- * Sparsity patterns for transfer
- * matrices.
- */
- std::vector<std_cxx1x::shared_ptr<SparsityPattern> > prolongation_sparsities;
-
- /**
- * The actual prolongation matrix.
- * column indices belong to the
- * dof indices of the mother cell,
- * i.e. the coarse level.
- * while row indices belong to the
- * child cell, i.e. the fine level.
- */
- std::vector<std_cxx1x::shared_ptr<SparseMatrix<double> > > prolongation_matrices;
-
- /**
- * Mapping for the
- * <tt>copy_to/from_mg</tt>-functions.
- * The data is first the global
- * index, then the level index.
- */
- std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
- copy_indices;
-
- /**
- * The vector that stores what
- * has been given to the
- * set_component_to_block_map()
- * function.
- */
- std::vector<unsigned int> component_to_block_map;
-
- /**
- * Degrees of freedom on the
- * refinement edge excluding
- * those on the boundary.
- */
- std::vector<std::vector<bool> > interface_dofs;
- /**
- * The constraints of the global
- * system.
- */
- SmartPointer<const ConstraintMatrix, MGTransferPrebuilt<VECTOR> > constraints;
- /**
- * The mg_constrained_dofs of the level
- * systems.
- */
-
- SmartPointer<const MGConstrainedDoFs, MGTransferPrebuilt<VECTOR> > mg_constrained_dofs;
+ public:
+ /**
+ * Constructor without constraint
+ * matrices. Use this constructor
+ * only with discontinuous finite
+ * elements or with no local
+ * refinement.
+ */
+ MGTransferPrebuilt ();
+ /**
+ * Constructor with constraint matrices as well as mg_constrained_dofs.
+ */
+ MGTransferPrebuilt (const ConstraintMatrix &constraints,
+ const MGConstrainedDoFs &mg_constrained_dofs);
+ /**
+ * Destructor.
+ */
+ virtual ~MGTransferPrebuilt ();
+ /**
+ * Actually build the prolongation
+ * matrices for each level.
+ */
+ template <int dim, int spacedim>
+ void build_matrices (const MGDoFHandler<dim,spacedim> &mg_dof);
+
++ template <int dim, int spacedim>
++ void build_matrices (const DoFHandler<dim,spacedim> &dof_handler);
++
+ virtual void prolongate (const unsigned int to_level,
+ VECTOR &dst,
+ const VECTOR &src) const;
+
+ virtual void restrict_and_add (const unsigned int from_level,
+ VECTOR &dst,
+ const VECTOR &src) const;
+
+ /**
+ * Transfer from a vector on the
+ * global grid to vectors defined
+ * on each of the levels
+      * separately, i.e. an @p MGVector.
+ */
+ template <int dim, class InVector, int spacedim>
+ void
+ copy_to_mg (const MGDoFHandler<dim,spacedim> &mg_dof,
+ MGLevelObject<VECTOR> &dst,
+ const InVector &src) const;
+
++ template <int dim, class InVector, int spacedim>
++ void
++ copy_to_mg (const DoFHandler<dim,spacedim> &dof_handler,
++ MGLevelObject<VECTOR> &dst,
++ const InVector &src) const;
++
+ /**
+ * Transfer from multi-level vector to
+ * normal vector.
+ *
+ * Copies data from active
+ * portions of an MGVector into
+ * the respective positions of a
+ * <tt>Vector<number></tt>. In order to
+ * keep the result consistent,
+ * constrained degrees of freedom
+ * are set to zero.
+ */
+ template <int dim, class OutVector, int spacedim>
+ void
+ copy_from_mg (const MGDoFHandler<dim,spacedim> &mg_dof,
+ OutVector &dst,
+ const MGLevelObject<VECTOR> &src) const;
+
++ template <int dim, class OutVector, int spacedim>
++ void
++ copy_from_mg (const DoFHandler<dim,spacedim> &dof_handler,
++ OutVector &dst,
++ const MGLevelObject<VECTOR> &src) const;
++
+ /**
+ * Add a multi-level vector to a
+ * normal vector.
+ *
+ * Works as the previous
+ * function, but probably not for
+ * continuous elements.
+ */
+ template <int dim, class OutVector, int spacedim>
+ void
+ copy_from_mg_add (const MGDoFHandler<dim,spacedim> &mg_dof,
+ OutVector &dst,
+ const MGLevelObject<VECTOR> &src) const;
+
++ template <int dim, class OutVector, int spacedim>
++ void
++ copy_from_mg_add (const DoFHandler<dim,spacedim> &dof_handler,
++ OutVector &dst,
++ const MGLevelObject<VECTOR> &src) const;
++
+ /**
+ * If this object operates on
+ * BlockVector objects, we need
+ * to describe how the individual
+ * vector components are mapped
+ * to the blocks of a vector. For
+ * example, for a Stokes system,
+ * we have dim+1 vector
+ * components for velocity and
+ * pressure, but we may want to
+ * use block vectors with only
+ * two blocks for all velocities
+ * in one block, and the pressure
+ * variables in the other.
+ *
+ * By default, if this function
+ * is not called, block vectors
+ * have as many blocks as the
+ * finite element has vector
+ * components. However, this can
+ * be changed by calling this
+ * function with an array that
+ * describes how vector
+ * components are to be grouped
+ * into blocks. The meaning of
+ * the argument is the same as
+ * the one given to the
+ * DoFTools::count_dofs_per_component
+ * function.
+ */
+ void
+ set_component_to_block_map (const std::vector<unsigned int> &map);
+
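+     // Example for the Stokes case described above (names assumed):
+     // grouping the dim velocity components into block 0 and the
+     // pressure into block 1 amounts to
+     //
+     //   std::vector<unsigned int> map (dim+1, 0);
+     //   map[dim] = 1;
+     //   transfer.set_component_to_block_map (map);
+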
+ /**
+ * Finite element does not
+ * provide prolongation matrices.
+ */
+ DeclException0(ExcNoProlongation);
+
+ /**
+ * Call @p build_matrices
+ * function first.
+ */
+ DeclException0(ExcMatricesNotBuilt);
+
+ /**
+ * Memory used by this object.
+ */
+ std::size_t memory_consumption () const;
+
+
+ private:
+
+ /**
+ * Sizes of the multi-level vectors.
+ */
+ std::vector<unsigned int> sizes;
+
+ /**
+ * Sparsity patterns for transfer
+ * matrices.
+ */
+ std::vector<std_cxx1x::shared_ptr<SparsityPattern> > prolongation_sparsities;
+
+ /**
+ * The actual prolongation matrix.
+      * Column indices belong to the
+      * dof indices of the mother cell,
+      * i.e. the coarse level,
+      * while row indices belong to the
+      * child cell, i.e. the fine level.
+ */
+ std::vector<std_cxx1x::shared_ptr<SparseMatrix<double> > > prolongation_matrices;
+
+ /**
+ * Mapping for the
+ * <tt>copy_to/from_mg</tt>-functions.
+ * The data is first the global
+ * index, then the level index.
- */
++ */
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+ copy_indices;
+
+ /**
+ * The vector that stores what
+ * has been given to the
+ * set_component_to_block_map()
+ * function.
+ */
+ std::vector<unsigned int> component_to_block_map;
+
+ /**
+ * Degrees of freedom on the
+ * refinement edge excluding
+ * those on the boundary.
+ */
+ std::vector<std::vector<bool> > interface_dofs;
+ /**
+ * The constraints of the global
+ * system.
+ */
+ SmartPointer<const ConstraintMatrix, MGTransferPrebuilt<VECTOR> > constraints;
+ /**
+ * The mg_constrained_dofs of the level
+ * systems.
+ */
+
+ SmartPointer<const MGConstrainedDoFs, MGTransferPrebuilt<VECTOR> > mg_constrained_dofs;
};
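+// A minimal usage sketch (assumed typical use; `hanging_node_constraints`,
+// `mg_constrained_dofs`, `mg_dof_handler`, `triangulation` and `solution`
+// are placeholders, not part of this patch):
+//
+//   MGTransferPrebuilt<Vector<double> > mg_transfer (hanging_node_constraints,
+//                                                    mg_constrained_dofs);
+//   mg_transfer.build_matrices (mg_dof_handler);
+//
+//   MGLevelObject<Vector<double> > mg_solution (0, triangulation.n_levels()-1);
+//   mg_transfer.copy_to_mg (mg_dof_handler, mg_solution, solution);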
}
}
-
+
+ template <int dim, typename number, int spacedim>
+ void
+ reinit_vector (const dealii::DoFHandler<dim,spacedim> &mg_dof,
+ std::vector<unsigned int> ,
+ MGLevelObject<dealii::Vector<number> > &v)
+ {
+ for (unsigned int level=v.get_minlevel();
- level<=v.get_maxlevel();++level)
++ level<=v.get_maxlevel(); ++level)
+ {
+ unsigned int n = mg_dof.n_dofs (level);
+ v[level].reinit(n);
+ }
+
+ }
+
- /**
- * Adjust vectors on all levels to
- * correct size. Here, we just
- * count the numbers of degrees
- * of freedom on each level and
- * @p reinit each level vector
- * to this length. The target_component
- * is handed to MGTools::count_dofs_per_block.
- * See for documentation there.
- */
+ /**
+ * Adjust vectors on all levels to
+ * correct size. Here, we just
+ * count the numbers of degrees
+ * of freedom on each level and
+ * @p reinit each level vector
+ * to this length. The target_component
+ * is handed to MGTools::count_dofs_per_block.
+    * See there for further documentation.
+ */
template <int dim, typename number, int spacedim>
void
reinit_vector (const dealii::MGDoFHandler<dim,spacedim> &mg_dof,
v[level].collect_sizes();
}
}
-
++
+ template <int dim, typename number, int spacedim>
+ void
+ reinit_vector (const dealii::DoFHandler<dim,spacedim> &mg_dof,
+ std::vector<unsigned int> target_component,
+ MGLevelObject<BlockVector<number> > &v)
+ {
+ const unsigned int n_blocks = mg_dof.get_fe().n_blocks();
+ if (target_component.size()==0)
+ {
+ target_component.resize(n_blocks);
- for (unsigned int i=0;i<n_blocks;++i)
++ for (unsigned int i=0; i<n_blocks; ++i)
+ target_component[i] = i;
+ }
+ Assert(target_component.size()==n_blocks,
+ ExcDimensionMismatch(target_component.size(),n_blocks));
+ const unsigned int max_block
+ = *std::max_element (target_component.begin(),
+ target_component.end());
+ const unsigned int n_target_blocks = max_block + 1;
+
+ std::vector<std::vector<unsigned int> >
- ndofs(mg_dof.get_tria().n_levels(),
- std::vector<unsigned int>(n_target_blocks));
++ ndofs(mg_dof.get_tria().n_levels(),
++ std::vector<unsigned int>(n_target_blocks));
+ MGTools::count_dofs_per_block (mg_dof, ndofs, target_component);
+
+ for (unsigned int level=v.get_minlevel();
- level<=v.get_maxlevel();++level)
++ level<=v.get_maxlevel(); ++level)
+ {
+ v[level].reinit(n_target_blocks);
+ for (unsigned int b=0; b<n_target_blocks; ++b)
+ v[level].block(b).reinit(ndofs[level][b]);
+ v[level].collect_sizes();
+ }
+ }
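+  // Illustration (hypothetical numbers, not part of this patch): for a
+  // scalar element on a mesh whose three levels carry 9, 25 and 81 dofs,
+  // the Vector<number> overload of reinit_vector further above, called as
+  //
+  //   MGLevelObject<Vector<double> > v (0, 2);
+  //   reinit_vector (dof_handler, std::vector<unsigned int>(), v);
+  //
+  // leaves v[0], v[1] and v[2] with sizes 9, 25 and 81, respectively.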
}
- const DoFHandler<dim,spacedim>& dof_handler,
- MGLevelObject<VECTOR>& dst,
- const InVector& src) const
+template <class VECTOR>
+template <int dim, class InVector, int spacedim>
+void
+MGTransferPrebuilt<VECTOR>::copy_to_mg (
- for (unsigned int level=dof_handler.get_tria().n_levels();level != 0;)
++ const DoFHandler<dim,spacedim> &dof_handler,
++ MGLevelObject<VECTOR> &dst,
++ const InVector &src) const
+{
+ reinit_vector(dof_handler, component_to_block_map, dst);
+ bool first = true;
- VECTOR& dst_level = dst[level];
++ for (unsigned int level=dof_handler.get_tria().n_levels(); level != 0;)
+ {
+ --level;
- i != copy_indices[level].end();++i)
- dst_level(i->second) = src(i->first);
-
- // For non-DG: degrees of
- // freedom in the refinement
- // face may need special
- // attention, since they belong
- // to the coarse level, but
- // have fine level basis
- // functions
++ VECTOR &dst_level = dst[level];
+
+ typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
+ for (IT i= copy_indices[level].begin();
- restrict_and_add (level+1, dst[level], dst[level+1]);
++ i != copy_indices[level].end(); ++i)
++ dst_level(i->second) = src(i->first);
++
++ // For non-DG: degrees of
++ // freedom in the refinement
++ // face may need special
++ // attention, since they belong
++ // to the coarse level, but
++ // have fine level basis
++ // functions
+ if (!first)
++ restrict_and_add (level+1, dst[level], dst[level+1]);
+ first = false;
+ }
+}
+
+
+
template <class VECTOR>
template <int dim, class OutVector, int spacedim>
void
- const DoFHandler<dim,spacedim>& dof_handler,
- OutVector& dst,
- const MGLevelObject<VECTOR>& src) const
+template <class VECTOR>
+template <int dim, class OutVector, int spacedim>
+void
+MGTransferPrebuilt<VECTOR>::copy_from_mg(
- // For non-DG: degrees of
- // freedom in the refinement
- // face may need special
- // attention, since they belong
- // to the coarse level, but
- // have fine level basis
- // functions
++ const DoFHandler<dim,spacedim> &dof_handler,
++ OutVector &dst,
++ const MGLevelObject<VECTOR> &src) const
+{
- for (unsigned int level=0;level<dof_handler.get_tria().n_levels();++level)
- {
- typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
++ // For non-DG: degrees of
++ // freedom in the refinement
++ // face may need special
++ // attention, since they belong
++ // to the coarse level, but
++ // have fine level basis
++ // functions
+ dst = 0;
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end();++i)
- dst(i->first) = src[level](i->second);
- }
++ for (unsigned int level=0; level<dof_handler.get_tria().n_levels(); ++level)
++ {
++ typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
+
++ for (IT i= copy_indices[level].begin();
++ i != copy_indices[level].end(); ++i)
++ dst(i->first) = src[level](i->second);
++ }
+ if (constraints != 0)
+ constraints->condense(dst);
+}
+
+
+
template <class VECTOR>
template <int dim, class OutVector, int spacedim>
void
- const DoFHandler<dim,spacedim>& dof_handler,
+template <class VECTOR>
+template <int dim, class OutVector, int spacedim>
+void
+MGTransferPrebuilt<VECTOR>::copy_from_mg_add (
- // For non-DG: degrees of
- // freedom in the refinement
- // face may need special
- // attention, since they belong
- // to the coarse level, but
- // have fine level basis
- // functions
- for (unsigned int level=0;level<dof_handler.get_tria().n_levels();++level)
- {
- typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end();++i)
- dst(i->first) += src[level](i->second);
- }
++ const DoFHandler<dim,spacedim> &dof_handler,
+ OutVector &dst,
+ const MGLevelObject<VECTOR> &src) const
+{
++ // For non-DG: degrees of
++ // freedom in the refinement
++ // face may need special
++ // attention, since they belong
++ // to the coarse level, but
++ // have fine level basis
++ // functions
++ for (unsigned int level=0; level<dof_handler.get_tria().n_levels(); ++level)
++ {
++ typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator IT;
++ for (IT i= copy_indices[level].begin();
++ i != copy_indices[level].end(); ++i)
++ dst(i->first) += src[level](i->second);
++ }
+}
+
+
+
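(As an aside, a hedged sketch of the intended calling sequence when these transfer routines are used by hand instead of through PreconditionMG; all object names are placeholders for things set up elsewhere.)

// Sketch only: dof_handler, transfer (an MGTransferPrebuilt<Vector<double> >),
// mg (a Multigrid<Vector<double> >) and a global residual vector are assumed
// to exist already.
transfer.copy_to_mg (dof_handler, mg.defect, residual);    // global -> levels
mg.cycle ();                                               // one V-, W- or F-cycle
Vector<double> update (residual.size());
transfer.copy_from_mg (dof_handler, update, mg.solution);  // levels -> global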
template <class VECTOR>
void
MGTransferPrebuilt<VECTOR>::
template <class VECTOR>
class Multigrid : public Subscriptor
{
- public:
- /**
- * List of implemented cycle types.
- */
- enum Cycle
- {
- /// The V-cycle
- v_cycle,
- /// The W-cycle
- w_cycle,
- /// The F-cycle
- f_cycle
- };
-
- typedef VECTOR vector_type;
- typedef const VECTOR const_vector_type;
-
- /**
- * Constructor. The
- * MGDoFHandler is used to
- * determine the highest possible
- * level. <tt>transfer</tt> is an
- * object performing prolongation
- * and restriction.
- *
- * This function already
- * initializes the vectors which
- * will be used later in the
- * course of the
- * computations. You should
- * therefore create objects of
- * this type as late as possible.
- */
- template <int dim>
- Multigrid(const MGDoFHandler<dim>& mg_dof_handler,
- const MGMatrixBase<VECTOR>& matrix,
- const MGCoarseGridBase<VECTOR>& coarse,
- const MGTransferBase<VECTOR>& transfer,
- const MGSmootherBase<VECTOR>& pre_smooth,
- const MGSmootherBase<VECTOR>& post_smooth,
- Cycle cycle = v_cycle);
-
- template <int dim>
- Multigrid(const DoFHandler<dim>& mg_dof_handler,
- const MGMatrixBase<VECTOR>& matrix,
- const MGCoarseGridBase<VECTOR>& coarse,
- const MGTransferBase<VECTOR>& transfer,
- const MGSmootherBase<VECTOR>& pre_smooth,
- const MGSmootherBase<VECTOR>& post_smooth,
- Cycle cycle = v_cycle);
-
- /**
- * Experimental constructor for
- * cases in which no MGDoFHandler
- * is available.
- *
- * @warning Not intended for general use.
- */
- Multigrid(const unsigned int minlevel,
- const unsigned int maxlevel,
- const MGMatrixBase<VECTOR>& matrix,
- const MGCoarseGridBase<VECTOR>& coarse,
- const MGTransferBase<VECTOR>& transfer,
- const MGSmootherBase<VECTOR>& pre_smooth,
- const MGSmootherBase<VECTOR>& post_smooth,
- Cycle cycle = v_cycle);
-
- /**
- * Reinit this class according to
- * #minlevel and #maxlevel.
- */
- void reinit (const unsigned int minlevel,
- const unsigned int maxlevel);
-
- /**
- * Execute one multigrid
- * cycle. The type of cycle is
- * selected by the constructor
- * argument cycle. See the enum
- * Cycle for available types.
- */
- void cycle ();
-
- /**
- * Execute one step of the
- * V-cycle algorithm. This
- * function assumes, that the
- * multilevel vector #defect is
- * filled with the residual of an
- * outer defect correction
- * scheme. This is usually taken
- * care of by
- * PreconditionMG). After
- * vcycle(), the result is in the
- * multilevel vector
- * #solution. See
- * <tt>copy_*_mg</tt> in class
- * MGTools if you want to use
- * these vectors yourself.
- *
- * The actual work for this
- * function is done in
- * level_v_step().
- */
- void vcycle ();
-
- /**
- * @deprecated This function is
- * purely experimental and will
- * probably never be implemented
- * in a way that it can be
- * released.
- *
- * Perform a multigrid cycle with
- * a vector which is already a
- * level vector. Use of this
- * function assumes that there is
- * NO local refinement and that
- * both vectors are on the finest
- * level of this Multigrid
- * object.
- */
- void vmult(VECTOR& dst, const VECTOR& src) const;
-
- /**
- * @deprecated This function is
- * purely experimental and will
- * probably never be implemented
- * in a way that it can be
- * released.
- *
- * Perform a multigrid cycle with
- * a vector which is already a
- * level vector. Use of this
- * function assumes that there is
- * NO local refinement and that
- * both vectors are on the finest
- * level of this Multigrid
- * object.
- */
- void vmult_add(VECTOR& dst, const VECTOR& src) const;
-
- /**
- * @deprecated Even worse than
- * vmult(), this function is not
- * even implemented, but just
- * declared such that certain
- * objects relying on it can be
- * constructed.
- */
- void Tvmult(VECTOR& dst, const VECTOR& src) const;
-
- /**
- * @deprecated Even worse than
- * vmult(), this function is not
- * even implemented, but just
- * declared such that certain
- * objects relying on it can be
- * constructed.
- */
- void Tvmult_add(VECTOR& dst, const VECTOR& src) const;
-
- /**
- * Set additional matrices to
- * correct residual computation
- * at refinement edges. Since we
- * only smoothen in the interior
- * of the refined part of the
- * mesh, the coupling across the
- * refinement edge is
- * missing. This coupling is
- * provided by these two
- * matrices.
- *
- * @note While
- * <tt>edge_out.vmult</tt> is
- * used, for the second argument,
- * we use
- * <tt>edge_in.Tvmult</tt>. Thus,
- * <tt>edge_in</tt> should be
- * assembled in transposed
- * form. This saves a second
- * sparsity pattern for
- * <tt>edge_in</tt>. In
- * particular, for symmetric
- * operators, both arguments can
- * refer to the same matrix,
- * saving assembling of one of
- * them.
- */
- void set_edge_matrices (const MGMatrixBase<VECTOR>& edge_out,
- const MGMatrixBase<VECTOR>& edge_in);
-
- /**
- * Set additional matrices to
- * correct residual computation
- * at refinement edges. These
- * matrices originate from
- * discontinuous Galerkin methods
- * (see FE_DGQ etc.), where they
- * correspond to the edge fluxes
- * at the refinement edge between
- * two levels.
- *
- * @note While
- * <tt>edge_down.vmult</tt> is
- * used, for the second argument,
- * we use
- * <tt>edge_up.Tvmult</tt>. Thus,
- * <tt>edge_up</tt> should be
- * assembled in transposed
- * form. This saves a second
- * sparsity pattern for
- * <tt>edge_up</tt>. In
- * particular, for symmetric
- * operators, both arguments can
- * refer to the same matrix,
- * saving assembling of one of
- * them.
- */
- void set_edge_flux_matrices (const MGMatrixBase<VECTOR>& edge_down,
- const MGMatrixBase<VECTOR>& edge_up);
-
- /**
- * Return the finest level for
- * multigrid.
- */
- unsigned int get_maxlevel() const;
-
- /**
- * Return the coarsest level for
- * multigrid.
- */
- unsigned int get_minlevel() const;
-
- /**
- * Set the highest level for
- * which the multilevel method is
- * performed. By default, this is
- * the finest level of the
- * Triangulation; therefore, this
- * function will only accept
- * arguments smaller than the
- * current #maxlevel and not
- * smaller than the current
- * #minlevel.
- */
- void set_maxlevel (const unsigned int);
-
- /**
- * Set the coarse level for which
- * the multilevel method is
- * performed. By default, this is
- * zero. Accepted are
- * non-negative values not larger than
- * than the current #maxlevel.
- *
- * If <tt>relative</tt> ist
- * <tt>true</tt>, then this
- * function determins the number
- * of levels used, that is, it
- * sets #minlevel to
- * #maxlevel-<tt>level</tt>.
- *
- * @note The mesh on the coarsest
- * level must cover the whole
- * domain. There may not be
- * hanging nodes on #minlevel.
- *
- * @note If #minlevel is set to a
- * nonzero value, do not forget
- * to adjust your coarse grid
- * solver!
- */
- void set_minlevel (const unsigned int level,
- bool relative = false);
-
- /**
- * Chance #cycle_type used in cycle().
- */
- void set_cycle(Cycle);
-
- /**
- * Set the debug level. Higher
- * values will create more
- * debugging output during the
- * multigrid cycles.
- */
- void set_debug (const unsigned int);
-
- private:
-
- /**
- * The V-cycle multigrid method.
- * <tt>level</tt> is the level the
- * function starts on. It
- * will usually be called for the
- * highest level from outside,
- * but will then call itself
- * recursively for <tt>level-1</tt>,
- * unless we are on #minlevel
- * where the coarse grid solver
- * solves the problem exactly.
- */
- void level_v_step (const unsigned int level);
-
- /**
- * The actual W-cycle or F-cycle
- * multigrid method.
- * <tt>level</tt> is the level
- * the function starts on. It
- * will usually be called for the
- * highest level from outside,
- * but will then call itself
- * recursively for
- * <tt>level-1</tt>, unless we
- * are on #minlevel where the
- * coarse grid solver solves the
- * problem exactly.
- */
- void level_step (const unsigned int level, Cycle cycle);
-
- /**
- * Cycle type performed by the method cycle().
- */
- Cycle cycle_type;
-
- /**
- * Level for coarse grid solution.
- */
- unsigned int minlevel;
-
- /**
- * Highest level of cells.
- */
- unsigned int maxlevel;
-
- public:
- /**
- * Input vector for the
- * cycle. Contains the defect of
- * the outer method projected to
- * the multilevel vectors.
- */
- MGLevelObject<VECTOR> defect;
-
- /**
- * The solution update after the
- * multigrid step.
- */
- MGLevelObject<VECTOR> solution;
-
- private:
- /**
- * Auxiliary vector.
- */
- MGLevelObject<VECTOR> t;
-
- /**
- * Auxiliary vector for W- and
- * F-cycles. Left uninitialized
- * in V-cycle.
- */
- MGLevelObject<VECTOR> defect2;
-
-
- /**
- * The matrix for each level.
- */
- SmartPointer<const MGMatrixBase<VECTOR>,Multigrid<VECTOR> > matrix;
-
- /**
- * The matrix for each level.
- */
- SmartPointer<const MGCoarseGridBase<VECTOR>,Multigrid<VECTOR> > coarse;
-
- /**
- * Object for grid tranfer.
- */
- SmartPointer<const MGTransferBase<VECTOR>,Multigrid<VECTOR> > transfer;
-
- /**
- * The pre-smoothing object.
- */
- SmartPointer<const MGSmootherBase<VECTOR>,Multigrid<VECTOR> > pre_smooth;
-
- /**
- * The post-smoothing object.
- */
- SmartPointer<const MGSmootherBase<VECTOR>,Multigrid<VECTOR> > post_smooth;
-
- /**
- * Edge matrix from the interior
- * of the refined part to the
- * refinement edge.
- *
- * @note Only <tt>vmult</tt> is
- * used for these matrices.
- */
- SmartPointer<const MGMatrixBase<VECTOR> > edge_out;
-
- /**
- * Transpose edge matrix from the
- * refinement edge to the
- * interior of the refined part.
- *
- * @note Only <tt>Tvmult</tt> is
- * used for these matrices.
- */
- SmartPointer<const MGMatrixBase<VECTOR> > edge_in;
-
- /**
- * Edge matrix from fine to coarse.
- *
- * @note Only <tt>vmult</tt> is
- * used for these matrices.
- */
- SmartPointer<const MGMatrixBase<VECTOR>,Multigrid<VECTOR> > edge_down;
-
- /**
- * Transpose edge matrix from coarse to fine.
- *
- * @note Only <tt>Tvmult</tt> is
- * used for these matrices.
- */
- SmartPointer<const MGMatrixBase<VECTOR>,Multigrid<VECTOR> > edge_up;
-
- /**
- * Level for debug
- * output. Defaults to zero and
- * can be set by set_debug().
- */
- unsigned int debug;
-
- template<int dim, class VECTOR2, class TRANSFER> friend class PreconditionMG;
+ public:
+ /**
+ * List of implemented cycle types.
+ */
+ enum Cycle
+ {
+ /// The V-cycle
+ v_cycle,
+ /// The W-cycle
+ w_cycle,
+ /// The F-cycle
+ f_cycle
+ };
+
+ typedef VECTOR vector_type;
+ typedef const VECTOR const_vector_type;
+
+ /**
+ * Constructor. The
+ * MGDoFHandler is used to
+ * determine the highest possible
+ * level. <tt>transfer</tt> is an
+ * object performing prolongation
+ * and restriction.
+ *
+ * This function already
+ * initializes the vectors which
+ * will be used later in the
+ * course of the
+ * computations. You should
+ * therefore create objects of
+ * this type as late as possible.
+ */
+ template <int dim>
+ Multigrid(const MGDoFHandler<dim> &mg_dof_handler,
+ const MGMatrixBase<VECTOR> &matrix,
+ const MGCoarseGridBase<VECTOR> &coarse,
+ const MGTransferBase<VECTOR> &transfer,
+ const MGSmootherBase<VECTOR> &pre_smooth,
+ const MGSmootherBase<VECTOR> &post_smooth,
+ Cycle cycle = v_cycle);
+
++ template <int dim>
++ Multigrid(const DoFHandler<dim> &mg_dof_handler,
++ const MGMatrixBase<VECTOR> &matrix,
++ const MGCoarseGridBase<VECTOR> &coarse,
++ const MGTransferBase<VECTOR> &transfer,
++ const MGSmootherBase<VECTOR> &pre_smooth,
++ const MGSmootherBase<VECTOR> &post_smooth,
++ Cycle cycle = v_cycle);
++
+ /**
+ * Experimental constructor for
+ * cases in which no MGDoFHandler
+ * is available.
+ *
+ * @warning Not intended for general use.
+ */
+ Multigrid(const unsigned int minlevel,
+ const unsigned int maxlevel,
+ const MGMatrixBase<VECTOR> &matrix,
+ const MGCoarseGridBase<VECTOR> &coarse,
+ const MGTransferBase<VECTOR> &transfer,
+ const MGSmootherBase<VECTOR> &pre_smooth,
+ const MGSmootherBase<VECTOR> &post_smooth,
+ Cycle cycle = v_cycle);
+
+ /**
+ * Reinit this class according to
+ * #minlevel and #maxlevel.
+ */
+ void reinit (const unsigned int minlevel,
+ const unsigned int maxlevel);
+
+ /**
+ * Execute one multigrid
+ * cycle. The type of cycle is
+ * selected by the constructor
+ * argument cycle. See the enum
+ * Cycle for available types.
+ */
+ void cycle ();
+
+ /**
+ * Execute one step of the
+ * V-cycle algorithm. This
+ * function assumes, that the
+ * multilevel vector #defect is
+ * filled with the residual of an
+ * outer defect correction
+ * scheme. This is usually taken
+ * care of by
+ * PreconditionMG. After
+ * vcycle(), the result is in the
+ * multilevel vector
+ * #solution. See
+ * <tt>copy_*_mg</tt> in class
+ * MGTools if you want to use
+ * these vectors yourself.
+ *
+ * The actual work for this
+ * function is done in
+ * level_v_step().
+ */
+ void vcycle ();
+
+ /**
+ * @deprecated This function is
+ * purely experimental and will
+ * probably never be implemented
+ * in a way that it can be
+ * released.
+ *
+ * Perform a multigrid cycle with
+ * a vector which is already a
+ * level vector. Use of this
+ * function assumes that there is
+ * NO local refinement and that
+ * both vectors are on the finest
+ * level of this Multigrid
+ * object.
+ */
+ void vmult(VECTOR &dst, const VECTOR &src) const;
+
+ /**
+ * @deprecated This function is
+ * purely experimental and will
+ * probably never be implemented
+ * in a way that it can be
+ * released.
+ *
+ * Perform a multigrid cycle with
+ * a vector which is already a
+ * level vector. Use of this
+ * function assumes that there is
+ * NO local refinement and that
+ * both vectors are on the finest
+ * level of this Multigrid
+ * object.
+ */
+ void vmult_add(VECTOR &dst, const VECTOR &src) const;
+
+ /**
+ * @deprecated Even worse than
+ * vmult(), this function is not
+ * even implemented, but just
+ * declared such that certain
+ * objects relying on it can be
+ * constructed.
+ */
+ void Tvmult(VECTOR &dst, const VECTOR &src) const;
+
+ /**
+ * @deprecated Even worse than
+ * vmult(), this function is not
+ * even implemented, but just
+ * declared such that certain
+ * objects relying on it can be
+ * constructed.
+ */
+ void Tvmult_add(VECTOR &dst, const VECTOR &src) const;
+
+ /**
+ * Set additional matrices to
+ * correct residual computation
+ * at refinement edges. Since we
+ * only smoothen in the interior
+ * of the refined part of the
+ * mesh, the coupling across the
+ * refinement edge is
+ * missing. This coupling is
+ * provided by these two
+ * matrices.
+ *
+ * @note While
+ * <tt>edge_out.vmult</tt> is
+ * used, for the second argument,
+ * we use
+ * <tt>edge_in.Tvmult</tt>. Thus,
+ * <tt>edge_in</tt> should be
+ * assembled in transposed
+ * form. This saves a second
+ * sparsity pattern for
+ * <tt>edge_in</tt>. In
+ * particular, for symmetric
+ * operators, both arguments can
+ * refer to the same matrix,
+ * saving assembling of one of
+ * them.
+ */
+ void set_edge_matrices (const MGMatrixBase<VECTOR> &edge_out,
+ const MGMatrixBase<VECTOR> &edge_in);
+
+ /**
+ * Set additional matrices to
+ * correct residual computation
+ * at refinement edges. These
+ * matrices originate from
+ * discontinuous Galerkin methods
+ * (see FE_DGQ etc.), where they
+ * correspond to the edge fluxes
+ * at the refinement edge between
+ * two levels.
+ *
+ * @note While
+ * <tt>edge_down.vmult</tt> is
+ * used, for the second argument,
+ * we use
+ * <tt>edge_up.Tvmult</tt>. Thus,
+ * <tt>edge_up</tt> should be
+ * assembled in transposed
+ * form. This saves a second
+ * sparsity pattern for
+ * <tt>edge_up</tt>. In
+ * particular, for symmetric
+ * operators, both arguments can
+ * refer to the same matrix,
+ * saving assembling of one of
+ * them.
+ */
+ void set_edge_flux_matrices (const MGMatrixBase<VECTOR> &edge_down,
+ const MGMatrixBase<VECTOR> &edge_up);
+
+ /**
+ * Return the finest level for
+ * multigrid.
+ */
+ unsigned int get_maxlevel() const;
+
+ /**
+ * Return the coarsest level for
+ * multigrid.
+ */
+ unsigned int get_minlevel() const;
+
+ /**
+ * Set the highest level for
+ * which the multilevel method is
+ * performed. By default, this is
+ * the finest level of the
+ * Triangulation; therefore, this
+ * function will only accept
+ * arguments smaller than the
+ * current #maxlevel and not
+ * smaller than the current
+ * #minlevel.
+ */
+ void set_maxlevel (const unsigned int);
+
+ /**
+ * Set the coarse level for which
+ * the multilevel method is
+ * performed. By default, this is
+ * zero. Accepted are
+ * non-negative values not larger
+ * than the current #maxlevel.
+ *
+ * If <tt>relative</tt> is
+ * <tt>true</tt>, then this
+ * function determines the number
+ * of levels used, that is, it
+ * sets #minlevel to
+ * #maxlevel-<tt>level</tt>.
+ *
+ * @note The mesh on the coarsest
+ * level must cover the whole
+ * domain. There may not be
+ * hanging nodes on #minlevel.
+ *
+ * @note If #minlevel is set to a
+ * nonzero value, do not forget
+ * to adjust your coarse grid
+ * solver!
+ */
+ void set_minlevel (const unsigned int level,
+ bool relative = false);
+
+ /**
+ * Change the #cycle_type used in cycle().
+ */
+ void set_cycle(Cycle);
+
+ /**
+ * Set the debug level. Higher
+ * values will create more
+ * debugging output during the
+ * multigrid cycles.
+ */
+ void set_debug (const unsigned int);
+
+ private:
+
+ /**
+ * The V-cycle multigrid method.
+ * <tt>level</tt> is the level the
+ * function starts on. It
+ * will usually be called for the
+ * highest level from outside,
+ * but will then call itself
+ * recursively for <tt>level-1</tt>,
+ * unless we are on #minlevel
+ * where the coarse grid solver
+ * solves the problem exactly.
+ */
+ void level_v_step (const unsigned int level);
+
+ /**
+ * The actual W-cycle or F-cycle
+ * multigrid method.
+ * <tt>level</tt> is the level
+ * the function starts on. It
+ * will usually be called for the
+ * highest level from outside,
+ * but will then call itself
+ * recursively for
+ * <tt>level-1</tt>, unless we
+ * are on #minlevel where the
+ * coarse grid solver solves the
+ * problem exactly.
+ */
+ void level_step (const unsigned int level, Cycle cycle);
+
+ /**
+ * Cycle type performed by the method cycle().
+ */
+ Cycle cycle_type;
+
+ /**
+ * Level for coarse grid solution.
+ */
+ unsigned int minlevel;
+
+ /**
+ * Highest level of cells.
+ */
+ unsigned int maxlevel;
+
+ public:
+ /**
+ * Input vector for the
+ * cycle. Contains the defect of
+ * the outer method projected to
+ * the multilevel vectors.
+ */
+ MGLevelObject<VECTOR> defect;
+
+ /**
+ * The solution update after the
+ * multigrid step.
+ */
+ MGLevelObject<VECTOR> solution;
+
+ private:
+ /**
+ * Auxiliary vector.
+ */
+ MGLevelObject<VECTOR> t;
+
+ /**
+ * Auxiliary vector for W- and
+ * F-cycles. Left uninitialized
+ * in V-cycle.
+ */
+ MGLevelObject<VECTOR> defect2;
+
+
+ /**
+ * The matrix for each level.
+ */
+ SmartPointer<const MGMatrixBase<VECTOR>,Multigrid<VECTOR> > matrix;
+
+ /**
+ * The matrix for each level.
+ */
+ SmartPointer<const MGCoarseGridBase<VECTOR>,Multigrid<VECTOR> > coarse;
+
+ /**
+ * Object for grid transfer.
+ */
+ SmartPointer<const MGTransferBase<VECTOR>,Multigrid<VECTOR> > transfer;
+
+ /**
+ * The pre-smoothing object.
+ */
+ SmartPointer<const MGSmootherBase<VECTOR>,Multigrid<VECTOR> > pre_smooth;
+
+ /**
+ * The post-smoothing object.
+ */
+ SmartPointer<const MGSmootherBase<VECTOR>,Multigrid<VECTOR> > post_smooth;
+
+ /**
+ * Edge matrix from the interior
+ * of the refined part to the
+ * refinement edge.
+ *
+ * @note Only <tt>vmult</tt> is
+ * used for these matrices.
+ */
+ SmartPointer<const MGMatrixBase<VECTOR> > edge_out;
+
+ /**
+ * Transpose edge matrix from the
+ * refinement edge to the
+ * interior of the refined part.
+ *
+ * @note Only <tt>Tvmult</tt> is
+ * used for these matrices.
+ */
+ SmartPointer<const MGMatrixBase<VECTOR> > edge_in;
+
+ /**
+ * Edge matrix from fine to coarse.
+ *
+ * @note Only <tt>vmult</tt> is
+ * used for these matrices.
+ */
+ SmartPointer<const MGMatrixBase<VECTOR>,Multigrid<VECTOR> > edge_down;
+
+ /**
+ * Transpose edge matrix from coarse to fine.
+ *
+ * @note Only <tt>Tvmult</tt> is
+ * used for these matrices.
+ */
+ SmartPointer<const MGMatrixBase<VECTOR>,Multigrid<VECTOR> > edge_up;
+
+ /**
+ * Level for debug
+ * output. Defaults to zero and
+ * can be set by set_debug().
+ */
+ unsigned int debug;
+
+ template<int dim, class VECTOR2, class TRANSFER> friend class PreconditionMG;
};
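(A hedged usage sketch of the class just declared, exercising the new DoFHandler-based constructor added by this patch; mg_matrix, coarse_solver, transfer, smoother and mg_interface_matrix are assumed placeholders for objects implementing the respective MG* interfaces.)

// Sketch only, not taken from the library's examples.
Multigrid<Vector<double> > mg (dof_handler, mg_matrix, coarse_solver,
                               transfer, smoother, smoother,
                               Multigrid<Vector<double> >::v_cycle);
// For a symmetric operator the same interface matrix may be passed twice,
// since edge_in is only ever applied through Tvmult():
mg.set_edge_matrices (mg_interface_matrix, mg_interface_matrix);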
template<int dim, class VECTOR, class TRANSFER>
class PreconditionMG : public Subscriptor
{
- public:
- /**
- * Constructor.
- * Arguments are the multigrid object,
- * pre-smoother, post-smoother and
- * coarse grid solver.
- */
- PreconditionMG(const DoFHandler<dim>& dof_handler,
- Multigrid<VECTOR>& mg,
- const TRANSFER& transfer);
-
- /**
- * Dummy function needed by other classes.
- */
- bool empty () const;
-
- /**
- * Preconditioning operator.
- * Calls the @p vcycle function
- * of the @p MG object passed to
- * the constructor.
- *
- * This is the operator used by
- * LAC iterative solvers.
- */
- template<class VECTOR2>
- void vmult (VECTOR2 &dst,
- const VECTOR2 &src) const;
-
- /**
- * Preconditioning operator.
- * Calls the @p vcycle function
- * of the @p MG object passed to
- * the constructor.
- */
- template<class VECTOR2>
- void vmult_add (VECTOR2 &dst,
- const VECTOR2 &src) const;
-
- /**
- * Tranposed preconditioning operator.
- *
- * Not implemented, but the
- * definition may be needed.
- */
- template<class VECTOR2>
- void Tvmult (VECTOR2 &dst,
- const VECTOR2 &src) const;
-
- /**
- * Tranposed preconditioning operator.
- *
- * Not implemented, but the
- * definition may be needed.
- */
- template<class VECTOR2>
- void Tvmult_add (VECTOR2 &dst,
- const VECTOR2 &src) const;
-
- private:
- /**
- * Associated @p MGDoFHandler.
- */
- SmartPointer<const DoFHandler<dim>,PreconditionMG<dim,VECTOR,TRANSFER> > dof_handler;
-
- /**
- * The multigrid object.
- */
- SmartPointer<Multigrid<VECTOR>,PreconditionMG<dim,VECTOR,TRANSFER> > multigrid;
-
- /**
- * Object for grid tranfer.
- */
- SmartPointer<const TRANSFER,PreconditionMG<dim,VECTOR,TRANSFER> > transfer;
+ public:
+ /**
+ * Constructor.
+ * Arguments are the DoFHandler,
+ * the multigrid object, and the
+ * transfer object between levels.
+ */
- PreconditionMG(const MGDoFHandler<dim> &mg_dof,
++ PreconditionMG(const DoFHandler<dim> &dof_handler,
+ Multigrid<VECTOR> &mg,
+ const TRANSFER &transfer);
+
+ /**
+ * Dummy function needed by other classes.
+ */
+ bool empty () const;
+
+ /**
+ * Preconditioning operator.
+ * Calls the @p vcycle function
+ * of the @p MG object passed to
+ * the constructor.
+ *
+ * This is the operator used by
+ * LAC iterative solvers.
+ */
+ template<class VECTOR2>
+ void vmult (VECTOR2 &dst,
+ const VECTOR2 &src) const;
+
+ /**
+ * Preconditioning operator.
+ * Calls the @p vcycle function
+ * of the @p MG object passed to
+ * the constructor.
+ */
+ template<class VECTOR2>
+ void vmult_add (VECTOR2 &dst,
+ const VECTOR2 &src) const;
+
+ /**
+ * Transposed preconditioning operator.
+ *
+ * Not implemented, but the
+ * definition may be needed.
+ */
+ template<class VECTOR2>
+ void Tvmult (VECTOR2 &dst,
+ const VECTOR2 &src) const;
+
+ /**
+ * Transposed preconditioning operator.
+ *
+ * Not implemented, but the
+ * definition may be needed.
+ */
+ template<class VECTOR2>
+ void Tvmult_add (VECTOR2 &dst,
+ const VECTOR2 &src) const;
+
+ private:
+ /**
+ * Associated @p DoFHandler.
+ */
- SmartPointer<const MGDoFHandler<dim>,PreconditionMG<dim,VECTOR,TRANSFER> > mg_dof_handler;
++ SmartPointer<const DoFHandler<dim>,PreconditionMG<dim,VECTOR,TRANSFER> > dof_handler;
+
+ /**
+ * The multigrid object.
+ */
+ SmartPointer<Multigrid<VECTOR>,PreconditionMG<dim,VECTOR,TRANSFER> > multigrid;
+
+ /**
+ * Object for grid transfer.
+ */
+ SmartPointer<const TRANSFER,PreconditionMG<dim,VECTOR,TRANSFER> > transfer;
};
/*@}*/
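(Correspondingly, a hedged sketch of wrapping such a Multigrid object into the preconditioner declared above and handing it to a Krylov solver; system_matrix, solution, system_rhs and solver_control are placeholders.)

// Sketch only: mg and transfer are the objects from the previous sketch.
PreconditionMG<dim, Vector<double>, MGTransferPrebuilt<Vector<double> > >
  preconditioner (dof_handler, mg, transfer);

SolverCG<Vector<double> > cg (solver_control);
cg.solve (system_matrix, solution, system_rhs, preconditioner);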
{}
- Multigrid<VECTOR>::Multigrid (const DoFHandler<dim>& dof_handler,
- const MGMatrixBase<VECTOR>& matrix,
- const MGCoarseGridBase<VECTOR>& coarse,
- const MGTransferBase<VECTOR>& transfer,
- const MGSmootherBase<VECTOR>& pre_smooth,
- const MGSmootherBase<VECTOR>& post_smooth,
- Cycle cycle)
- :
- cycle_type(cycle),
- minlevel(0),
- maxlevel(dof_handler.get_tria().n_levels()-1),
- defect(minlevel,maxlevel),
- solution(minlevel,maxlevel),
- t(minlevel,maxlevel),
- defect2(minlevel,maxlevel),
- matrix(&matrix, typeid(*this).name()),
- coarse(&coarse, typeid(*this).name()),
- transfer(&transfer, typeid(*this).name()),
- pre_smooth(&pre_smooth, typeid(*this).name()),
- post_smooth(&post_smooth, typeid(*this).name()),
- edge_down(0, typeid(*this).name()),
- edge_up(0, typeid(*this).name()),
- debug(0)
+template <class VECTOR>
+template <int dim>
++Multigrid<VECTOR>::Multigrid (const DoFHandler<dim> &dof_handler,
++ const MGMatrixBase<VECTOR> &matrix,
++ const MGCoarseGridBase<VECTOR> &coarse,
++ const MGTransferBase<VECTOR> &transfer,
++ const MGSmootherBase<VECTOR> &pre_smooth,
++ const MGSmootherBase<VECTOR> &post_smooth,
++ Cycle cycle)
++ :
++ cycle_type(cycle),
++ minlevel(0),
++ maxlevel(dof_handler.get_tria().n_levels()-1),
++ defect(minlevel,maxlevel),
++ solution(minlevel,maxlevel),
++ t(minlevel,maxlevel),
++ defect2(minlevel,maxlevel),
++ matrix(&matrix, typeid(*this).name()),
++ coarse(&coarse, typeid(*this).name()),
++ transfer(&transfer, typeid(*this).name()),
++ pre_smooth(&pre_smooth, typeid(*this).name()),
++ post_smooth(&post_smooth, typeid(*this).name()),
++ edge_down(0, typeid(*this).name()),
++ edge_up(0, typeid(*this).name()),
++ debug(0)
+{}
+
+
template <class VECTOR>
inline
template<int dim, class VECTOR, class TRANSFER>
PreconditionMG<dim, VECTOR, TRANSFER>
- ::PreconditionMG(const DoFHandler<dim>& dof_handler,
- Multigrid<VECTOR>& mg,
- const TRANSFER& transfer)
- :
- dof_handler(&dof_handler),
- multigrid(&mg),
- transfer(&transfer)
-::PreconditionMG(const MGDoFHandler<dim> &mg_dof_handler,
++::PreconditionMG(const DoFHandler<dim> &dof_handler,
+ Multigrid<VECTOR> &mg,
+ const TRANSFER &transfer)
+ :
- mg_dof_handler(&mg_dof_handler),
++ dof_handler(&dof_handler),
+ multigrid(&mg),
+ transfer(&transfer)
{}
template<int dim, class VECTOR, class TRANSFER>
template<class VECTOR2>
void
PreconditionMG<dim, VECTOR, TRANSFER>::vmult (
- VECTOR2& dst,
- const VECTOR2& src) const
+ VECTOR2 &dst,
+ const VECTOR2 &src) const
{
- transfer->copy_to_mg(*mg_dof_handler,
+ transfer->copy_to_mg(*dof_handler,
- multigrid->defect,
- src);
+ multigrid->defect,
+ src);
multigrid->cycle();
- transfer->copy_from_mg(*mg_dof_handler,
+ transfer->copy_from_mg(*dof_handler,
- dst,
- multigrid->solution);
+ dst,
+ multigrid->solution);
}
template<class VECTOR2>
void
PreconditionMG<dim, VECTOR, TRANSFER>::vmult_add (
- VECTOR2& dst,
- const VECTOR2& src) const
+ VECTOR2 &dst,
+ const VECTOR2 &src) const
{
- transfer->copy_to_mg(*mg_dof_handler,
+ transfer->copy_to_mg(*dof_handler,
- multigrid->defect,
- src);
+ multigrid->defect,
+ src);
multigrid->cycle();
- transfer->copy_from_mg_add(*mg_dof_handler,
+ transfer->copy_from_mg_add(*dof_handler,
- dst,
- multigrid->solution);
+ dst,
+ multigrid->solution);
}
*/
class DerivativeApproximation
{
+ public:
+ /**
+ * This function is used to
+ * obtain an approximation of the
+ * gradient. Pass it the DoF
+ * handler object that describes
+ * the finite element field, a
+ * nodal value vector, and
+ * receive the cell-wise
+ * Euclidean norm of the
+ * approximated gradient.
+ *
+ * The last parameter denotes the
+ * solution component, for which the
+ * gradient is to be computed. It
+ * defaults to the first component. For
+ * scalar elements, this is the only
+ * valid choice; for vector-valued ones,
+ * any component between zero and the
+ * number of vector components can be
+ * given here.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_gradient (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * Calls the @p approximate_gradient
+ * function, see above, with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_gradient (const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * This function is the analogue
+ * to the one above, computing
+ * finite difference
+ * approximations of the tensor
+ * of second derivatives. Pass it
+ * the DoF handler object that
+ * describes the finite element
+ * field, a nodal value vector,
+ * and receive the cell-wise
+ * spectral norm of the
+ * approximated tensor of second
+ * derivatives. The spectral norm
+ * is the matrix norm associated
+ * to the $l_2$ vector norm.
+ *
+ * The last parameter denotes the
+ * solution component, for which
+ * the gradient is to be
+ * computed. It defaults to the
+ * first component. For
+ * scalar elements, this is the only
+ * valid choice; for vector-valued ones,
+ * any component between zero and the
+ * number of vector components can be
+ * given here.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_second_derivative (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * Calls the @p approximate_second_derivative
+ * function, see above, with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
+ template <int dim, template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_second_derivative (const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ Vector<float> &derivative_norm,
+ const unsigned int component = 0);
+
+ /**
+ * This function calculates the
+ * <tt>order</tt>-th order approximate
+ * derivative and returns the full tensor
+ * for a single cell.
+ *
+ * The last parameter denotes the
+ * solution component, for which
+ * the gradient is to be
+ * computed. It defaults to the
+ * first component. For
+ * scalar elements, this is the only
+ * valid choice; for vector-valued ones,
+ * any component between zero and the
+ * number of vector components can be
+ * given here.
+ */
+
+ template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
+ static void
+ approximate_derivative_tensor (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const typename DH<dim,spacedim>::active_cell_iterator &cell,
+ Tensor<order,dim> &derivative,
+ const unsigned int component = 0);
+
+ /**
+ * Same as above, with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
+
+ template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
+ static void
+ approximate_derivative_tensor (const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const typename DH<dim,spacedim>::active_cell_iterator &cell,
+ Tensor<order,dim> &derivative,
+ const unsigned int component = 0);
+
+ /**
+ * Return the norm of the derivative.
+ */
+ template <int dim, int order>
+ static double
+ derivative_norm(const Tensor<order,dim> &derivative);
+
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidVectorLength,
+ int, int,
+ << "Vector has length " << arg1 << ", but should have "
+ << arg2);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInsufficientDirections);
+
+ private:
+
+ /**
+ * The following class is used to
+ * describe the data needed to
+ * compute the finite difference
+ * approximation to the gradient
+ * on a cell. See the general
+ * documentation of this class
+ * for more information on
+ * implementational details.
+ *
+ * @author Wolfgang Bangerth, 2000
+ */
+ template <int dim>
+ class Gradient
+ {
public:
- /**
- * This function is used to
- * obtain an approximation of the
- * gradient. Pass it the DoF
- * handler object that describes
- * the finite element field, a
- * nodal value vector, and
- * receive the cell-wise
- * Euclidian norm of the
- * approximated gradient.
- *
- * The last parameter denotes the
- * solution component, for which the
- * gradient is to be computed. It
- * defaults to the first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_gradient (const Mapping<dim,spacedim> &mapping,
+ /**
+ * Declare which data fields have
+ * to be updated for the function
+ * @p get_projected_derivative
+ * to work.
+ */
+ static const UpdateFlags update_flags;
+
+ /**
+ * Declare the data type which
+ * holds the derivative described
+ * by this class.
+ */
+ typedef Tensor<1,dim> Derivative;
+
+ /**
+ * Likewise declare the data type
+ * that holds the derivative
+ * projected to a certain
+ * direction.
+ */
+ typedef double ProjectedDerivative;
+
+ /**
+ * Given an FEValues object
+ * initialized to a cell, and a
+ * solution vector, extract the
+ * desired derivative at the
+ * first quadrature point (which
+ * is the only one, as we only
+ * evaluate the finite element
+ * field at the center of each
+ * cell).
+ */
+ template <class InputVector, int spacedim>
+ static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
++ get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
+ const InputVector &solution,
+ const unsigned int component);
+
+ /**
+ * Return the norm of the
+ * derivative object. Here, for
+ * the gradient, we choose the
+ * Euclidean norm of the gradient
+ * vector.
+ */
+ static double derivative_norm (const Derivative &d);
+
+ /**
+ * If for the present derivative
+ * order, symmetrization of the
+ * derivative tensor is
+ * necessary, then do so on the
+ * argument.
+ *
+ * For the first derivatives, no
+ * such thing is necessary, so
+ * this function is a no-op.
+ */
+ static void symmetrize (Derivative &derivative_tensor);
+ };
+
+
+
+ /**
+ * The following class is used to
+ * describe the data needed to
+ * compute the finite difference
+ * approximation to the second
+ * derivatives on a cell. See the
+ * general documentation of this
+ * class for more information on
+ * implementational details.
+ *
+ * @author Wolfgang Bangerth, 2000
+ */
+ template <int dim>
+ class SecondDerivative
+ {
+ public:
+ /**
+ * Declare which data fields have
+ * to be updated for the function
+ * @p get_projected_derivative
+ * to work.
+ */
+ static const UpdateFlags update_flags;
+
+ /**
+ * Declare the data type which
+ * holds the derivative described
+ * by this class.
+ */
+ typedef Tensor<2,dim> Derivative;
+
+ /**
+ * Likewise declare the data type
+ * that holds the derivative
+ * projected to a certain
+ * direction.
+ */
+ typedef Tensor<1,dim> ProjectedDerivative;
+
+ /**
+ * Given an FEValues object
+ * initialized to a cell, and a
+ * solution vector, extract the
+ * desired derivative at the
+ * first quadrature point (which
+ * is the only one, as we only
+ * evaluate the finite element
+ * field at the center of each
+ * cell).
+ */
+ template <class InputVector, int spacedim>
+ static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
++ get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
+ const InputVector &solution,
+ const unsigned int component);
+
+ /**
+ * Return the norm of the
+ * derivative object. Here, for
+ * the (symmetric) tensor of
+ * second derivatives, we choose
+ * the absolute value of the
+ * largest eigenvalue, which is
+ * the matrix norm associated to
+ * the $l_2$ norm of vectors. It
+ * is also the largest value of
+ * the curvature of the solution.
+ */
+ static double derivative_norm (const Derivative &d);
+
+ /**
+ * If for the present derivative
+ * order, symmetrization of the
+ * derivative tensor is
+ * necessary, then do so on the
+ * argument.
+ *
+ * For the second derivatives,
+ * each entry of the tensor is
+ * set to the mean of its value
+ * and the value of the transpose
+ * element.
+ *
+ * Note that this function
+ * actually modifies its
+ * argument.
+ */
+ static void symmetrize (Derivative &derivative_tensor);
+ };
+
+ template <int dim>
+ class ThirdDerivative
+ {
+ public:
+ /**
+ * Declare which data fields have
+ * to be updated for the function
+ * @p get_projected_derivative
+ * to work.
+ */
+ static const UpdateFlags update_flags;
+
+ /**
+ * Declare the data type which
+ * holds the derivative described
+ * by this class.
+ */
+ typedef Tensor<3,dim> Derivative;
+
+ /**
+ * Likewise declare the data type
+ * that holds the derivative
+ * projected to a certain
+ * direction.
+ */
+ typedef Tensor<2,dim> ProjectedDerivative;
+
+ /**
+ * Given an FEValues object
+ * initialized to a cell, and a
+ * solution vector, extract the
+ * desired derivative at the
+ * first quadrature point (which
+ * is the only one, as we only
+ * evaluate the finite element
+ * field at the center of each
+ * cell).
+ */
+ template <class InputVector, int spacedim>
+ static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
++ get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
+ const InputVector &solution,
+ const unsigned int component);
+
+ /**
+ * Return the norm of the
+ * derivative object. Here, for
+ * the (symmetric) tensor of
+ * second derivatives, we choose
+ * the absolute value of the
+ * largest eigenvalue, which is
+ * the matrix norm associated to
+ * the $l_2$ norm of vectors. It
+ * is also the largest value of
+ * the curvature of the solution.
+ */
+ static double derivative_norm (const Derivative &d);
+
+ /**
+ * If for the present derivative
+ * order, symmetrization of the
+ * derivative tensor is
+ * necessary, then do so on the
+ * argument.
+ *
+ * For the second derivatives,
+ * each entry of the tensor is
+ * set to the mean of its value
+ * and the value of the transpose
+ * element.
+ *
+ * Note that this function
+ * actually modifies its
+ * argument.
+ */
+ static void symmetrize (Derivative &derivative_tensor);
+ };
+
+ template <int order, int dim>
+ class DerivativeSelector
+ {
+ public:
+ /**
+ * typedef to select the
+ * DerivativeDescription corresponding
+ * to the <tt>order</tt>th
+ * derivative. In this general template
+ * we set an invalid typedef to void;
+ * the real typedefs have to be
+ * specialized.
+ */
+ typedef void DerivDescr;
+
+ };
+
+ template <int dim>
+ class DerivativeSelector<1,dim>
+ {
+ public:
+
+ typedef Gradient<dim> DerivDescr;
+ };
+
+ template <int dim>
+ class DerivativeSelector<2,dim>
+ {
+ public:
+
+ typedef SecondDerivative<dim> DerivDescr;
+ };
+
+ template <int dim>
+ class DerivativeSelector<3,dim>
+ {
+ public:
+
+ typedef ThirdDerivative<dim> DerivDescr;
+ };
+
+
+
+
+ private:
+
+ /**
+ * Convenience typedef denoting
+ * the range of indices on which
+ * a certain thread shall
+ * operate.
+ */
+ typedef std::pair<unsigned int,unsigned int> IndexInterval;
+
+ /**
+ * In essence, the main function of
+ * this class. It is called by
+ * the public entry points to
+ * this class with the correct
+ * template first argument and
+ * then simply calls the
+ * @p approximate function,
+ * after setting up several
+ * threads and doing some
+ * administration that is
+ * independent of the actual
+ * derivative to be computed.
+ *
+ * The @p component argument
+ * denotes which component of the
+ * solution vector we are to work
+ * on.
+ */
+ template <class DerivativeDescription, int dim,
+ template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_derivative (const Mapping<dim,spacedim> &mapping,
const DH<dim,spacedim> &dof,
const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * Calls the @p interpolate
- * function, see above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_gradient (const DH<dim,spacedim> &dof,
- const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * This function is the analogue
- * to the one above, computing
- * finite difference
- * approximations of the tensor
- * of second derivatives. Pass it
- * the DoF handler object that
- * describes the finite element
- * field, a nodal value vector,
- * and receive the cell-wise
- * spectral norm of the
- * approximated tensor of second
- * derivatives. The spectral norm
- * is the matrix norm associated
- * to the $l_2$ vector norm.
- *
- * The last parameter denotes the
- * solution component, for which
- * the gradient is to be
- * computed. It defaults to the
- * first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_second_derivative (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * Calls the @p interpolate
- * function, see above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
- template <int dim, template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_second_derivative (const DH<dim,spacedim> &dof,
- const InputVector &solution,
- Vector<float> &derivative_norm,
- const unsigned int component = 0);
-
- /**
- * This function calculates the
- * <tt>order</tt>-th order approximate
- * derivative and returns the full tensor
- * for a single cell.
- *
- * The last parameter denotes the
- * solution component, for which
- * the gradient is to be
- * computed. It defaults to the
- * first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
- */
-
- template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
- static void
- approximate_derivative_tensor (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
- Tensor<order,dim> &derivative,
- const unsigned int component = 0);
-
- /**
- * Same as above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
-
- template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
- static void
- approximate_derivative_tensor (const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
- Tensor<order,dim> &derivative,
- const unsigned int component = 0);
-
- /**
- * Return the norm of the derivative.
- */
- template <int dim, int order>
- static double
- derivative_norm(const Tensor<order,dim> &derivative);
-
- /**
- * Exception
- */
- DeclException2 (ExcInvalidVectorLength,
- int, int,
- << "Vector has length " << arg1 << ", but should have "
- << arg2);
- /**
- * Exception
- */
- DeclException0 (ExcInsufficientDirections);
-
- private:
-
- /**
- * The following class is used to
- * describe the data needed to
- * compute the finite difference
- * approximation to the gradient
- * on a cell. See the general
- * documentation of this class
- * for more information on
- * implementational details.
- *
- * @author Wolfgang Bangerth, 2000
- */
- template <int dim>
- class Gradient
- {
- public:
- /**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
- */
- static const UpdateFlags update_flags;
-
- /**
- * Declare the data type which
- * holds the derivative described
- * by this class.
- */
- typedef Tensor<1,dim> Derivative;
-
- /**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
- */
- typedef double ProjectedDerivative;
-
- /**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
- */
- template <class InputVector, int spacedim>
- static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
- const InputVector &solution,
- const unsigned int component);
-
- /**
- * Return the norm of the
- * derivative object. Here, for
- * the gradient, we choose the
- * Euclidian norm of the gradient
- * vector.
- */
- static double derivative_norm (const Derivative &d);
-
- /**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
- *
- * For the first derivatives, no
- * such thing is necessary, so
- * this function is a no-op.
- */
- static void symmetrize (Derivative &derivative_tensor);
- };
-
-
-
- /**
- * The following class is used to
- * describe the data needed to
- * compute the finite difference
- * approximation to the second
- * derivatives on a cell. See the
- * general documentation of this
- * class for more information on
- * implementational details.
- *
- * @author Wolfgang Bangerth, 2000
- */
- template <int dim>
- class SecondDerivative
- {
- public:
- /**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
- */
- static const UpdateFlags update_flags;
-
- /**
- * Declare the data type which
- * holds the derivative described
- * by this class.
- */
- typedef Tensor<2,dim> Derivative;
-
- /**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
- */
- typedef Tensor<1,dim> ProjectedDerivative;
-
- /**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
- */
- template <class InputVector, int spacedim>
- static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
- const InputVector &solution,
- const unsigned int component);
-
- /**
- * Return the norm of the
- * derivative object. Here, for
- * the (symmetric) tensor of
- * second derivatives, we choose
- * the absolute value of the
- * largest eigenvalue, which is
- * the matrix norm associated to
- * the $l_2$ norm of vectors. It
- * is also the largest value of
- * the curvature of the solution.
- */
- static double derivative_norm (const Derivative &d);
-
- /**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
- *
- * For the second derivatives,
- * each entry of the tensor is
- * set to the mean of its value
- * and the value of the transpose
- * element.
- *
- * Note that this function
- * actually modifies its
- * argument.
- */
- static void symmetrize (Derivative &derivative_tensor);
- };
-
- template <int dim>
- class ThirdDerivative
- {
- public:
- /**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
- */
- static const UpdateFlags update_flags;
-
- /**
- * Declare the data type which
- * holds the derivative described
- * by this class.
- */
- typedef Tensor<3,dim> Derivative;
-
- /**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
- */
- typedef Tensor<2,dim> ProjectedDerivative;
-
- /**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
- */
- template <class InputVector, int spacedim>
- static ProjectedDerivative
- get_projected_derivative (const FEValues<dim,spacedim> &fe_values,
- const InputVector &solution,
- const unsigned int component);
-
- /**
- * Return the norm of the
- * derivative object. Here, for
- * the (symmetric) tensor of
- * second derivatives, we choose
- * the absolute value of the
- * largest eigenvalue, which is
- * the matrix norm associated to
- * the $l_2$ norm of vectors. It
- * is also the largest value of
- * the curvature of the solution.
- */
- static double derivative_norm (const Derivative &d);
-
- /**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
- *
- * For the second derivatives,
- * each entry of the tensor is
- * set to the mean of its value
- * and the value of the transpose
- * element.
- *
- * Note that this function
- * actually modifies its
- * argument.
- */
- static void symmetrize (Derivative &derivative_tensor);
- };
-
- template <int order, int dim>
- class DerivativeSelector
- {
- public:
- /**
- * typedef to select the
- * DerivativeDescription corresponding
- * to the <tt>order</tt>th
- * derivative. In this general template
- * we set an unvalid typedef to void,
- * the real typedefs have to be
- * specialized.
- */
- typedef void DerivDescr;
-
- };
-
- template <int dim>
- class DerivativeSelector<1,dim>
- {
- public:
-
- typedef Gradient<dim> DerivDescr;
- };
-
- template <int dim>
- class DerivativeSelector<2,dim>
- {
- public:
-
- typedef SecondDerivative<dim> DerivDescr;
- };
-
- template <int dim>
- class DerivativeSelector<3,dim>
- {
- public:
-
- typedef ThirdDerivative<dim> DerivDescr;
- };
-
-
-
-
- private:
-
- /**
- * Convenience typedef denoting
- * the range of indices on which
- * a certain thread shall
- * operate.
- */
- typedef std::pair<unsigned int,unsigned int> IndexInterval;
-
- /**
- * Kind of the main function of
- * this class. It is called by
- * the public entry points to
- * this class with the correct
- * template first argument and
- * then simply calls the
- * @p approximate function,
- * after setting up several
- * threads and doing some
- * administration that is
- * independent of the actual
- * derivative to be computed.
- *
- * The @p component argument
- * denotes which component of the
- * solution vector we are to work
- * on.
- */
- template <class DerivativeDescription, int dim,
- template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_derivative (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const unsigned int component,
- Vector<float> &derivative_norm);
-
- /**
- * Compute the derivative
- * approximation on the cells in
- * the range given by the third
- * parameter.
- * Fill the @p derivative_norm vector with
- * the norm of the computed derivative
- * tensors on each cell.
- */
- template <class DerivativeDescription, int dim,
- template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const unsigned int component,
- const IndexInterval &index_interval,
- Vector<float> &derivative_norm);
-
- /**
- * Compute the derivative approximation on
- * one cell. This computes the full
- * derivative tensor.
- */
- template <class DerivativeDescription, int dim,
- template <int, int> class DH, class InputVector, int spacedim>
- static void
- approximate_cell (const Mapping<dim,spacedim> &mapping,
- const DH<dim,spacedim> &dof,
- const InputVector &solution,
- const unsigned int component,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
- typename DerivativeDescription::Derivative &derivative);
+ const unsigned int component,
+ Vector<float> &derivative_norm);
+
+ /**
+ * Compute the derivative
+ * approximation on the cells in
+ * the range given by the third
+ * parameter.
+ * Fill the @p derivative_norm vector with
+ * the norm of the computed derivative
+ * tensors on each cell.
+ */
+ template <class DerivativeDescription, int dim,
+ template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const unsigned int component,
+ const IndexInterval &index_interval,
+ Vector<float> &derivative_norm);
+
+ /**
+ * Compute the derivative approximation on
+ * one cell. This computes the full
+ * derivative tensor.
+ */
+ template <class DerivativeDescription, int dim,
+ template <int, int> class DH, class InputVector, int spacedim>
+ static void
+ approximate_cell (const Mapping<dim,spacedim> &mapping,
+ const DH<dim,spacedim> &dof,
+ const InputVector &solution,
+ const unsigned int component,
- const typename DH<dim,spacedim>::active_cell_iterator &cell,
++ const typename DH<dim,spacedim>::active_cell_iterator &cell,
+ typename DerivativeDescription::Derivative &derivative);
};
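(A hedged sketch of the public entry points declared above; triangulation, dof_handler, solution and cell are assumed to belong to an existing scalar finite element field.)

// Sketch only: one indicator per active cell.
Vector<float> gradient_indicator (triangulation.n_active_cells());
DerivativeApproximation::approximate_gradient (dof_handler,
                                               solution,
                                               gradient_indicator);

// The full approximate derivative tensor on a single cell; the tensor rank
// selects the derivative order, here the second derivative:
Tensor<2,dim> hessian;
DerivativeApproximation::approximate_derivative_tensor (dof_handler,
                                                        solution,
                                                        cell,
                                                        hessian);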
template <int spacedim>
class KellyErrorEstimator<1,spacedim>
{
- public:
- /**
- * Implementation of the error
- * estimator described above. You
- * may give a coefficient, but
- * there is a default value which
- * denotes the constant
- * coefficient with value
- * one. The coefficient function
- * may either be a scalar one, in
- * which case it is used for all
- * components of the finite
- * element, or a vector-valued
- * one with as many components as
- * there are in the finite
- * element; in the latter case,
- * each component is weighted by
- * the respective component in
- * the coefficient.
- *
- * You might give a list of components
- * you want to evaluate, in case the
- * finite element used by the DoFHandler
- * object is vector-valued. You then have
- * to set those entries to true in the
- * bit-vector @p component_mask for which
- * the respective component is to be used
- * in the error estimator. The default is
- * to use all components, which is done
- * by either providing a bit-vector with
- * all-set entries, or an empty
- * bit-vector. All the other parameters
- * are as in the general case used for 2d
- * and higher.
- *
- * The estimator supports multithreading
- * and splits the cells to
- * <tt>multithread_info.n_default_threads</tt>
- * (default) threads. The number of
- * threads to be used in multithreaded
- * mode can be set with the last
- * parameter of the error estimator.
- * Multithreading is not presently
- * implemented for 1d, but we retain the
- * respective parameter for compatibility
- * with the function signature in the
- * general case.
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Calls the @p estimate
- * function, see above, with
- * <tt>mapping=MappingQ1<1>()</tt>.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Same function as above, but
- * accepts more than one solution
- * vectors and returns one error
- * vector for each solution
- * vector. For the reason of
- * existence of this function,
- * see the general documentation
- * of this class.
- *
- * Since we do not want to force
- * the user of this function to
- * copy around their solution
- * vectors, the vector of
- * solution vectors takes
- * pointers to the solutions,
- * rather than being a vector of
- * vectors. This makes it simpler
- * to have the solution vectors
- * somewhere in memory, rather
- * than to have them collected
- * somewhere special. (Note that
- * it is not possible to
- * construct of vector of
- * references, so we had to use a
- * vector of pointers.)
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Calls the @p estimate
- * function, see above, with
- * <tt>mapping=MappingQ1<1>()</tt>.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const Quadrature<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const InputVector &solution,
- Vector<float> &error,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const Mapping<1,spacedim> &mapping,
- const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
-
- /**
- * Equivalent to the set of functions
- * above, except that this one takes a
- * quadrature collection for hp finite
- * element dof handlers.
- */
- template <typename InputVector, class DH>
- static void estimate (const DH &dof,
- const hp::QCollection<0> &quadrature,
- const typename FunctionMap<spacedim>::type &neumann_bc,
- const std::vector<const InputVector *> &solutions,
- std::vector<Vector<float>*> &errors,
- const ComponentMask &component_mask = ComponentMask(),
- const Function<spacedim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
- const types::material_id material_id = numbers::invalid_material_id);
-
- /**
- * Exception
- */
- DeclException0 (ExcInvalidBoundaryIndicator);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidComponentMask);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidCoefficient);
- /**
- * Exception
- */
- DeclException0 (ExcInvalidBoundaryFunction);
- /**
- * Exception
- */
- DeclException2 (ExcIncompatibleNumberOfElements,
- int, int,
- << "The number of elements " << arg1 << " and " << arg2
- << " of the vectors do not match!");
- /**
- * Exception
- */
- DeclException0 (ExcInvalidSolutionVector);
- /**
- * Exception
- */
- DeclException0 (ExcNoSolutions);
+ public:
+ /**
+ * Implementation of the error
+ * estimator described above. You
+ * may give a coefficient, but
+ * there is a default value which
+ * denotes the constant
+ * coefficient with value
+ * one. The coefficient function
+ * may either be a scalar one, in
+ * which case it is used for all
+ * components of the finite
+ * element, or a vector-valued
+ * one with as many components as
+ * there are in the finite
+ * element; in the latter case,
+ * each component is weighted by
+ * the respective component in
+ * the coefficient.
+ *
+ * You might give a list of components
+ * you want to evaluate, in case the
+ * finite element used by the DoFHandler
+ * object is vector-valued. You then have
+ * to set those entries to true in the
+ * bit-vector @p component_mask for which
+ * the respective component is to be used
+ * in the error estimator. The default is
+ * to use all components, which is done
+ * by either providing a bit-vector with
+ * all-set entries, or an empty
+ * bit-vector. All the other parameters
+ * are as in the general case used for 2d
+ * and higher.
+ *
+ * The estimator supports multithreading
+ * and splits the cells among
+ * <tt>multithread_info.n_default_threads</tt>
+ * (default) threads. The number of
+ * threads to be used in multithreaded
+ * mode can be set with the last
+ * parameter of the error estimator.
+ * Multithreading is not presently
+ * implemented for 1d, but we retain the
+ * respective parameter for compatibility
+ * with the function signature in the
+ * general case.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Calls the @p estimate
+ * function, see above, with
+ * <tt>mapping=MappingQ1<1>()</tt>.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Same function as above, but
+ * accepts more than one solution
+ * vector and returns one error
+ * vector for each solution
+ * vector. For the reason of
+ * existence of this function,
+ * see the general documentation
+ * of this class.
+ *
+ * Since we do not want to force
+ * the user of this function to
+ * copy around their solution
+ * vectors, the vector of
+ * solution vectors takes
+ * pointers to the solutions,
+ * rather than being a vector of
+ * vectors. This makes it simpler
+ * to have the solution vectors
+ * somewhere in memory, rather
+ * than to have them collected
+ * somewhere special. (Note that
+ * it is not possible to
+ * construct a vector of
+ * references, so we had to use a
+ * vector of pointers.)
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Calls the @p estimate
+ * function, see above, with
+ * <tt>mapping=MappingQ1<1>()</tt>.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const Quadrature<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const InputVector &solution,
+ Vector<float> &error,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const Mapping<1,spacedim> &mapping,
+ const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+
+ /**
+ * Equivalent to the set of functions
+ * above, except that this one takes a
+ * quadrature collection for hp finite
+ * element dof handlers.
+ */
+ template <typename InputVector, class DH>
+ static void estimate (const DH &dof,
+ const hp::QCollection<0> &quadrature,
+ const typename FunctionMap<spacedim>::type &neumann_bc,
+ const std::vector<const InputVector *> &solutions,
+ std::vector<Vector<float>*> &errors,
+ const ComponentMask &component_mask = ComponentMask(),
+ const Function<spacedim> *coefficients = 0,
+ const unsigned int n_threads = multithread_info.n_default_threads,
+ const types::subdomain_id subdomain_id = types::invalid_subdomain_id,
+ const types::material_id material_id = numbers::invalid_material_id);
+
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidBoundaryIndicator);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidComponentMask);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidCoefficient);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidBoundaryFunction);
+ /**
+ * Exception
+ */
+ DeclException2 (ExcIncompatibleNumberOfElements,
+ int, int,
+ << "The number of elements " << arg1 << " and " << arg2
+ << " of the vectors do not match!");
+ /**
+ * Exception
+ */
+ DeclException0 (ExcInvalidSolutionVector);
+ /**
+ * Exception
+ */
+ DeclException0 (ExcNoSolutions);
};
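As an illustration only (not part of the patch): the usual call pattern for the estimator declared above, with an empty Neumann map and the default constant coefficient. This assumes code templated on <int dim> in which triangulation, dof_handler and solution exist.

  // Sketch: compute per-cell error indicators with a face Gauss rule,
  // no Neumann data and the default coefficient of one.
  Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate (dof_handler,
                                      QGauss<dim-1>(2),
                                      typename FunctionMap<dim>::type(),
                                      solution,
                                      estimated_error_per_cell);
  // The indicators can then drive mesh adaptation, for example via
  // GridRefinement::refine_and_coarsen_fixed_number (triangulation,
  //                                                   estimated_error_per_cell,
  //                                                   0.3, 0.03);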
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
- const Function<spacedim> * const weight = 0,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ const Function<spacedim> *const weight = 0,
std::vector<unsigned int> component_mapping = std::vector<unsigned int>());
// * Same function, but for 1d.
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
- const Function<spacedim> * const a = 0,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ const Function<spacedim> *const a = 0,
std::vector<unsigned int> component_mapping = std::vector<unsigned int>());
- /**
- * Same function as above, but for hp
- * objects.
- */
+ /**
+ * Same function as above, but for hp
+ * objects.
+ */
template <int dim, int spacedim>
void create_boundary_mass_matrix (const hp::MappingCollection<dim,spacedim> &mapping,
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
- const Function<spacedim> * const a = 0,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ const Function<spacedim> *const a = 0,
std::vector<unsigned int> component_mapping = std::vector<unsigned int>());
- /**
- * Same function as above, but for hp
- * objects.
- */
+ /**
+ * Same function as above, but for hp
+ * objects.
+ */
//
// void create_boundary_mass_matrix (const hp::MappingCollection<1,1> &mapping,
// const hp::DoFHandler<1,1> &dof,
// SparseMatrix<double> &matrix,
// const FunctionMap<1>::type &boundary_functions,
// Vector<double> &rhs_vector,
-// std::vector<unsigned int>&dof_to_boundary_mapping,
+// std::vector<types::global_dof_index>&dof_to_boundary_mapping,
// const Function<1> * const a = 0);
- /**
- * Same function as above, but for hp
- * objects.
- */
+ /**
+ * Same function as above, but for hp
+ * objects.
+ */
template <int dim, int spacedim>
void create_boundary_mass_matrix (const hp::DoFHandler<dim,spacedim> &dof,
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
- const Function<spacedim> * const a = 0,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ const Function<spacedim> *const a = 0,
std::vector<unsigned int> component_mapping = std::vector<unsigned int>());
- /**
- * Assemble the Laplace
- * matrix. If no coefficient is
- * given, it is assumed to be
- * constant one.
- *
- * If the library is configured
- * to use multithreading, this
- * function works in parallel.
- *
- * See the general doc of this
- * class for more information.
- */
+ /**
+ * Assemble the Laplace
+ * matrix. If no coefficient is
+ * given, it is assumed to be
+ * constant one.
+ *
+ * If the library is configured
+ * to use multithreading, this
+ * function works in parallel.
+ *
+ * See the general doc of this
+ * class for more information.
+ */
template <int dim, int spacedim>
void create_laplace_matrix (const Mapping<dim, spacedim> &mapping,
const DoFHandler<dim,spacedim> &dof,
*/
namespace MatrixTools
{
- /**
- * Import namespace MatrixCreator for
- * backward compatibility with older
- * versions of deal.II in which these
- * namespaces were classes and class
- * MatrixTools was publicly derived from
- * class MatrixCreator.
- */
+ /**
+ * Import namespace MatrixCreator for
+ * backward compatibility with older
+ * versions of deal.II in which these
+ * namespaces were classes and class
+ * MatrixTools was publicly derived from
+ * class MatrixCreator.
+ */
using namespace MatrixCreator;
- /**
- * Apply dirichlet boundary conditions
- * to the system matrix and vectors
- * as described in the general
- * documentation.
- */
+ /**
+ * Apply Dirichlet boundary conditions
+ * to the system matrix and vectors
+ * as described in the general
+ * documentation.
+ */
template <typename number>
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- SparseMatrix<number> &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ SparseMatrix<number> &matrix,
Vector<number> &solution,
Vector<number> &right_hand_side,
const bool eliminate_columns = true);
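Purely as a usage sketch (not part of the patch), assuming a standard scalar problem in which system_matrix, solution and system_rhs have already been assembled and dof_handler exists:

  // Sketch: interpolate homogeneous Dirichlet values on boundary indicator 0
  // and eliminate them from the assembled linear system.
  std::map<types::global_dof_index,double> boundary_values;
  VectorTools::interpolate_boundary_values (dof_handler,
                                            0,
                                            ZeroFunction<dim>(),
                                            boundary_values);
  MatrixTools::apply_boundary_values (boundary_values,
                                      system_matrix,
                                      solution,
                                      system_rhs);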
- /**
- * Apply dirichlet boundary
- * conditions to the system
- * matrix and vectors as
- * described in the general
- * documentation. This function
- * works for block sparse
- * matrices and block vectors
- */
+ /**
+ * Apply Dirichlet boundary
+ * conditions to the system
+ * matrix and vectors as
+ * described in the general
+ * documentation. This function
+ * works for block sparse
+ * matrices and block vectors
+ */
template <typename number>
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
BlockSparseMatrix<number> &matrix,
BlockVector<number> &solution,
BlockVector<number> &right_hand_side,
const bool eliminate_columns = true);
#ifdef DEAL_II_USE_PETSC
- /**
- * Apply dirichlet boundary conditions to
- * the system matrix and vectors as
- * described in the general
- * documentation. This function works on
- * the classes that are used to wrap
- * PETSc objects.
- *
- * Note that this function is not very
- * efficient: it needs to alternatingly
- * read and write into the matrix, a
- * situation that PETSc does not handle
- * too well. In addition, we only get rid
- * of rows corresponding to boundary
- * nodes, but the corresponding case of
- * deleting the respective columns
- * (i.e. if @p eliminate_columns is @p
- * true) is not presently implemented,
- * and probably will never because it is
- * too expensive without direct access to
- * the PETSc data structures. (This leads
- * to the situation where the action
- * indicates by the default value of the
- * last argument is actually not
- * implemented; that argument has
- * <code>true</code> as its default value
- * to stay consistent with the other
- * functions of same name in this class.)
- * A third reason against this function
- * is that it doesn't handle the case
- * where the matrix is distributed across
- * an MPI system.
- *
- * This function is used in
- * step-17 and
- * step-18.
- */
+ /**
+ * Apply Dirichlet boundary conditions to
+ * the system matrix and vectors as
+ * described in the general
+ * documentation. This function works on
+ * the classes that are used to wrap
+ * PETSc objects.
+ *
+ * Note that this function is not very
+ * efficient: it needs to alternatingly
+ * read and write into the matrix, a
+ * situation that PETSc does not handle
+ * too well. In addition, we only get rid
+ * of rows corresponding to boundary
+ * nodes, but the corresponding case of
+ * deleting the respective columns
+ * (i.e. if @p eliminate_columns is @p
+ * true) is not presently implemented,
+ * and probably will never because it is
+ * too expensive without direct access to
+ * the PETSc data structures. (This leads
+ * to the situation where the action
+ * indicated by the default value of the
+ * last argument is actually not
+ * implemented; that argument has
+ * <code>true</code> as its default value
+ * to stay consistent with the other
+ * functions of same name in this class.)
+ * A third reason against this function
+ * is that it doesn't handle the case
+ * where the matrix is distributed across
+ * an MPI system.
+ *
+ * This function is used in
+ * step-17 and
+ * step-18.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::SparseMatrix &matrix,
- PETScWrappers::Vector &solution,
- PETScWrappers::Vector &right_hand_side,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ PETScWrappers::SparseMatrix &matrix,
+ PETScWrappers::Vector &solution,
+ PETScWrappers::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Same function, but for parallel PETSc
- * matrices.
- */
+ /**
+ * Same function, but for parallel PETSc
+ * matrices.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
- PETScWrappers::MPI::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ PETScWrappers::MPI::SparseMatrix &matrix,
+ PETScWrappers::MPI::Vector &solution,
+ PETScWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Same function, but for
- * parallel PETSc matrices. Note
- * that this function only
- * operates on the local range of
- * the parallel matrix, i.e. it
- * only eliminates rows
- * corresponding to degrees of
- * freedom for which the row is
- * stored on the present
- * processor. All other boundary
- * nodes are ignored, and it
- * doesn't matter whether they
- * are present in the first
- * argument to this function or
- * not. A consequence of this,
- * however, is that this function
- * has to be called from all
- * processors that participate in
- * sharing the contents of the
- * given matrices and vectors. It
- * is also implied that the local
- * range for all objects passed
- * to this function is the same.
- */
+ /**
+ * Same function, but for
+ * parallel PETSc matrices. Note
+ * that this function only
+ * operates on the local range of
+ * the parallel matrix, i.e. it
+ * only eliminates rows
+ * corresponding to degrees of
+ * freedom for which the row is
+ * stored on the present
+ * processor. All other boundary
+ * nodes are ignored, and it
+ * doesn't matter whether they
+ * are present in the first
+ * argument to this function or
+ * not. A consequence of this,
+ * however, is that this function
+ * has to be called from all
+ * processors that participate in
+ * sharing the contents of the
+ * given matrices and vectors. It
+ * is also implied that the local
+ * range for all objects passed
+ * to this function is the same.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ PETScWrappers::MPI::SparseMatrix &matrix,
PETScWrappers::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
+ PETScWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
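A hedged sketch of the collective usage described above, in the spirit of step-17; system_matrix, solution and system_rhs are assumed to be the corresponding PETSc MPI wrapper objects, and every processor sharing them must execute this code:

  // Sketch: each processor supplies boundary values; only rows stored locally
  // are eliminated, and column elimination is switched off because it is not
  // implemented for the PETSc wrappers.
  std::map<types::global_dof_index,double> boundary_values;
  VectorTools::interpolate_boundary_values (dof_handler,
                                            0,
                                            ZeroFunction<dim>(),
                                            boundary_values);
  MatrixTools::apply_boundary_values (boundary_values,
                                      system_matrix,
                                      solution,
                                      system_rhs,
                                      false);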
- /**
- * Same as above but for BlockSparseMatrix.
- */
+ /**
+ * Same as above but for BlockSparseMatrix.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::BlockSparseMatrix &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ PETScWrappers::MPI::BlockSparseMatrix &matrix,
PETScWrappers::MPI::BlockVector &solution,
PETScWrappers::MPI::BlockVector &right_hand_side,
const bool eliminate_columns = true);
#endif
#ifdef DEAL_II_USE_TRILINOS
- /**
- * Apply dirichlet boundary
- * conditions to the system matrix
- * and vectors as described in the
- * general documentation. This
- * function works on the classes
- * that are used to wrap Trilinos
- * objects.
- *
- * Note that this function is not
- * very efficient: it needs to
- * alternatingly read and write
- * into the matrix, a situation
- * that Trilinos does not handle
- * too well. In addition, we only
- * get rid of rows corresponding to
- * boundary nodes, but the
- * corresponding case of deleting
- * the respective columns (i.e. if
- * @p eliminate_columns is @p true)
- * is not presently implemented,
- * and probably will never because
- * it is too expensive without
- * direct access to the Trilinos
- * data structures. (This leads to
- * the situation where the action
- * indicates by the default value
- * of the last argument is actually
- * not implemented; that argument
- * has <code>true</code> as its
- * default value to stay consistent
- * with the other functions of same
- * name in this class.) A third
- * reason against this function is
- * that it doesn't handle the case
- * where the matrix is distributed
- * across an MPI system.
- */
+ /**
+ * Apply Dirichlet boundary
+ * conditions to the system matrix
+ * and vectors as described in the
+ * general documentation. This
+ * function works on the classes
+ * that are used to wrap Trilinos
+ * objects.
+ *
+ * Note that this function is not
+ * very efficient: it needs to
+ * alternatingly read and write
+ * into the matrix, a situation
+ * that Trilinos does not handle
+ * too well. In addition, we only
+ * get rid of rows corresponding to
+ * boundary nodes, but the
+ * corresponding case of deleting
+ * the respective columns (i.e. if
+ * @p eliminate_columns is @p true)
+ * is not presently implemented,
+ * and probably will never because
+ * it is too expensive without
+ * direct access to the Trilinos
+ * data structures. (This leads to
+ * the situation where the action
+ * indicated by the default value
+ * of the last argument is actually
+ * not implemented; that argument
+ * has <code>true</code> as its
+ * default value to stay consistent
+ * with the other functions of same
+ * name in this class.) A third
+ * reason against this function is
+ * that it doesn't handle the case
+ * where the matrix is distributed
+ * across an MPI system.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ TrilinosWrappers::SparseMatrix &matrix,
TrilinosWrappers::Vector &solution,
TrilinosWrappers::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * This function does the same as
- * the one above, except now
- * working on block structures.
- */
+ /**
+ * This function does the same as
+ * the one above, except now
+ * working on block structures.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
TrilinosWrappers::BlockVector &solution,
TrilinosWrappers::BlockVector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * Apply dirichlet boundary
- * conditions to the system matrix
- * and vectors as described in the
- * general documentation. This
- * function works on the classes
- * that are used to wrap Trilinos
- * objects.
- *
- * Note that this function is not
- * very efficient: it needs to
- * alternatingly read and write
- * into the matrix, a situation
- * that Trilinos does not handle
- * too well. In addition, we only
- * get rid of rows corresponding to
- * boundary nodes, but the
- * corresponding case of deleting
- * the respective columns (i.e. if
- * @p eliminate_columns is @p true)
- * is not presently implemented,
- * and probably will never because
- * it is too expensive without
- * direct access to the Trilinos
- * data structures. (This leads to
- * the situation where the action
- * indicates by the default value
- * of the last argument is actually
- * not implemented; that argument
- * has <code>true</code> as its
- * default value to stay consistent
- * with the other functions of same
- * name in this class.) This
- * function does work on MPI vector
- * types.
- */
+ /**
+ * Apply Dirichlet boundary
+ * conditions to the system matrix
+ * and vectors as described in the
+ * general documentation. This
+ * function works on the classes
+ * that are used to wrap Trilinos
+ * objects.
+ *
+ * Note that this function is not
+ * very efficient: it needs to
+ * alternatingly read and write
+ * into the matrix, a situation
+ * that Trilinos does not handle
+ * too well. In addition, we only
+ * get rid of rows corresponding to
+ * boundary nodes, but the
+ * corresponding case of deleting
+ * the respective columns (i.e. if
+ * @p eliminate_columns is @p true)
+ * is not presently implemented,
+ * and probably will never because
+ * it is too expensive without
+ * direct access to the Trilinos
+ * data structures. (This leads to
+ * the situation where the action
+ * indicated by the default value
+ * of the last argument is actually
+ * not implemented; that argument
+ * has <code>true</code> as its
+ * default value to stay consistent
+ * with the other functions of same
+ * name in this class.) This
+ * function does work on MPI vector
+ * types.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ TrilinosWrappers::SparseMatrix &matrix,
TrilinosWrappers::MPI::Vector &solution,
TrilinosWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
- /**
- * This function does the same as
- * the one above, except now working
- * on block structures.
- */
+ /**
+ * This function does the same as
+ * the one above, except now working
+ * on block structures.
+ */
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
TrilinosWrappers::MPI::BlockVector &solution,
TrilinosWrappers::MPI::BlockVector &right_hand_side,
const bool eliminate_columns = true);
#endif
- /**
- * Rather than applying boundary
- * values to the global matrix
- * and vector after creating the
- * global matrix, this function
- * does so during assembly, by
- * modifying the local matrix and
- * vector contributions. If you
- * call this function on all
- * local contributions, the
- * resulting matrix will have the
- * same entries, and the final
- * call to
- * apply_boundary_values() on the
- * global system will not be
- * necessary.
- *
- * Since this function does not
- * have to work on the
- * complicated data structures of
- * sparse matrices, it is
- * relatively cheap. It may
- * therefore be a win if you have
- * many fixed degrees of freedom
- * (e.g. boundary nodes), or if
- * access to the sparse matrix is
- * expensive (e.g. for block
- * sparse matrices, or for PETSc
- * or trilinos
- * matrices). However, it doesn't
- * work as expected if there are
- * also hanging nodes to be
- * considered. More caveats are
- * listed in the general
- * documentation of this class.
- */
+ /**
+ * Rather than applying boundary
+ * values to the global matrix
+ * and vector after creating the
+ * global matrix, this function
+ * does so during assembly, by
+ * modifying the local matrix and
+ * vector contributions. If you
+ * call this function on all
+ * local contributions, the
+ * resulting matrix will have the
+ * same entries, and the final
+ * call to
+ * apply_boundary_values() on the
+ * global system will not be
+ * necessary.
+ *
+ * Since this function does not
+ * have to work on the
+ * complicated data structures of
+ * sparse matrices, it is
+ * relatively cheap. It may
+ * therefore be a win if you have
+ * many fixed degrees of freedom
+ * (e.g. boundary nodes), or if
+ * access to the sparse matrix is
+ * expensive (e.g. for block
+ * sparse matrices, or for PETSc
+ * or trilinos
+ * matrices). However, it doesn't
+ * work as expected if there are
+ * also hanging nodes to be
+ * considered. More caveats are
+ * listed in the general
+ * documentation of this class.
+ */
void
- local_apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- const std::vector<unsigned int> &local_dof_indices,
+ local_apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
+ const std::vector<types::global_dof_index> &local_dof_indices,
FullMatrix<double> &local_matrix,
Vector<double> &local_rhs,
const bool eliminate_columns);
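A minimal sketch of where this call sits in the usual assembly loop (boundary_values computed once before the loop; cell_matrix, cell_rhs and local_dof_indices as in the standard deal.II assembly pattern):

  // Sketch: fix up the local contributions before they are copied into the
  // global objects, so no global apply_boundary_values() call is needed.
  cell->get_dof_indices (local_dof_indices);
  MatrixTools::local_apply_boundary_values (boundary_values,
                                            local_dof_indices,
                                            cell_matrix,
                                            cell_rhs,
                                            true);
  // ...then add cell_matrix and cell_rhs to the global matrix and right
  // hand side as usual.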
template<int dim, typename VECTOR=Vector<double>, class DH=DoFHandler<dim> >
class SolutionTransfer
{
- public:
-
- /**
- * Constructor, takes the current DoFHandler
- * as argument.
- */
- SolutionTransfer(const DH &dof);
-
- /**
- * Destructor
- */
- ~SolutionTransfer();
-
- /**
- * Reinit this class to the state that
- * it has
- * directly after calling the Constructor
- */
- void clear();
-
- /**
- * Prepares the @p SolutionTransfer for
- * pure refinement. It
- * stores the dof indices of each cell.
- * After calling this function
- * only calling the @p refine_interpolate
- * functions is allowed.
- */
- void prepare_for_pure_refinement();
-
- /**
- * Prepares the @p SolutionTransfer for
- * coarsening and refinement. It
- * stores the dof indices of each cell and
- * stores the dof values of the vectors in
- * @p all_in in each cell that'll be coarsened.
- * @p all_in includes all vectors
- * that are to be interpolated
- * onto the new (refined and/or
- * coarsenend) grid.
- */
- void prepare_for_coarsening_and_refinement (const std::vector<VECTOR> &all_in);
-
- /**
- * Same as previous function
- * but for only one discrete function
- * to be interpolated.
- */
- void prepare_for_coarsening_and_refinement (const VECTOR &in);
-
- /**
- * This function
- * interpolates the discrete function @p in,
- * which is a vector on the grid before the
- * refinement, to the function @p out
- * which then is a vector on the refined grid.
- * It assumes the vectors having the
- * right sizes (i.e. <tt>in.size()==n_dofs_old</tt>,
- * <tt>out.size()==n_dofs_refined</tt>)
- *
- * Calling this function is allowed only
- * if @p prepare_for_pure_refinement is called
- * and the refinement is
- * executed before.
- * Multiple calling of this function is
- * allowed. e.g. for interpolating several
- * functions.
- */
- void refine_interpolate (const VECTOR &in,
- VECTOR &out) const;
-
- /**
- * This function
- * interpolates the discrete functions
- * that are stored in @p all_in onto
- * the refined and/or coarsenend grid.
- * It assumes the vectors in @p all_in
- * denote the same vectors
- * as in @p all_in as parameter of
- * <tt>prepare_for_refinement_and_coarsening(all_in)</tt>.
- * However, there is no way of verifying
- * this internally, so be careful here.
- *
- * Calling this function is
- * allowed only if first
- * Triangulation::prepare_coarsening_and_refinement,
- * second
- * @p SolutionTransfer::prepare_for_coarsening_and_refinement,
- * an then third
- * Triangulation::execute_coarsening_and_refinement
- * are called before. Multiple
- * calling of this function is
- * NOT allowed. Interpolating
- * several functions can be
- * performed in one step.
- *
- * The number of output vectors
- * is assumed to be the same as
- * the number of input
- * vectors. Also, the sizes of
- * the output vectors are assumed
- * to be of the right size
- * (@p n_dofs_refined). Otherwise
- * an assertion will be thrown.
- */
- void interpolate (const std::vector<VECTOR>&all_in,
- std::vector<VECTOR> &all_out) const;
-
- /**
- * Same as the previous function.
- * It interpolates only one function.
- * It assumes the vectors having the
- * right sizes (i.e. <tt>in.size()==n_dofs_old</tt>,
- * <tt>out.size()==n_dofs_refined</tt>)
- *
- * Multiple calling of this function is
- * NOT allowed. Interpolating
- * several functions can be performed
- * in one step by using
- * <tt>interpolate (all_in, all_out)</tt>
- */
- void interpolate (const VECTOR &in,
- VECTOR &out) const;
-
- /**
- * Determine an estimate for the
- * memory consumption (in bytes)
- * of this object.
- */
+ public:
+
+ /**
+ * Constructor, takes the current DoFHandler
+ * as argument.
+ */
+ SolutionTransfer(const DH &dof);
+
+ /**
+ * Destructor
+ */
+ ~SolutionTransfer();
+
+ /**
+ * Reinit this class to the state that it
+ * had directly after calling the
+ * constructor.
+ */
+ void clear();
+
+ /**
+ * Prepares the @p SolutionTransfer for
+ * pure refinement. It
+ * stores the dof indices of each cell.
+ * After calling this function
+ * only calling the @p refine_interpolate
+ * functions is allowed.
+ */
+ void prepare_for_pure_refinement();
+
+ /**
+ * Prepares the @p SolutionTransfer for
+ * coarsening and refinement. It
+ * stores the dof indices of each cell and
+ * stores the dof values of the vectors in
+ * @p all_in in each cell that'll be coarsened.
+ * @p all_in includes all vectors
+ * that are to be interpolated
+ * onto the new (refined and/or
+ * coarsened) grid.
+ */
+ void prepare_for_coarsening_and_refinement (const std::vector<VECTOR> &all_in);
+
+ /**
+ * Same as previous function
+ * but for only one discrete function
+ * to be interpolated.
+ */
+ void prepare_for_coarsening_and_refinement (const VECTOR &in);
+
+ /**
+ * This function
+ * interpolates the discrete function @p in,
+ * which is a vector on the grid before the
+ * refinement, to the function @p out
+ * which then is a vector on the refined grid.
+ * It assumes that the vectors have the
+ * right sizes (i.e. <tt>in.size()==n_dofs_old</tt>,
+ * <tt>out.size()==n_dofs_refined</tt>)
+ *
+ * Calling this function is allowed only
+ * after @p prepare_for_pure_refinement has
+ * been called and the refinement has
+ * been executed.
+ * Multiple calling of this function is
+ * allowed, e.g. for interpolating several
+ * functions.
+ */
+ void refine_interpolate (const VECTOR &in,
+ VECTOR &out) const;
+
+ /**
+ * This function
+ * interpolates the discrete functions
+ * that are stored in @p all_in onto
+ * the refined and/or coarsened grid.
+ * It assumes the vectors in @p all_in
+ * denote the same vectors
+ * as the @p all_in parameter of
+ * <tt>prepare_for_coarsening_and_refinement(all_in)</tt>.
+ * However, there is no way of verifying
+ * this internally, so be careful here.
+ *
+ * Calling this function is
+ * allowed only if first
+ * Triangulation::prepare_coarsening_and_refinement,
+ * second
+ * @p SolutionTransfer::prepare_for_coarsening_and_refinement,
+ * and then third
+ * Triangulation::execute_coarsening_and_refinement
+ * are called before. Multiple
+ * calling of this function is
+ * NOT allowed. Interpolating
+ * several functions can be
+ * performed in one step.
+ *
+ * The number of output vectors
+ * is assumed to be the same as
+ * the number of input
+ * vectors. Also, the output
+ * vectors are assumed to have
+ * the right size
+ * (@p n_dofs_refined). Otherwise
+ * an assertion will be thrown.
+ */
+ void interpolate (const std::vector<VECTOR> &all_in,
+ std::vector<VECTOR> &all_out) const;
+
+ /**
+ * Same as the previous function.
+ * It interpolates only one function.
+ * It assumes that the vectors have the
+ * right sizes (i.e. <tt>in.size()==n_dofs_old</tt>,
+ * <tt>out.size()==n_dofs_refined</tt>)
+ *
+ * Multiple calling of this function is
+ * NOT allowed. Interpolating
+ * several functions can be performed
+ * in one step by using
+ * <tt>interpolate (all_in, all_out)</tt>
+ */
+ void interpolate (const VECTOR &in,
+ VECTOR &out) const;
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes)
+ * of this object.
+ */
+ std::size_t memory_consumption () const;
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcNotPrepared);
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcAlreadyPrepForRef);
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcAlreadyPrepForCoarseAndRef);
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcTriaPrepCoarseningNotCalledBefore);
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcNoInVectorsGiven);
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcVectorsDifferFromInVectors);
+
+ /**
+ * Exception
+ */
+ DeclException0(ExcNumberOfDoFsPerCellHasChanged);
+
+ private:
+
+ /**
+ * Pointer to the degree of freedom handler
+ * to work with.
+ */
+ SmartPointer<const DH,SolutionTransfer<dim,VECTOR,DH> > dof_handler;
+
+ /**
+ * Stores the number of DoFs before the
+ * refinement and/or coarsening.
+ */
+ unsigned int n_dofs_old;
+
+ /**
+ * Declaration of
+ * @p PreparationState that
+ * denotes the three possible
+ * states of the
+ * @p SolutionTransfer: being
+ * prepared for 'pure
+ * refinement', prepared for
+ * 'coarsening and refinement' or
+ * not prepared.
+ */
+ enum PreparationState
+ {
+ none, pure_refinement, coarsening_and_refinement
+ };
+
+ /**
+ * Definition of the respective variable.
+ */
+ PreparationState prepared_for;
+
+
+ /**
+ * Is used for @p prepare_for_refining
+ * (of course also for
+ * @p prepare_for_refining_and_coarsening)
+ * and stores all dof indices
+ * of the cells that'll be refined
+ */
+ std::vector<std::vector<types::global_dof_index> > indices_on_cell;
+
+ /**
+ * All cell data (the dof indices and
+ * the dof values)
+ * should be accessible from each cell.
+ * As each cell has got only one
+ * @p user_pointer, multiple pointers to the
+ * data need to be packetized in a structure.
+ * Note that in our case on each cell
+ * either the
+ * <tt>vector<unsigned int> indices</tt> (if the cell
+ * will be refined) or the
+ * <tt>vector<double> dof_values</tt> (if the
+ * children of this cell will be deleted)
+ * is needed, hence one @p user_pointer should
+ * be sufficient, but to allow some error checks
+ * and to protect the user from making
+ * errors, the @p user_pointer will be
+ * 'multiplied' by this structure.
+ */
+ struct Pointerstruct
+ {
+ Pointerstruct() : indices_ptr(0), dof_values_ptr(0), active_fe_index(0) {};
+ Pointerstruct(std::vector<unsigned int> *indices_ptr_in,
+ const unsigned int active_fe_index_in = 0)
+ :
+ indices_ptr(indices_ptr_in),
+ dof_values_ptr (0),
+ active_fe_index(active_fe_index_in) {};
+ Pointerstruct(std::vector<Vector<typename VECTOR::value_type> > *dof_values_ptr_in,
+ const unsigned int active_fe_index_in = 0) :
+ indices_ptr (0),
+ dof_values_ptr(dof_values_ptr_in),
+ active_fe_index(active_fe_index_in) {};
std::size_t memory_consumption () const;
- /**
- * Exception
- */
- DeclException0(ExcNotPrepared);
-
- /**
- * Exception
- */
- DeclException0(ExcAlreadyPrepForRef);
-
- /**
- * Exception
- */
- DeclException0(ExcAlreadyPrepForCoarseAndRef);
-
- /**
- * Exception
- */
- DeclException0(ExcTriaPrepCoarseningNotCalledBefore);
-
- /**
- * Exception
- */
- DeclException0(ExcNoInVectorsGiven);
-
- /**
- * Exception
- */
- DeclException0(ExcVectorsDifferFromInVectors);
-
- /**
- * Exception
- */
- DeclException0(ExcNumberOfDoFsPerCellHasChanged);
-
- private:
-
- /**
- * Pointer to the degree of freedom handler
- * to work with.
- */
- SmartPointer<const DH,SolutionTransfer<dim,VECTOR,DH> > dof_handler;
-
- /**
- * Stores the number of DoFs before the
- * refinement and/or coarsening.
- */
- unsigned int n_dofs_old;
-
- /**
- * Declaration of
- * @p PreparationState that
- * denotes the three possible
- * states of the
- * @p SolutionTransfer: being
- * prepared for 'pure
- * refinement', prepared for
- * 'coarsening and refinement' or
- * not prepared.
- */
- enum PreparationState {
- none, pure_refinement, coarsening_and_refinement
- };
-
- /**
- * Definition of the respective variable.
- */
- PreparationState prepared_for;
-
-
- /**
- * Is used for @p prepare_for_refining
- * (of course also for
- * @p repare_for_refining_and_coarsening)
- * and stores all dof indices
- * of the cells that'll be refined
- */
- std::vector<std::vector<types::global_dof_index> > indices_on_cell;
-
- /**
- * All cell data (the dof indices and
- * the dof values)
- * should be accessible from each cell.
- * As each cell has got only one
- * @p user_pointer, multiple pointers to the
- * data need to be packetized in a structure.
- * Note that in our case on each cell
- * either the
- * <tt>vector<unsigned int> indices</tt> (if the cell
- * will be refined) or the
- * <tt>vector<double> dof_values</tt> (if the
- * children of this cell will be deleted)
- * is needed, hence one @p user_pointer should
- * be sufficient, but to allow some error checks
- * and to preserve the user from making
- * user errors the @p user_pointer will be
- * 'multiplied' by this structure.
- */
- struct Pointerstruct {
- Pointerstruct() : indices_ptr(0), dof_values_ptr(0), active_fe_index(0) {};
- Pointerstruct(std::vector<unsigned int> *indices_ptr_in,
- const unsigned int active_fe_index_in = 0)
- :
- indices_ptr(indices_ptr_in),
- dof_values_ptr (0),
- active_fe_index(active_fe_index_in) {};
- Pointerstruct(std::vector<Vector<typename VECTOR::value_type> > *dof_values_ptr_in,
- const unsigned int active_fe_index_in = 0) :
- indices_ptr (0),
- dof_values_ptr(dof_values_ptr_in),
- active_fe_index(active_fe_index_in) {};
- std::size_t memory_consumption () const;
-
- std::vector<unsigned int> *indices_ptr;
- std::vector<Vector<typename VECTOR::value_type> > *dof_values_ptr;
- unsigned int active_fe_index;
- };
-
- /**
- * Map mapping from level and index of cell
- * to the @p Pointerstructs (cf. there).
- * This map makes it possible to keep all
- * the information needed to transfer the
- * solution inside this object rather than
- * using user pointers of the Triangulation
- * for this purpose.
- */
- std::map<std::pair<unsigned int, unsigned int>, Pointerstruct> cell_map;
-
- /**
- * Is used for
- * @p prepare_for_refining_and_coarsening
- * The interpolated dof values
- * of all cells that'll be coarsened
- * will be stored in this vector.
- */
- std::vector<std::vector<Vector<typename VECTOR::value_type> > > dof_values_on_cell;
+ std::vector<unsigned int> *indices_ptr;
+ std::vector<Vector<typename VECTOR::value_type> > *dof_values_ptr;
+ unsigned int active_fe_index;
+ };
+
+ /**
+ * Map mapping from level and index of cell
+ * to the @p Pointerstructs (cf. there).
+ * This map makes it possible to keep all
+ * the information needed to transfer the
+ * solution inside this object rather than
+ * using user pointers of the Triangulation
+ * for this purpose.
+ */
+ std::map<std::pair<unsigned int, unsigned int>, Pointerstruct> cell_map;
+
+ /**
+ * Is used for
+ * @p prepare_for_refining_and_coarsening
+ * The interpolated dof values
+ * of all cells that'll be coarsened
+ * will be stored in this vector.
+ */
+ std::vector<std::vector<Vector<typename VECTOR::value_type> > > dof_values_on_cell;
};
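For orientation, a sketch of the canonical call sequence for this class, assuming triangulation, dof_handler, fe and solution exist and the refinement/coarsening flags have already been set:

  // Sketch: carry a solution vector from the old mesh to the adapted mesh.
  SolutionTransfer<dim> solution_transfer (dof_handler);

  triangulation.prepare_coarsening_and_refinement ();
  solution_transfer.prepare_for_coarsening_and_refinement (solution);

  triangulation.execute_coarsening_and_refinement ();
  dof_handler.distribute_dofs (fe);

  Vector<double> new_solution (dof_handler.n_dofs());
  solution_transfer.interpolate (solution, new_solution);
  // new_solution now lives on the refined/coarsened mesh.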
interpolate_boundary_values (const Mapping<DH::dimension,DH::space_dimension> &mapping,
const DH &dof,
const typename FunctionMap<DH::space_dimension>::type &function_map,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
const ComponentMask &component_mask = ComponentMask());
- /**
- * @deprecated This function exists mainly
- * for backward compatibility.
- *
- * Same function as above, but
- * taking only one pair of
- * boundary indicator and
- * corresponding boundary
- * function. Calls the other
- * function with remapped
- * arguments.
- *
- */
+ /**
+ * @deprecated This function exists mainly
+ * for backward compatibility.
+ *
+ * Same function as above, but
+ * taking only one pair of
+ * boundary indicator and
+ * corresponding boundary
+ * function. Calls the other
+ * function with remapped
+ * arguments.
+ *
+ */
template <class DH>
void
interpolate_boundary_values (const Mapping<DH::dimension,DH::space_dimension> &mapping,
const DH &dof,
const types::boundary_id boundary_component,
const Function<DH::space_dimension> &boundary_function,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
const ComponentMask &component_mask = ComponentMask());
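As a usage sketch for the single-pair overload declared directly above (the boundary indicator 1 and the value 1.0 are placeholders):

  // Sketch: impose u = 1 on the part of the boundary with indicator 1.
  std::map<types::global_dof_index,double> boundary_values;
  VectorTools::interpolate_boundary_values (dof_handler,
                                            1,
                                            ConstantFunction<dim>(1.),
                                            boundary_values);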
- /**
- * Calls the other
- * interpolate_boundary_values()
- * function, see above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
+ /**
+ * Calls the other
+ * interpolate_boundary_values()
+ * function, see above, with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
template <class DH>
void
interpolate_boundary_values (const DH &dof,
const DoFHandler<dim,spacedim> &dof,
const typename FunctionMap<spacedim>::type &boundary_functions,
const Quadrature<dim-1> &q,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
std::vector<unsigned int> component_mapping = std::vector<unsigned int>());
- /**
- * Calls the project_boundary_values()
- * function, see above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
+ /**
+ * Calls the project_boundary_values()
+ * function, see above, with
+ * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ */
template <int dim, int spacedim>
void project_boundary_values (const DoFHandler<dim,spacedim> &dof,
const typename FunctionMap<spacedim>::type &boundary_function,
const Quadrature<dim-1> &q,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
std::vector<unsigned int> component_mapping = std::vector<unsigned int>());
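A hedged usage sketch for the projection variant above, inside code templated on <int dim>; boundary_function is assumed to be some existing Function<dim> holding the Dirichlet data, and fe the finite element in use:

  // Sketch: L2-project the boundary data onto the trace of the finite element
  // space instead of interpolating it, which can be preferable for
  // higher-order elements or non-smooth data.
  std::map<types::global_dof_index,double> boundary_values;
  typename FunctionMap<dim>::type boundary_functions;
  boundary_functions[0] = &boundary_function;
  VectorTools::project_boundary_values (dof_handler,
                                        boundary_functions,
                                        QGauss<dim-1>(fe.degree+1),
                                        boundary_values);
  // boundary_values can then be passed to MatrixTools::apply_boundary_values().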
- /**
- * Project a function to the boundary of
- * the domain, using the given quadrature
- * formula for the faces. This function
- * identifies the degrees of freedom
- * subject to Dirichlet boundary
- * conditions, adds them to the list of
- * constrained DoFs in @p constraints and
- * sets the respective inhomogeneity to
- * the value resulting from the
- * projection operation. If this routine
- * encounters a DoF that already is
- * constrained (for instance by a hanging
- * node constraint, see below, or any
- * other type of constraint, e.g. from
- * periodic boundary conditions), the old
- * setting of the constraint (dofs the
- * entry is constrained to,
- * inhomogeneities) is kept and nothing
- * happens.
- *
- * @note When combining adaptively
- * refined meshes with hanging node
- * constraints and boundary conditions
- * like from the current function within
- * one ConstraintMatrix object, the
- * hanging node constraints should always
- * be set first, and then the boundary
- * conditions since boundary conditions
- * are not set in the second operation on
- * degrees of freedom that are already
- * constrained. This makes sure that the
- * discretization remains conforming as
- * is needed. See the discussion on
- * conflicting constraints in the module
- * on @ref constraints .
- *
- * If @p component_mapping is empty, it
- * is assumed that the number of
- * components of @p boundary_function
- * matches that of the finite element
- * used by @p dof.
- *
- * In 1d, projection equals
- * interpolation. Therefore,
- * interpolate_boundary_values is
- * called.
- *
- * @arg @p component_mapping: if the
- * components in @p boundary_functions
- * and @p dof do not coincide, this
- * vector allows them to be
- * remapped. If the vector is not
- * empty, it has to have one entry for
- * each component in @p dof. This entry
- * is the component number in @p
- * boundary_functions that should be
- * used for this component in @p
- * dof. By default, no remapping is
- * applied.
- *
- * @ingroup constraints
- */
+ /**
+ * Project a function to the boundary of
+ * the domain, using the given quadrature
+ * formula for the faces. This function
+ * identifies the degrees of freedom
+ * subject to Dirichlet boundary
+ * conditions, adds them to the list of
+ * constrained DoFs in @p constraints and
+ * sets the respective inhomogeneity to
+ * the value resulting from the
+ * projection operation. If this routine
+ * encounters a DoF that already is
+ * constrained (for instance by a hanging
+ * node constraint, see below, or any
+ * other type of constraint, e.g. from
+ * periodic boundary conditions), the old
+ * setting of the constraint (dofs the
+ * entry is constrained to,
+ * inhomogeneities) is kept and nothing
+ * happens.
+ *
+ * @note When combining adaptively
+ * refined meshes with hanging node
+ * constraints and boundary conditions
+ * like from the current function within
+ * one ConstraintMatrix object, the
+ * hanging node constraints should always
+ * be set first, and then the boundary
+ * conditions since boundary conditions
+ * are not set in the second operation on
+ * degrees of freedom that are already
+ * constrained. This makes sure that the
+ * discretization remains conforming as
+ * is needed. See the discussion on
+ * conflicting constraints in the module
+ * on @ref constraints .
+ *
+ * If @p component_mapping is empty, it
+ * is assumed that the number of
+ * components of @p boundary_function
+ * matches that of the finite element
+ * used by @p dof.
+ *
+ * In 1d, projection equals
+ * interpolation. Therefore,
+ * interpolate_boundary_values is
+ * called.
+ *
+ * @arg @p component_mapping: if the
+ * components in @p boundary_functions
+ * and @p dof do not coincide, this
+ * vector allows them to be
+ * remapped. If the vector is not
+ * empty, it has to have one entry for
+ * each component in @p dof. This entry
+ * is the component number in @p
+ * boundary_functions that should be
+ * used for this component in @p
+ * dof. By default, no remapping is
+ * applied.
+ *
+ * @ingroup constraints
+ */
template <int dim, int spacedim>
void project_boundary_values (const Mapping<dim, spacedim> &mapping,
const DoFHandler<dim,spacedim> &dof,
}
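// Editorial sketch (not part of the patch): a typical call to the map-based
// overload documented above, assuming a DoFHandler<dim> `dof_handler`, its
// FiniteElement `fe`, and a user-defined Function<dim> `exact_solution`
// already exist in a function templated on `dim`:
//
//   QGauss<dim-1>                            face_quadrature (fe.degree + 1);
//   typename FunctionMap<dim>::type          boundary_functions;
//   boundary_functions[0] = &exact_solution;   // impose on boundary_id 0
//   std::map<types::global_dof_index,double> boundary_values;
//   VectorTools::project_boundary_values (MappingQ1<dim>(), dof_handler,
//                                         boundary_functions, face_quadrature,
//                                         boundary_values,
//                                         std::vector<unsigned int>()); // no remapping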
- // Find the support points on a cell that
- // are mentioned multiple times in
- // unit_support_points. Mark the first
- // representative of each support point
- // mentioned multiple times by appending
- // its dof index to dofs_of_rep_points.
- // Each multiple point gets to know the dof
- // index of its representative point by the
- // dof_to_rep_dof_table.
-
- // the following vector collects all dofs i,
- // 0<=i<fe.dofs_per_cell, for that
- // unit_support_points[i]
- // is a representative one. i.e.
- // the following vector collects all rep dofs.
- // the position of a rep dof within this vector
- // is called rep index.
+ // Find the support points on a cell that
+ // are mentioned multiple times in
+ // unit_support_points. Mark the first
+ // representative of each support point
+ // mentioned multiple times by appending
+ // its dof index to dofs_of_rep_points.
+ // Each multiple point gets to know the dof
+ // index of its representative point by the
+ // dof_to_rep_dof_table.
+
+ // the following vector collects all dofs i,
+ // 0<=i<fe.dofs_per_cell, for that
+ // unit_support_points[i]
+ // is a representative one. i.e.
+ // the following vector collects all rep dofs.
+ // the position of a rep dof within this vector
+ // is called rep index.
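// (Editorial illustration with a hypothetical element: for an FESystem of
// two Q1 components in 2d, dofs 0 and 1 share the unit support point of
// vertex 0, so dof 0 becomes the representative; dofs_of_rep_points for
// that fe then starts {0, 2, 4, 6}, and dof_to_rep_index_table maps both
// dof 0 and dof 1 to rep index 0.)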
- std::vector<std::vector<unsigned int> > dofs_of_rep_points(fe.size());
+ std::vector<std::vector<types::global_dof_index> > dofs_of_rep_points(fe.size());
- // the following table converts a dof i
- // to the rep index.
+ // the following table converts a dof i
+ // to the rep index.
- std::vector<std::vector<unsigned int> > dof_to_rep_index_table(fe.size());
+ std::vector<std::vector<types::global_dof_index> > dof_to_rep_index_table(fe.size());
std::vector<unsigned int> n_rep_points (fe.size(), 0);
const unsigned int max_rep_points = *std::max_element (n_rep_points.begin(),
n_rep_points.end());
- std::vector<unsigned int> dofs_on_cell (fe.max_dofs_per_cell());
+ std::vector<types::global_dof_index> dofs_on_cell (fe.max_dofs_per_cell());
std::vector<Point<DH::space_dimension> > rep_points (max_rep_points);
- // get space for the values of the
- // function at the rep support points.
- //
- // have two versions, one for system fe
- // and one for scalar ones, to take the
- // more efficient one respectively
+ // get space for the values of the
+ // function at the rep support points.
+ //
+ // have two versions, one for system fe
+ // and one for scalar ones, to take the
+ // more efficient one respectively
std::vector<std::vector<double> > function_values_scalar(fe.size());
std::vector<std::vector<Vector<double> > > function_values_system(fe.size());
{
void
interpolate_zero_boundary_values (const dealii::DoFHandler<1> &dof_handler,
- std::map<unsigned int,double> &boundary_values)
+ std::map<types::global_dof_index,double> &boundary_values)
{
- // we only need to find the
- // left-most and right-most
- // vertex and query its vertex
- // dof indices. that's easy :-)
+ // we only need to find the
+ // left-most and right-most
+ // vertex and query its vertex
+ // dof indices. that's easy :-)
for (unsigned int direction=0; direction<2; ++direction)
{
dealii::DoFHandler<1>::cell_iterator
- // codimension 1
+ // codimension 1
void
interpolate_zero_boundary_values (const dealii::DoFHandler<1,2> &dof_handler,
- std::map<unsigned int,double> &boundary_values)
+ std::map<types::global_dof_index,double> &boundary_values)
{
- // we only need to find the
- // left-most and right-most
- // vertex and query its vertex
- // dof indices. that's easy :-)
+ // we only need to find the
+ // left-most and right-most
+ // vertex and query its vertex
+ // dof indices. that's easy :-)
for (unsigned int direction=0; direction<2; ++direction)
{
dealii::DoFHandler<1,2>::cell_iterator
{
const FiniteElement<dim,spacedim> &fe = dof_handler.get_fe();
- // loop over all boundary faces
- // to get all dof indices of
- // dofs on the boundary. note
- // that in 3d there are cases
- // where a face is not at the
- // boundary, yet one of its
- // lines is, and we should
- // consider the degrees of
- // freedom on it as boundary
- // nodes. likewise, in 2d and
- // 3d there are cases where a
- // cell is only at the boundary
- // by one vertex. nevertheless,
- // since we do not support
- // boundaries with dimension
- // less or equal to dim-2, each
- // such boundary dof is also
- // found from some other face
- // that is actually wholly on
- // the boundary, not only by
- // one line or one vertex
+ // loop over all boundary faces
+ // to get all dof indices of
+ // dofs on the boundary. note
+ // that in 3d there are cases
+ // where a face is not at the
+ // boundary, yet one of its
+ // lines is, and we should
+ // consider the degrees of
+ // freedom on it as boundary
+ // nodes. likewise, in 2d and
+ // 3d there are cases where a
+ // cell is only at the boundary
+ // by one vertex. nevertheless,
+ // since we do not support
+ // boundaries with dimension
+ // less or equal to dim-2, each
+ // such boundary dof is also
+ // found from some other face
+ // that is actually wholly on
+ // the boundary, not only by
+ // one line or one vertex
typename dealii::DoFHandler<dim,spacedim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
- std::vector<unsigned int> face_dof_indices (fe.dofs_per_face);
+ std::vector<types::global_dof_index> face_dof_indices (fe.dofs_per_face);
for (; cell!=endc; ++cell)
- for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
- if (cell->at_boundary(f))
- {
- cell->face(f)->get_dof_indices (face_dof_indices);
- for (unsigned int i=0; i<fe.dofs_per_face; ++i)
- // enter zero boundary values
- // for all boundary nodes
- //
- // we need not care about
- // vector valued elements here,
- // since we set all components
- boundary_values[face_dof_indices[i]] = 0.;
- }
+ for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+ if (cell->at_boundary(f))
+ {
+ cell->face(f)->get_dof_indices (face_dof_indices);
+ for (unsigned int i=0; i<fe.dofs_per_face; ++i)
+ // enter zero boundary values
+ // for all boundary nodes
+ //
+ // we need not care about
+ // vector valued elements here,
+ // since we set all components
+ boundary_values[face_dof_indices[i]] = 0.;
+ }
}
}
Assert (vec_result.size() == dof.n_dofs(),
ExcDimensionMismatch (vec_result.size(), dof.n_dofs()));
- // make up boundary values
+ // make up boundary values
- std::map<unsigned int,double> boundary_values;
+ std::map<types::global_dof_index,double> boundary_values;
if (enforce_zero_boundary == true)
- // no need to project boundary
- // values, but enforce
- // homogeneous boundary values
- // anyway
+ // no need to project boundary
+ // values, but enforce
+ // homogeneous boundary values
+ // anyway
internal::
- interpolate_zero_boundary_values (dof, boundary_values);
+ interpolate_zero_boundary_values (dof, boundary_values);
else
- // no homogeneous boundary values
+ // no homogeneous boundary values
if (project_to_boundary_first == true)
- // boundary projection required
+ // boundary projection required
{
- // set up a list of boundary
- // functions for the
- // different boundary
- // parts. We want the
- // function to hold on
- // all parts of the boundary
+ // set up a list of boundary
+ // functions for the
+ // different boundary
+ // parts. We want the
+ // function to hold on
+ // all parts of the boundary
typename FunctionMap<spacedim>::type boundary_functions;
for (types::boundary_id c=0; c<numbers::internal_face_boundary_id; ++c)
boundary_functions[c] = &function;
const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
cell_point.first->get_dof_indices (local_dof_indices);
- for(unsigned int i=0; i<dofs_per_cell; i++)
+ for (unsigned int i=0; i<dofs_per_cell; i++)
rhs_vector(local_dof_indices[i]) = fe_values.shape_value(i,0);
}
const unsigned int dofs_per_cell = cell_point.first->get_fe().dofs_per_cell;
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
cell_point.first->get_dof_indices (local_dof_indices);
- for(unsigned int i=0; i<dofs_per_cell; i++)
+ for (unsigned int i=0; i<dofs_per_cell; i++)
rhs_vector(local_dof_indices[i]) = fe_values.shape_value(i,0);
}
const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
cell_point.first->get_dof_indices (local_dof_indices);
- for(unsigned int i=0; i<dofs_per_cell; i++)
+ for (unsigned int i=0; i<dofs_per_cell; i++)
rhs_vector(local_dof_indices[i]) = orientation * fe_values[vec].value(i,0);
}
const unsigned int dofs_per_cell = cell_point.first->get_fe().dofs_per_cell;
- std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
cell_point.first->get_dof_indices (local_dof_indices);
- for(unsigned int i=0; i<dofs_per_cell; i++)
+ for (unsigned int i=0; i<dofs_per_cell; i++)
rhs_vector(local_dof_indices[i]) = orientation * fe_values[vec].value(i,0);
}
Assert (n_components == i->second->n_components,
ExcDimensionMismatch(n_components, i->second->n_components));
- // field to store the indices
+ // field to store the indices
- std::vector<unsigned int> face_dofs;
+ std::vector<types::global_dof_index> face_dofs;
face_dofs.reserve (DoFTools::max_dofs_per_face(dof));
std::vector<Point<spacedim> > dof_locations;
project_boundary_values (const Mapping<1,1> &mapping,
const DoFHandler<1,1> &dof,
const FunctionMap<1>::type &boundary_functions,
- const Quadrature<0> &,
+ const Quadrature<0> &,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
std::vector<unsigned int> component_mapping)
{
Assert (component_mapping.size() == 0, ExcNotImplemented());
project_boundary_values (const Mapping<1,2> &mapping,
const DoFHandler<1,2> &dof,
const FunctionMap<2>::type &boundary_functions,
- const Quadrature<0> &,
+ const Quadrature<0> &,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
std::vector<unsigned int> component_mapping)
{
Assert (component_mapping.size() == 0, ExcNotImplemented());
template <int dim, int spacedim>
void
project_boundary_values (const Mapping<dim, spacedim> &mapping,
- const DoFHandler<dim, spacedim>&dof,
+ const DoFHandler<dim, spacedim> &dof,
const typename FunctionMap<spacedim>::type &boundary_functions,
const Quadrature<dim-1> &q,
- std::map<unsigned int,double> &boundary_values,
+ std::map<types::global_dof_index,double> &boundary_values,
std::vector<unsigned int> component_mapping)
{
//TODO:[?] In project_boundary_values, no condensation of sparsity
template <int dim>
struct VectorDoFTuple
{
- types::global_dof_index dof_indices[dim];
- unsigned int dof_indices[dim];
++ types::global_dof_index dof_indices[dim];
- VectorDoFTuple ()
- {
- for (unsigned int i=0; i<dim; ++i)
- dof_indices[i] = numbers::invalid_unsigned_int;
- }
+ VectorDoFTuple ()
+ {
+ for (unsigned int i=0; i<dim; ++i)
+ dof_indices[i] = numbers::invalid_unsigned_int;
+ }
- bool operator < (const VectorDoFTuple<dim> &other) const
- {
- for (unsigned int i=0; i<dim; ++i)
- if (dof_indices[i] < other.dof_indices[i])
- return true;
- else
- if (dof_indices[i] > other.dof_indices[i])
- return false;
+ bool operator < (const VectorDoFTuple<dim> &other) const
+ {
+ for (unsigned int i=0; i<dim; ++i)
+ if (dof_indices[i] < other.dof_indices[i])
+ return true;
+ else if (dof_indices[i] > other.dof_indices[i])
return false;
- }
+ return false;
+ }
- bool operator == (const VectorDoFTuple<dim> &other) const
- {
- for (unsigned int i=0; i<dim; ++i)
- if (dof_indices[i] != other.dof_indices[i])
- return false;
+ bool operator == (const VectorDoFTuple<dim> &other) const
+ {
+ for (unsigned int i=0; i<dim; ++i)
+ if (dof_indices[i] != other.dof_indices[i])
+ return false;
- return true;
- }
+ return true;
+ }
- bool operator != (const VectorDoFTuple<dim> &other) const
- {
- return ! (*this == other);
- }
+ bool operator != (const VectorDoFTuple<dim> &other) const
+ {
+ return ! (*this == other);
+ }
};
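// Editorial sketch: the comparison operators above make the tuple usable
// as a key of ordered standard containers, e.g. to associate each
// vector-valued boundary dof with a normal vector (names hypothetical):
//
//   std::map<VectorDoFTuple<dim>, Tensor<1,dim> > dof_vector_to_normal;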
template <int dim>
void
add_tangentiality_constraints (const VectorDoFTuple<dim> &dof_indices,
- const Tensor<1,dim> &tangent_vector,
- ConstraintMatrix &constraints)
+ const Tensor<1,dim> &tangent_vector,
+ ConstraintMatrix &constraints)
{
- // choose the DoF that has the
- // largest component in the
- // tangent_vector as the
- // independent component, and
- // then constrain the others to
- // it. specifically, if, say,
- // component 0 of the tangent
- // vector t is largest by
- // magnitude, then
- // x1=t[1]/t[0]*x_0, etc.
+ // choose the DoF that has the
+ // largest component in the
+ // tangent_vector as the
+ // independent component, and
+ // then constrain the others to
+ // it. specifically, if, say,
+ // component 0 of the tangent
+ // vector t is largest by
+ // magnitude, then
+ // x1=t[1]/t[0]*x_0, etc.
- unsigned int largest_component = 0;
+ types::global_dof_index largest_component = 0;
for (unsigned int d=1; d<dim; ++d)
- if (std::fabs(tangent_vector[d]) > std::fabs(tangent_vector[largest_component]) + 1e-10)
- largest_component = d;
+ if (std::fabs(tangent_vector[d]) > std::fabs(tangent_vector[largest_component]) + 1e-10)
+ largest_component = d;
- // then constrain all of the
- // other degrees of freedom in
- // terms of the one just found
+ // then constrain all of the
+ // other degrees of freedom in
+ // terms of the one just found
for (unsigned int d=0; d<dim; ++d)
- if (d != largest_component)
- if (!constraints.is_constrained(dof_indices.dof_indices[d])
- &&
- constraints.can_store_line(dof_indices.dof_indices[d]))
- {
- constraints.add_line (dof_indices.dof_indices[d]);
-
- if (std::fabs (tangent_vector[d]/tangent_vector[largest_component])
- > std::numeric_limits<double>::epsilon())
- constraints.add_entry (dof_indices.dof_indices[d],
- dof_indices.dof_indices[largest_component],
- tangent_vector[d]/tangent_vector[largest_component]);
- }
+ if (d != largest_component)
+ if (!constraints.is_constrained(dof_indices.dof_indices[d])
+ &&
+ constraints.can_store_line(dof_indices.dof_indices[d]))
+ {
+ constraints.add_line (dof_indices.dof_indices[d]);
+
+ if (std::fabs (tangent_vector[d]/tangent_vector[largest_component])
+ > std::numeric_limits<double>::epsilon())
+ constraints.add_entry (dof_indices.dof_indices[d],
+ dof_indices.dof_indices[largest_component],
+ tangent_vector[d]/tangent_vector[largest_component]);
+ }
}
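// (Editorial worked example, dim==2: for a tangent vector t = (2, 1),
// component 0 is the largest, so the loop above emits exactly
//
//   constraints.add_line  (dof_indices.dof_indices[1]);
//   constraints.add_entry (dof_indices.dof_indices[1],
//                          dof_indices.dof_indices[0],
//                          0.5);     // = t[1]/t[0]
//
// i.e. it enforces x_1 = 0.5 * x_0 along the tangent direction.)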
Assert (false, ExcNotImplemented ());
}
- // This function computes the
- // projection of the boundary
- // function on the boundary
- // in 3d.
+ // This function computes the
+ // projection of the boundary
+ // function on the boundary
+ // in 3d.
template<typename cell_iterator>
void
- compute_face_projection_div_conforming (const cell_iterator& cell,
+ compute_face_projection_div_conforming (const cell_iterator &cell,
const unsigned int face,
- const FEFaceValues<3>& fe_values,
+ const FEFaceValues<3> &fe_values,
const unsigned int first_vector_component,
- const Function<3>& boundary_function,
- const std::vector<DerivativeForm<1,3,3> >& jacobians,
- std::vector<double>& dof_values,
- std::vector<types::global_dof_index>& projected_dofs)
+ const Function<3> &boundary_function,
+ const std::vector<DerivativeForm<1,3,3> > &jacobians,
+ std::vector<double> &dof_values,
- std::vector<unsigned int> &projected_dofs)
++ std::vector<types::global_dof_index> &projected_dofs)
{
- // Compute the intergral over
- // the product of the normal
- // components of the boundary
- // function times the normal
- // components of the shape
- // functions supported on the
- // boundary.
+ // Compute the integral over
+ // the product of the normal
+ // components of the boundary
+ // function times the normal
+ // components of the shape
+ // functions supported on the
+ // boundary.
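// (Editorial restatement in formula form: each contribution assembled
// here is of the type  \int_F (g . n)(phi_i . n) ds,  where g is
// boundary_function, n the outward normal of the face F, and phi_i a
// shape function with nonzero normal component on F.)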
const FEValuesExtractors::Vector vec (first_vector_component);
- const FiniteElement<3>& fe = cell->get_fe ();
- const std::vector<Point<3> >& normals = fe_values.get_normal_vectors ();
+ const FiniteElement<3> &fe = cell->get_fe ();
+ const std::vector<Point<3> > &normals = fe_values.get_normal_vectors ();
const unsigned int
- face_coordinate_directions[GeometryInfo<3>::faces_per_cell][2] = {{1, 2},
- {1, 2},
- {2, 0},
- {2, 0},
- {0, 1},
- {0, 1}};
+ face_coordinate_directions[GeometryInfo<3>::faces_per_cell][2] = {{1, 2},
+ {1, 2},
+ {2, 0},
+ {2, 0},
+ {0, 1},
+ {0, 1}
+ };
std::vector<Vector<double> >
- values (fe_values.n_quadrature_points, Vector<double> (3));
+ values (fe_values.n_quadrature_points, Vector<double> (3));
Vector<double> dof_values_local (fe.dofs_per_face);
{
switch (dim)
{
- case 2:
- {
- for (typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
- cell != dof_handler.end (); ++cell)
- if (cell->at_boundary ())
- for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face (face)->boundary_indicator () == boundary_component)
- {
- // if the FE is a
- // FE_Nothing object
- // there is no work to
- // do
- if (dynamic_cast<const FE_Nothing<dim>*> (&cell->get_fe ()) != 0)
- return;
-
- // This is only
- // implemented, if the
- // FE is a Raviart-Thomas
- // element. If the FE is
- // a FESystem we cannot
- // check this.
- if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
- {
- typedef FiniteElement<dim> FEL;
+ case 2:
+ {
+ for (typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
+ cell != dof_handler.end (); ++cell)
+ if (cell->at_boundary ())
+ for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face (face)->boundary_indicator () == boundary_component)
+ {
+ // if the FE is a
+ // FE_Nothing object
+ // there is no work to
+ // do
+ if (dynamic_cast<const FE_Nothing<dim>*> (&cell->get_fe ()) != 0)
+ return;
+
+ // This is only
+ // implemented, if the
+ // FE is a Raviart-Thomas
+ // element. If the FE is
+ // a FESystem we cannot
+ // check this.
+ if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
+ {
+ typedef FiniteElement<dim> FEL;
- AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
- typename FEL::ExcInterpolationNotImplemented ());
- }
+ AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
+ typename FEL::ExcInterpolationNotImplemented ());
+ }
- fe_values.reinit (cell, face + cell->active_fe_index ()
- * GeometryInfo<dim>::faces_per_cell);
+ fe_values.reinit (cell, face + cell->active_fe_index ()
+ * GeometryInfo<dim>::faces_per_cell);
- const std::vector<DerivativeForm<1,dim,spacedim> > &
- jacobians = fe_values.get_present_fe_values ().get_jacobians ();
+ const std::vector<DerivativeForm<1,dim,spacedim> > &
+ jacobians = fe_values.get_present_fe_values ().get_jacobians ();
- fe_face_values.reinit (cell, face);
- internals::compute_face_projection_div_conforming (cell, face,
- fe_face_values,
- first_vector_component,
- boundary_function,
- jacobians,
- constraints);
- }
+ fe_face_values.reinit (cell, face);
+ internals::compute_face_projection_div_conforming (cell, face,
+ fe_face_values,
+ first_vector_component,
+ boundary_function,
+ jacobians,
+ constraints);
+ }
- break;
- }
+ break;
+ }
- case 3:
- {
- // In three dimensions the
- // edges between two faces
- // are treated twice.
- // Therefore we store the
- // computed values in a
- // vector and copy them over
- // in the ConstraintMatrix
- // after all values have been
- // computed.
- // If we have two values for
- // one edge, we choose the one,
- // which was computed with the
- // higher order element.
- // If both elements are of the
- // same order, we just keep the
- // first value and do not
- // compute a second one.
- const unsigned int& n_dofs = dof_handler.n_dofs ();
- std::vector<double> dof_values (n_dofs);
- std::vector<types::global_dof_index> projected_dofs (n_dofs);
-
- for (unsigned int dof = 0; dof < n_dofs; ++dof)
- projected_dofs[dof] = 0;
-
- for (typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
- cell != dof_handler.end (); ++cell)
- if (cell->at_boundary ())
- for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face (face)->boundary_indicator () == boundary_component)
- {
- // This is only
- // implemented, if the
- // FE is a Raviart-Thomas
- // element. If the FE is
- // a FESystem we cannot
- // check this.
- if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
- {
- typedef FiniteElement<dim> FEL;
+ case 3:
+ {
+ // In three dimensions the
+ // edges between two faces
+ // are treated twice.
+ // Therefore we store the
+ // computed values in a
+ // vector and copy them over
+ // in the ConstraintMatrix
+ // after all values have been
+ // computed.
+ // If we have two values for
+ // one edge, we choose the one,
+ // which was computed with the
+ // higher order element.
+ // If both elements are of the
+ // same order, we just keep the
+ // first value and do not
+ // compute a second one.
+ const unsigned int &n_dofs = dof_handler.n_dofs ();
+ std::vector<double> dof_values (n_dofs);
- std::vector<unsigned int> projected_dofs (n_dofs);
++ std::vector<types::global_dof_index> projected_dofs (n_dofs);
+
+ for (unsigned int dof = 0; dof < n_dofs; ++dof)
+ projected_dofs[dof] = 0;
+
+ for (typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
+ cell != dof_handler.end (); ++cell)
+ if (cell->at_boundary ())
+ for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face (face)->boundary_indicator () == boundary_component)
+ {
+ // This is only
+ // implemented, if the
+ // FE is a Raviart-Thomas
+ // element. If the FE is
+ // a FESystem we cannot
+ // check this.
+ if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
+ {
+ typedef FiniteElement<dim> FEL;
- AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
- typename FEL::ExcInterpolationNotImplemented ());
- }
+ AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
+ typename FEL::ExcInterpolationNotImplemented ());
+ }
- fe_values.reinit (cell, face + cell->active_fe_index ()
- * GeometryInfo<dim>::faces_per_cell);
+ fe_values.reinit (cell, face + cell->active_fe_index ()
+ * GeometryInfo<dim>::faces_per_cell);
- const std::vector<DerivativeForm<1,dim ,spacedim> >&
- jacobians = fe_values.get_present_fe_values ().get_jacobians ();
+ const std::vector<DerivativeForm<1,dim ,spacedim> > &
+ jacobians = fe_values.get_present_fe_values ().get_jacobians ();
- fe_face_values.reinit (cell, face);
- internals::compute_face_projection_div_conforming (cell, face,
- fe_face_values,
- first_vector_component,
- boundary_function,
- jacobians, dof_values,
- projected_dofs);
- }
+ fe_face_values.reinit (cell, face);
+ internals::compute_face_projection_div_conforming (cell, face,
+ fe_face_values,
+ first_vector_component,
+ boundary_function,
+ jacobians, dof_values,
+ projected_dofs);
+ }
- for (unsigned int dof = 0; dof < n_dofs; ++dof)
- if ((projected_dofs[dof] != 0) && !(constraints.is_constrained (dof)))
- {
- constraints.add_line (dof);
+ for (unsigned int dof = 0; dof < n_dofs; ++dof)
+ if ((projected_dofs[dof] != 0) && !(constraints.is_constrained (dof)))
+ {
+ constraints.add_line (dof);
- if (std::abs (dof_values[dof]) > 1e-14)
- constraints.set_inhomogeneity (dof, dof_values[dof]);
- }
+ if (std::abs (dof_values[dof]) > 1e-14)
+ constraints.set_inhomogeneity (dof, dof_values[dof]);
+ }
- break;
- }
+ break;
+ }
- default:
- Assert (false, ExcNotImplemented ());
+ default:
+ Assert (false, ExcNotImplemented ());
}
}
switch (dim)
{
- case 2:
- {
- for (typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
- cell != dof_handler.end (); ++cell)
- if (cell->at_boundary ())
- for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face (face)->boundary_indicator () == boundary_component)
- {
- // This is only
- // implemented, if the
- // FE is a Raviart-Thomas
- // element. If the FE is
- // a FESystem we cannot
- // check this.
- if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
- {
- typedef FiniteElement<dim> FEL;
+ case 2:
+ {
+ for (typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
+ cell != dof_handler.end (); ++cell)
+ if (cell->at_boundary ())
+ for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face (face)->boundary_indicator () == boundary_component)
+ {
+ // This is only
+ // implemented, if the
+ // FE is a Raviart-Thomas
+ // element. If the FE is
+ // a FESystem we cannot
+ // check this.
+ if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
+ {
+ typedef FiniteElement<dim> FEL;
- AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
- typename FEL::ExcInterpolationNotImplemented ());
- }
+ AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
+ typename FEL::ExcInterpolationNotImplemented ());
+ }
- fe_values.reinit (cell, face + cell->active_fe_index ()
- * GeometryInfo<dim>::faces_per_cell);
+ fe_values.reinit (cell, face + cell->active_fe_index ()
+ * GeometryInfo<dim>::faces_per_cell);
- const std::vector<DerivativeForm<1,dim,spacedim> > &
- jacobians = fe_values.get_present_fe_values ().get_jacobians ();
+ const std::vector<DerivativeForm<1,dim,spacedim> > &
+ jacobians = fe_values.get_present_fe_values ().get_jacobians ();
- fe_face_values.reinit (cell, face);
- internals::compute_face_projection_div_conforming (cell, face,
- fe_face_values.get_present_fe_values (),
- first_vector_component,
- boundary_function,
- jacobians,
- constraints);
- }
+ fe_face_values.reinit (cell, face);
+ internals::compute_face_projection_div_conforming (cell, face,
+ fe_face_values.get_present_fe_values (),
+ first_vector_component,
+ boundary_function,
+ jacobians,
+ constraints);
+ }
- break;
- }
+ break;
+ }
- case 3:
- {
- const unsigned int& n_dofs = dof_handler.n_dofs ();
- std::vector<double> dof_values (n_dofs);
- std::vector<types::global_dof_index> projected_dofs (n_dofs);
-
- for (unsigned int dof = 0; dof < n_dofs; ++dof)
- projected_dofs[dof] = 0;
-
- for (typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
- cell != dof_handler.end (); ++cell)
- if (cell->at_boundary ())
- for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face (face)->boundary_indicator () == boundary_component)
- {
- // This is only
- // implemented, if the
- // FE is a Raviart-Thomas
- // element. If the FE is
- // a FESystem we cannot
- // check this.
- if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
- {
- typedef FiniteElement<dim> FEL;
+ case 3:
+ {
+ const unsigned int &n_dofs = dof_handler.n_dofs ();
+ std::vector<double> dof_values (n_dofs);
- std::vector<unsigned int> projected_dofs (n_dofs);
++ std::vector<types::global_dof_index> projected_dofs (n_dofs);
+
+ for (unsigned int dof = 0; dof < n_dofs; ++dof)
+ projected_dofs[dof] = 0;
+
+ for (typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
+ cell != dof_handler.end (); ++cell)
+ if (cell->at_boundary ())
+ for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face (face)->boundary_indicator () == boundary_component)
+ {
+ // This is only
+ // implemented, if the
+ // FE is a Raviart-Thomas
+ // element. If the FE is
+ // a FESystem we cannot
+ // check this.
+ if (dynamic_cast<const FESystem<dim>*> (&cell->get_fe ()) == 0)
+ {
+ typedef FiniteElement<dim> FEL;
- AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
- typename FEL::ExcInterpolationNotImplemented ());
- }
+ AssertThrow (dynamic_cast<const FE_RaviartThomas<dim>*> (&cell->get_fe ()) != 0,
+ typename FEL::ExcInterpolationNotImplemented ());
+ }
- fe_values.reinit (cell, face + cell->active_fe_index ()
- * GeometryInfo<dim>::faces_per_cell);
+ fe_values.reinit (cell, face + cell->active_fe_index ()
+ * GeometryInfo<dim>::faces_per_cell);
- const std::vector<DerivativeForm<1,dim,spacedim> > &
- jacobians = fe_values.get_present_fe_values ().get_jacobians ();
+ const std::vector<DerivativeForm<1,dim,spacedim> > &
+ jacobians = fe_values.get_present_fe_values ().get_jacobians ();
- fe_face_values.reinit (cell, face);
- internals::compute_face_projection_div_conforming (cell, face,
- fe_face_values.get_present_fe_values (),
- first_vector_component,
- boundary_function,
- jacobians, dof_values,
- projected_dofs);
- }
+ fe_face_values.reinit (cell, face);
+ internals::compute_face_projection_div_conforming (cell, face,
+ fe_face_values.get_present_fe_values (),
+ first_vector_component,
+ boundary_function,
+ jacobians, dof_values,
+ projected_dofs);
+ }
- for (unsigned int dof = 0; dof < n_dofs; ++dof)
- if ((projected_dofs[dof] != 0) && !(constraints.is_constrained (dof)))
- {
- constraints.add_line (dof);
+ for (unsigned int dof = 0; dof < n_dofs; ++dof)
+ if ((projected_dofs[dof] != 0) && !(constraints.is_constrained (dof)))
+ {
+ constraints.add_line (dof);
- if (std::abs (dof_values[dof]) > 1e-14)
- constraints.set_inhomogeneity (dof, dof_values[dof]);
- }
+ if (std::abs (dof_values[dof]) > 1e-14)
+ constraints.set_inhomogeneity (dof, dof_values[dof]);
+ }
- break;
- }
+ break;
+ }
- default:
- Assert (false, ExcNotImplemented ());
+ default:
+ Assert (false, ExcNotImplemented ());
}
}
"to imposing Dirichlet values on the vector-valued "
"quantity."));
- std::vector<unsigned int> face_dofs;
+ std::vector<types::global_dof_index> face_dofs;
- // create FE and mapping
- // collections for all elements in
- // use by this DoFHandler
+ // create FE and mapping
+ // collections for all elements in
+ // use by this DoFHandler
hp::FECollection<dim,spacedim> fe_collection (dof_handler.get_fe());
hp::MappingCollection<dim,spacedim> mapping_collection;
for (unsigned int i=0; i<fe_collection.size(); ++i)
i != ranges.end(); )
{
std::vector<Range>::iterator
- next = i;
+ next = i;
++next;
- unsigned int first_index = i->begin;
- unsigned int last_index = i->end;
+ types::global_dof_index first_index = i->begin;
+ types::global_dof_index last_index = i->end;
- // see if we can merge any of
- // the following ranges
+ // see if we can merge any of
+ // the following ranges
bool can_merge = false;
while (next != ranges.end() &&
(next->begin <= last_index))
void
- IndexSet::read(std::istream & in)
+ IndexSet::read(std::istream &in)
{
- unsigned int s, numranges, b, e;
+ types::global_dof_index s;
+ unsigned int numranges;
-
++
in >> s >> numranges;
ranges.clear();
set_size(s);
- for (unsigned int i=0;i<numranges;++i)
+ for (unsigned int i=0; i<numranges; ++i)
{
+ types::global_dof_index b, e;
in >> b >> e;
add_range(b,e);
}
}
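// Editorial sketch of the plain-text format consumed by read(): the total
// size, the number of ranges, then one half-open [begin,end) pair per
// range. For instance, a set of size 100 owning {0,...,9} and {50,...,59}
// can be restored like this (std::istringstream from <sstream> assumed):
//
//   std::istringstream in ("100 2 0 10 50 60");
//   IndexSet is;
//   is.read (in);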
void
- IndexSet::block_read(std::istream & in)
+ IndexSet::block_read(std::istream &in)
{
- unsigned int size;
+ types::global_dof_index size;
size_t n_ranges;
- in.read(reinterpret_cast<char*>(&size), sizeof(size));
- in.read(reinterpret_cast<char*>(&n_ranges), sizeof(n_ranges));
- // we have to clear ranges first
+ in.read(reinterpret_cast<char *>(&size), sizeof(size));
+ in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
+ // we have to clear ranges first
ranges.clear();
set_size(size);
ranges.resize(n_ranges, Range(0,0));
}
- void IndexSet::fill_index_vector(std::vector<types::global_dof_index> & indices) const
-void IndexSet::fill_index_vector(std::vector<unsigned int> &indices) const
++void IndexSet::fill_index_vector(std::vector<types::global_dof_index> &indices) const
{
compress();
#endif
else
{
- std::vector<types::global_dof_index> indices;
- std::vector<unsigned int> indices;
++ std::vector<types::global_dof_index> indices;
fill_index_vector(indices);
return Epetra_Map (-1,
std::size_t
IndexSet::memory_consumption () const
{
- return MemoryConsumption::memory_consumption (ranges) +
- MemoryConsumption::memory_consumption (is_compressed) +
- MemoryConsumption::memory_consumption (index_space_size);
+ return (MemoryConsumption::memory_consumption (ranges) +
- MemoryConsumption::memory_consumption (is_compressed) +
- MemoryConsumption::memory_consumption (index_space_size));
++ MemoryConsumption::memory_consumption (is_compressed) +
++ MemoryConsumption::memory_consumption (index_space_size));
}
const unsigned int List::max_int_value
- = std::numeric_limits<unsigned int>::max();
+ = std::numeric_limits<unsigned int>::max();
- const char* List::description_init = "[List";
+ const char *List::description_init = "[List";
- List::List (const PatternBase &p,
+ List::List (const PatternBase &p,
const unsigned int min_elements,
const unsigned int max_elements)
- :
- pattern (p.clone()),
- min_elements (min_elements),
- max_elements (max_elements)
+ :
+ pattern (p.clone()),
+ min_elements (min_elements),
+ max_elements (max_elements)
{
Assert (min_elements <= max_elements,
ExcInvalidRange (min_elements, max_elements));
const unsigned int Map::max_int_value
- = std::numeric_limits<unsigned int>::max();
+ = std::numeric_limits<unsigned int>::max();
- const char* Map::description_init = "[Map";
+ const char *Map::description_init = "[Map";
- Map::Map (const PatternBase &p_key,
- const PatternBase &p_value,
+ Map::Map (const PatternBase &p_key,
+ const PatternBase &p_value,
const unsigned int min_elements,
const unsigned int max_elements)
- :
- key_pattern (p_key.clone()),
- value_pattern (p_value.clone()),
- min_elements (min_elements),
- max_elements (max_elements)
+ :
+ key_pattern (p_key.clone()),
+ value_pattern (p_value.clone()),
+ min_elements (min_elements),
+ max_elements (max_elements)
{
Assert (min_elements <= max_elements,
ExcInvalidRange (min_elements, max_elements));
Assert (subface_no < GeometryInfo<dim>::max_children_per_face,
ExcInternalError());
- // As the quadrature points created by
- // QProjector are on subfaces in their
- // "standard location" we have to use a
- // permutation of the equivalent subface
- // number in order to respect face
- // orientation, flip and rotation. The
- // information we need here is exactly the
- // same as the
- // GeometryInfo<3>::child_cell_on_face info
- // for the bottom face (face 4) of a hex, as
- // on this the RefineCase of the cell matches
- // that of the face and the subfaces are
- // numbered in the same way as the child
- // cells.
-
- // in 3d, we have to account for faces that
- // have non-standard face orientation, flip
- // and rotation. thus, we have to store
- // _eight_ data sets per face or subface
- // already for the isotropic
- // case. Additionally, we have three
- // different refinement cases, resulting in
- // <tt>4 + 2 + 2 = 8</tt> different subfaces
- // for each face.
+ // As the quadrature points created by
+ // QProjector are on subfaces in their
+ // "standard location" we have to use a
+ // permutation of the equivalent subface
+ // number in order to respect face
+ // orientation, flip and rotation. The
+ // information we need here is exactly the
+ // same as the
+ // GeometryInfo<3>::child_cell_on_face info
+ // for the bottom face (face 4) of a hex, as
+ // on this the RefineCase of the cell matches
+ // that of the face and the subfaces are
+ // numbered in the same way as the child
+ // cells.
+
+ // in 3d, we have to account for faces that
+ // have non-standard face orientation, flip
+ // and rotation. thus, we have to store
+ // _eight_ data sets per face or subface
+ // already for the isotropic
+ // case. Additionally, we have three
+ // different refinement cases, resulting in
+ // <tt>4 + 2 + 2 = 8</tt> different subfaces
+ // for each face.
const unsigned int total_subfaces_per_face=8;
- // set up a table with the according offsets
- // for non-standard orientation, first index:
- // face_orientation (standard true=1), second
- // index: face_flip (standard false=0), third
- // index: face_rotation (standard false=0)
- //
- // note, that normally we should use the
- // obvious offsets 0,1,2,3,4,5,6,7. However,
- // prior to the changes enabling flipped and
- // rotated faces, in many places of the
- // library the convention was used, that the
- // first dataset with offset 0 corresponds to
- // a face in standard orientation. therefore
- // we use the offsets 4,5,6,7,0,1,2,3 here to
- // stick to that (implicit) convention
+ // set up a table with the corresponding offsets
+ // for non-standard orientation, first index:
+ // face_orientation (standard true=1), second
+ // index: face_flip (standard false=0), third
+ // index: face_rotation (standard false=0)
+ //
+ // note, that normally we should use the
+ // obvious offsets 0,1,2,3,4,5,6,7. However,
+ // prior to the changes enabling flipped and
+ // rotated faces, in many places of the
+ // library the convention was used, that the
+ // first dataset with offset 0 corresponds to
+ // a face in standard orientation. therefore
+ // we use the offsets 4,5,6,7,0,1,2,3 here to
+ // stick to that (implicit) convention
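// (Editorial note: with the eight orientation offsets below spaced
// faces_per_cell*total_subfaces_per_face apart, the complete subface
// data set table for dim==3 spans
// 8 * GeometryInfo<3>::faces_per_cell * total_subfaces_per_face
// = 8 * 6 * 8 = 384 entries.)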
static const unsigned int orientation_offset[2][2][2]=
- {{
- // face_orientation=false; face_flip=false; face_rotation=false and true
- {4*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 5*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face},
- // face_orientation=false; face_flip=true; face_rotation=false and true
- {6*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 7*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face}},
- {
- // face_orientation=true; face_flip=false; face_rotation=false and true
- {0*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 1*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face},
- // face_orientation=true; face_flip=true; face_rotation=false and true
- {2*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 3*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face}}};
-
- // set up a table with the offsets for a
- // given refinement case respecting the
- // corresponding number of subfaces. the
- // index corresponds to (RefineCase::Type - 1)
-
- // note, that normally we should use the
- // obvious offsets 0,2,6. However, prior to
- // the implementation of anisotropic
- // refinement, in many places of the library
- // the convention was used, that the first
- // dataset with offset 0 corresponds to a
- // standard (isotropic) face
- // refinement. therefore we use the offsets
- // 6,4,0 here to stick to that (implicit)
- // convention
- static const unsigned int ref_case_offset[3]=
+ {
{
- 6, //cut_x
- 4, //cut_y
- 0 //cut_xy
- };
+ // face_orientation=false; face_flip=false; face_rotation=false and true
+ {
+ 4*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 5*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 5*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ },
+ // face_orientation=false; face_flip=true; face_rotation=false and true
+ {
+ 6*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 7*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 7*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ }
+ },
+ {
+ // face_orientation=true; face_flip=false; face_rotation=false and true
+ {
+ 0*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 1*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 1*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ },
+ // face_orientation=true; face_flip=true; face_rotation=false and true
+ {
+ 2*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face,
- 3*GeometryInfo<dim>::faces_per_cell *total_subfaces_per_face
++ 3*GeometryInfo<dim>::faces_per_cell*total_subfaces_per_face
+ }
+ }
+ };
+
+ // set up a table with the offsets for a
+ // given refinement case respecting the
+ // corresponding number of subfaces. the
+ // index corresponds to (RefineCase::Type - 1)
+
+ // note, that normally we should use the
+ // obvious offsets 0,2,6. However, prior to
+ // the implementation of anisotropic
+ // refinement, in many places of the library
+ // the convention was used, that the first
+ // dataset with offset 0 corresponds to a
+ // standard (isotropic) face
+ // refinement. therefore we use the offsets
+ // 6,4,0 here to stick to that (implicit)
+ // convention
+ static const unsigned int ref_case_offset[3]=
+ {
+ 6, //cut_x
+ 4, //cut_y
+ 0 //cut_xy
+ };
- // for each subface of a given FaceRefineCase
- // there is a corresponding equivalent
- // subface number of one of the "standard"
- // RefineCases (cut_x, cut_y, cut_xy). Map
- // the given values to those equivalent
- // ones.
+ // for each subface of a given FaceRefineCase
+ // there is a corresponding equivalent
+ // subface number of one of the "standard"
+ // RefineCases (cut_x, cut_y, cut_xy). Map
+ // the given values to those equivalent
+ // ones.
- // first, define an invalid number
+ // first, define an invalid number
static const unsigned int e = deal_II_numbers::invalid_unsigned_int;
static const RefinementCase<dim-1>
}
- Assert (permutation[i] < n, ExcIndexRange (permutation[i], 0, n));
- out[permutation[i]] = i;
+ template <typename Integer>
+ std::vector<Integer>
+ reverse_permutation (const std::vector<Integer> &permutation)
+ {
+ const unsigned int n = permutation.size();
+
+ std::vector<Integer> out (n);
+ for (unsigned int i=0; i<n; ++i)
+ out[i] = n - 1 - permutation[i];
+
+ return out;
+ }
+
+
+
+ template <typename Integer>
+ std::vector<Integer>
+ invert_permutation (const std::vector<Integer> &permutation)
+ {
+ const unsigned int n = permutation.size();
+
+ std::vector<Integer> out (n, numbers::invalid_unsigned_int);
+
+ for (unsigned int i=0; i<n; ++i)
+ {
- // check that we have actually reached
- // all indices
++ Assert (permutation[i] < n, ExcIndexRange (permutation[i], 0, n));
++ out[permutation[i]] = i;
+ }
+
- ExcMessage ("The given input permutation had duplicate entries!"));
++ // check that we have actually reached
++ // all indices
+ for (unsigned int i=0; i<n; ++i)
+ Assert (out[i] != numbers::invalid_unsigned_int,
++ ExcMessage ("The given input permutation had duplicate entries!"));
+
+ return out;
+ }
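// (Editorial example: for p = {2, 0, 1}, i.e. n = 3,
// reverse_permutation(p) returns {n-1-p[i]} = {0, 2, 1}, while
// invert_permutation(p) returns the q with q[p[i]] == i, that is
// {1, 2, 0}; a duplicate entry in p would trigger the assertion above.)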
+
+
+
namespace System
{
}
else if (!p4est_has_children && !dealii_cell->has_children())
{
- //this active cell didn't change
+ //this active cell didn't change
typename internal::p4est::types<dim>::quadrant *q;
- q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
- sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
- );
- *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST;
-
- for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
- it != attached_data_pack_callbacks.end();
- ++it)
+ q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
+ sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), idx)
+ );
+ *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST;
+
+ for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
{
- void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
- void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
++ void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
((*it).second)(dealii_cell,
parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST,
ptr);
Assert(child0_idx != -1, ExcMessage("the first child should exist as an active quadrant!"));
typename internal::p4est::types<dim>::quadrant *q;
- q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
- sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), child0_idx)
- );
- *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE;
-
- for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
- it != attached_data_pack_callbacks.end();
- ++it)
+ q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
+ sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), child0_idx)
+ );
+ *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE;
+
+ for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
{
- void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
- void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
++ void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
((*it).second)(dealii_cell,
parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE,
}
else
{
- //it's children got coarsened into
- //this cell
+ //its children got coarsened into
+ //this cell
typename internal::p4est::types<dim>::quadrant *q;
- q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
- sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
- );
- *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN;
-
- for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
- it != attached_data_pack_callbacks.end();
- ++it)
+ q = static_cast<typename internal::p4est::types<dim>::quadrant *> (
+ sc_array_index (const_cast<sc_array_t *>(&tree.quadrants), idx)
+ );
+ *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN;
+
+ for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
{
- void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
- void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
++ void *ptr = static_cast<char *>(q->p.user_data) + (*it).first; //add offset
((*it).second)(dealii_cell,
parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN,
ptr);
number_cache.n_global_active_cells
= std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
number_cache.n_locally_owned_active_cells.end(),
- /* ensure sum is
- computed with
- correct data
- type:*/
- 0);
++ /* ensure sum is
++ computed with
++ correct data
++ type:*/
+ static_cast<types::global_dof_index>(0));
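// (Editorial note: std::accumulate carries out the summation in the type
// of its initial value, so a plain `0` would sum the per-processor cell
// counts in `int` and could overflow for very large meshes; the cast
// above keeps the accumulation in types::global_dof_index.)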
number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), mpi_communicator);
}
template <int dim, int spacedim>
void
- BlockInfo::initialize(const DoFHandler<dim, spacedim>& dof, bool levels_only, bool multigrid)
-BlockInfo::initialize(const DoFHandler<dim, spacedim> &dof)
++BlockInfo::initialize(const DoFHandler<dim, spacedim> &dof, bool levels_only, bool multigrid)
{
- if (!levels_only) {
- const FiniteElement<dim, spacedim>& fe = dof.get_fe();
- std::vector<unsigned int> sizes(fe.n_blocks());
- DoFTools::count_dofs_per_block(dof, sizes);
- bi_global.reinit(sizes);
- }
-
- if (multigrid) {
- std::vector<std::vector<unsigned int> > sizes (dof.get_tria ().n_levels ());
-
- for (unsigned int i = 0; i < sizes.size (); ++i)
- sizes[i].resize (dof.get_fe ().n_blocks ());
-
- MGTools::count_dofs_per_block (dof, sizes);
- levels.resize (sizes.size ());
-
- for (unsigned int i = 0; i < sizes.size (); ++i)
- levels[i].reinit (sizes[i]);
- }
- const FiniteElement<dim, spacedim> &fe = dof.get_fe();
- std::vector<unsigned int> sizes(fe.n_blocks());
- DoFTools::count_dofs_per_block(dof, sizes);
- bi_global.reinit(sizes);
++ if (!levels_only)
++ {
++ const FiniteElement<dim, spacedim> &fe = dof.get_fe();
++ std::vector<unsigned int> sizes(fe.n_blocks());
++ DoFTools::count_dofs_per_block(dof, sizes);
++ bi_global.reinit(sizes);
++ }
++
++ if (multigrid)
++ {
++ std::vector<std::vector<unsigned int> > sizes (dof.get_tria ().n_levels ());
++
++ for (unsigned int i = 0; i < sizes.size (); ++i)
++ sizes[i].resize (dof.get_fe ().n_blocks ());
++
++ MGTools::count_dofs_per_block (dof, sizes);
++ levels.resize (sizes.size ());
++
++ for (unsigned int i = 0; i < sizes.size (); ++i)
++ levels[i].reinit (sizes[i]);
++ }
}
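// Editorial usage sketch of the merged interface above: fill the global
// block structure and, in addition, one block structure per level
// (assumes `dof_handler` has active dofs and, for the multigrid part,
// level dofs distributed):
//
//   BlockInfo block_info;
//   block_info.initialize (dof_handler,
//                          /*levels_only=*/false,
//                          /*multigrid=*/true);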
template <int dim, int spacedim>
void
- BlockInfo::initialize(const MGDoFHandler<dim, spacedim>& dof, bool levels_only)
+ BlockInfo::initialize(const MGDoFHandler<dim, spacedim> &dof, bool levels_only)
{
if (!levels_only)
- initialize(static_cast<const DoFHandler<dim, spacedim>&>(dof));
+ initialize(static_cast<const DoFHandler<dim, spacedim>&> (dof));
std::vector<std::vector<unsigned int> > sizes (dof.get_tria().n_levels());
for (unsigned int i=0; i<sizes.size(); ++i)
namespace internal
{
template <int dim, int spacedim>
- const types::global_dof_index * dummy ()
- const unsigned int *dummy ()
++ const types::global_dof_index *dummy ()
{
return &dealii::DoFHandler<dim,spacedim>::invalid_dof_index;
}
using dealii::DoFHandler;
- /**
- * A class with the same purpose as the similarly named class of the
- * Triangulation class. See there for more information.
- */
+ /**
+ * A class with the same purpose as the similarly named class of the
+ * Triangulation class. See there for more information.
+ */
struct Implementation
{
- /**
- * Implement the function of same name in
- * the mother class.
- */
- template <int spacedim>
- static
- unsigned int
- max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler)
- {
- return std::min(3*dof_handler.selected_fe->dofs_per_vertex +
- 2*dof_handler.selected_fe->dofs_per_line,
- dof_handler.n_dofs());
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- max_couplings_between_dofs (const DoFHandler<2,spacedim> &dof_handler)
- {
-
- // get these numbers by drawing pictures
- // and counting...
- // example:
- // | | |
- // --x-----x--x--X--
- // | | | |
- // | x--x--x
- // | | | |
- // --x--x--*--x--x--
- // | | | |
- // x--x--x |
- // | | | |
- // --X--x--x-----x--
- // | | |
- // x = vertices connected with center vertex *;
- // = total of 19
- // (the X vertices are connected with * if
- // the vertices adjacent to X are hanging
- // nodes)
- // count lines -> 28 (don't forget to count
- // mother and children separately!)
- unsigned int max_couplings;
- switch (dof_handler.tria->max_adjacent_cells())
- {
- case 4:
- max_couplings=19*dof_handler.selected_fe->dofs_per_vertex +
- 28*dof_handler.selected_fe->dofs_per_line +
- 8*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 5:
- max_couplings=21*dof_handler.selected_fe->dofs_per_vertex +
- 31*dof_handler.selected_fe->dofs_per_line +
- 9*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 6:
- max_couplings=28*dof_handler.selected_fe->dofs_per_vertex +
- 42*dof_handler.selected_fe->dofs_per_line +
- 12*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 7:
- max_couplings=30*dof_handler.selected_fe->dofs_per_vertex +
- 45*dof_handler.selected_fe->dofs_per_line +
- 13*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 8:
- max_couplings=37*dof_handler.selected_fe->dofs_per_vertex +
- 56*dof_handler.selected_fe->dofs_per_line +
- 16*dof_handler.selected_fe->dofs_per_quad;
- break;
-
- // the following
- // numbers are not
- // based on actual
- // counting but by
- // extrapolating the
- // number sequences
- // from the previous
- // ones (for example,
- // for dofs_per_vertex,
- // the sequence above
- // is 19, 21, 28, 30,
- // 37, and is continued
- // as follows):
- case 9:
- max_couplings=39*dof_handler.selected_fe->dofs_per_vertex +
- 59*dof_handler.selected_fe->dofs_per_line +
- 17*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 10:
- max_couplings=46*dof_handler.selected_fe->dofs_per_vertex +
- 70*dof_handler.selected_fe->dofs_per_line +
- 20*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 11:
- max_couplings=48*dof_handler.selected_fe->dofs_per_vertex +
- 73*dof_handler.selected_fe->dofs_per_line +
- 21*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 12:
- max_couplings=55*dof_handler.selected_fe->dofs_per_vertex +
- 84*dof_handler.selected_fe->dofs_per_line +
- 24*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 13:
- max_couplings=57*dof_handler.selected_fe->dofs_per_vertex +
- 87*dof_handler.selected_fe->dofs_per_line +
- 25*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 14:
- max_couplings=63*dof_handler.selected_fe->dofs_per_vertex +
- 98*dof_handler.selected_fe->dofs_per_line +
- 28*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 15:
- max_couplings=65*dof_handler.selected_fe->dofs_per_vertex +
- 103*dof_handler.selected_fe->dofs_per_line +
- 29*dof_handler.selected_fe->dofs_per_quad;
- break;
- case 16:
- max_couplings=72*dof_handler.selected_fe->dofs_per_vertex +
- 114*dof_handler.selected_fe->dofs_per_line +
- 32*dof_handler.selected_fe->dofs_per_quad;
- break;
-
- default:
- Assert (false, ExcNotImplemented());
- max_couplings=0;
- }
- return std::min(max_couplings,dof_handler.n_dofs());
- }
-
-
- template <int spacedim>
- static
- unsigned int
- max_couplings_between_dofs (const DoFHandler<3,spacedim> &dof_handler)
- {
+ /**
+ * Implement the function of same name in
+ * the mother class.
+ */
+ template <int spacedim>
+ static
+ unsigned int
+ max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler)
+ {
+ return std::min(3*dof_handler.selected_fe->dofs_per_vertex +
+ 2*dof_handler.selected_fe->dofs_per_line,
+ dof_handler.n_dofs());
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ max_couplings_between_dofs (const DoFHandler<2,spacedim> &dof_handler)
+ {
+
+ // get these numbers by drawing pictures
+ // and counting...
+ // example:
+ // | | |
+ // --x-----x--x--X--
+ // | | | |
+ // | x--x--x
+ // | | | |
+ // --x--x--*--x--x--
+ // | | | |
+ // x--x--x |
+ // | | | |
+ // --X--x--x-----x--
+ // | | |
+ // x = vertices connected with center vertex *;
+ // = total of 19
+ // (the X vertices are connected with * if
+ // the vertices adjacent to X are hanging
+ // nodes)
+ // count lines -> 28 (don't forget to count
+ // mother and children separately!)
+ unsigned int max_couplings;
+ switch (dof_handler.tria->max_adjacent_cells())
+ {
+ case 4:
+ max_couplings=19*dof_handler.selected_fe->dofs_per_vertex +
+ 28*dof_handler.selected_fe->dofs_per_line +
+ 8*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 5:
+ max_couplings=21*dof_handler.selected_fe->dofs_per_vertex +
+ 31*dof_handler.selected_fe->dofs_per_line +
+ 9*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 6:
+ max_couplings=28*dof_handler.selected_fe->dofs_per_vertex +
+ 42*dof_handler.selected_fe->dofs_per_line +
+ 12*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 7:
+ max_couplings=30*dof_handler.selected_fe->dofs_per_vertex +
+ 45*dof_handler.selected_fe->dofs_per_line +
+ 13*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 8:
+ max_couplings=37*dof_handler.selected_fe->dofs_per_vertex +
+ 56*dof_handler.selected_fe->dofs_per_line +
+ 16*dof_handler.selected_fe->dofs_per_quad;
+ break;
+
+ // the following
+ // numbers are not
+ // based on actual
+ // counting but by
+ // extrapolating the
+ // number sequences
+ // from the previous
+ // ones (for example,
+ // for dofs_per_vertex,
+ // the sequence above
+ // is 19, 21, 28, 30,
+ // 37, and is continued
+ // as follows):
+ case 9:
+ max_couplings=39*dof_handler.selected_fe->dofs_per_vertex +
+ 59*dof_handler.selected_fe->dofs_per_line +
+ 17*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 10:
+ max_couplings=46*dof_handler.selected_fe->dofs_per_vertex +
+ 70*dof_handler.selected_fe->dofs_per_line +
+ 20*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 11:
+ max_couplings=48*dof_handler.selected_fe->dofs_per_vertex +
+ 73*dof_handler.selected_fe->dofs_per_line +
+ 21*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 12:
+ max_couplings=55*dof_handler.selected_fe->dofs_per_vertex +
+ 84*dof_handler.selected_fe->dofs_per_line +
+ 24*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 13:
+ max_couplings=57*dof_handler.selected_fe->dofs_per_vertex +
+ 87*dof_handler.selected_fe->dofs_per_line +
+ 25*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 14:
+ max_couplings=63*dof_handler.selected_fe->dofs_per_vertex +
+ 98*dof_handler.selected_fe->dofs_per_line +
+ 28*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 15:
+ max_couplings=65*dof_handler.selected_fe->dofs_per_vertex +
+ 103*dof_handler.selected_fe->dofs_per_line +
+ 29*dof_handler.selected_fe->dofs_per_quad;
+ break;
+ case 16:
+ max_couplings=72*dof_handler.selected_fe->dofs_per_vertex +
+ 114*dof_handler.selected_fe->dofs_per_line +
+ 32*dof_handler.selected_fe->dofs_per_quad;
+ break;
+
+ default:
+ Assert (false, ExcNotImplemented());
+ max_couplings=0;
+ }
+ return std::min(max_couplings,dof_handler.n_dofs());
+ }
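(Illustrative check only, not part of the patch: plugging standard continuous
elements into the case-4 estimate above reproduces the expected totals. The
helper name check_case4 is introduced here purely for this sketch.)

    // evaluates the case-4 formula 19*V + 28*L + 8*Q from the switch above
    constexpr unsigned int check_case4 (const unsigned int dofs_per_vertex,
                                        const unsigned int dofs_per_line,
                                        const unsigned int dofs_per_quad)
    {
      return 19*dofs_per_vertex + 28*dofs_per_line + 8*dofs_per_quad;
    }
    static_assert (check_case4 (1, 0, 0) == 19, "bilinear Q1 element");
    static_assert (check_case4 (1, 1, 1) == 55, "biquadratic Q2 element");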
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ max_couplings_between_dofs (const DoFHandler<3,spacedim> &dof_handler)
+ {
//TODO:[?] Invent significantly better estimates than the ones in this function
- // doing the same thing here is a
- // rather complicated thing, compared
- // to the 2d case, since it is hard
- // to draw pictures with several
- // refined hexahedra :-) so I
- // presently only give a coarse
- // estimate for the case that at most
- // 8 hexes meet at each vertex
- //
- // can anyone give better estimate
- // here?
- const unsigned int max_adjacent_cells
- = dof_handler.tria->max_adjacent_cells();
-
- unsigned int max_couplings;
- if (max_adjacent_cells <= 8)
- max_couplings=7*7*7*dof_handler.selected_fe->dofs_per_vertex +
- 7*6*7*3*dof_handler.selected_fe->dofs_per_line +
- 9*4*7*3*dof_handler.selected_fe->dofs_per_quad +
- 27*dof_handler.selected_fe->dofs_per_hex;
- else
- {
- Assert (false, ExcNotImplemented());
- max_couplings=0;
- }
-
- return std::min(max_couplings,dof_handler.n_dofs());
- }
-
-
- /**
- * Reserve enough space in the
- * <tt>levels[]</tt> objects to store the
- * numbers of the degrees of freedom
- * needed for the given element. The
- * given element is that one which
- * was selected when calling
- * @p distribute_dofs the last time.
- */
- template <int spacedim>
- static
- void reserve_space (DoFHandler<1,spacedim> &dof_handler)
- {
- dof_handler.vertex_dofs
- .resize(dof_handler.tria->n_vertices() *
- dof_handler.selected_fe->dofs_per_vertex,
- DoFHandler<1,spacedim>::invalid_dof_index);
-
- for (unsigned int i=0; i<dof_handler.tria->n_levels(); ++i)
- {
- dof_handler.levels
- .push_back (new internal::DoFHandler::DoFLevel<1>);
-
- dof_handler.levels.back()->dof_object.dofs
- .resize (dof_handler.tria->n_raw_cells(i) *
- dof_handler.selected_fe->dofs_per_line,
- DoFHandler<1,spacedim>::invalid_dof_index);
-
- dof_handler.levels.back()->cell_dof_indices_cache
- .resize (dof_handler.tria->n_raw_cells(i) *
- dof_handler.selected_fe->dofs_per_cell,
- DoFHandler<1,spacedim>::invalid_dof_index);
- }
- }
-
-
- template <int spacedim>
- static
- void reserve_space (DoFHandler<2,spacedim> &dof_handler)
- {
- dof_handler.vertex_dofs
- .resize(dof_handler.tria->n_vertices() *
- dof_handler.selected_fe->dofs_per_vertex,
- DoFHandler<2,spacedim>::invalid_dof_index);
-
- for (unsigned int i=0; i<dof_handler.tria->n_levels(); ++i)
- {
- dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<2>);
-
- dof_handler.levels.back()->dof_object.dofs
- .resize (dof_handler.tria->n_raw_cells(i) *
- dof_handler.selected_fe->dofs_per_quad,
- DoFHandler<2,spacedim>::invalid_dof_index);
-
- dof_handler.levels.back()->cell_dof_indices_cache
- .resize (dof_handler.tria->n_raw_cells(i) *
- dof_handler.selected_fe->dofs_per_cell,
- DoFHandler<2,spacedim>::invalid_dof_index);
+ // doing the same thing here is
+ // rather complicated, compared
+ // to the 2d case, since it is hard
+ // to draw pictures with several
+ // refined hexahedra :-) so I
+ // presently only give a coarse
+ // estimate for the case that at most
+ // 8 hexes meet at each vertex
+ //
+ // can anyone give a better estimate
+ // here?
+ const unsigned int max_adjacent_cells
+ = dof_handler.tria->max_adjacent_cells();
+
+ unsigned int max_couplings;
+ if (max_adjacent_cells <= 8)
+ max_couplings=7*7*7*dof_handler.selected_fe->dofs_per_vertex +
+ 7*6*7*3*dof_handler.selected_fe->dofs_per_line +
+ 9*4*7*3*dof_handler.selected_fe->dofs_per_quad +
+ 27*dof_handler.selected_fe->dofs_per_hex;
+ else
+ {
+ Assert (false, ExcNotImplemented());
+ max_couplings=0;
+ }
+
+ return std::min(max_couplings,dof_handler.n_dofs());
+ }
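(Worked numbers only, under the same at-most-8-hexes assumption as the code
above: a trilinear Q1 element carries one dof per vertex and none elsewhere, so
the estimate reduces to 7*7*7 = 343 couplings; a triquadratic Q2 element carries
one dof on every vertex, line, quad and hex, giving
343 + 7*6*7*3 + 9*4*7*3 + 27 = 343 + 882 + 756 + 27 = 2008.)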
+
+
+ /**
+ * Reserve enough space in the
+ * <tt>levels[]</tt> objects to store the
+ * numbers of the degrees of freedom
+ * needed for the given element. The
+ * given element is the one that was
+ * selected the last time
+ * @p distribute_dofs was called.
+ */
+ template <int spacedim>
+ static
+ void reserve_space (DoFHandler<1,spacedim> &dof_handler)
+ {
+ dof_handler.vertex_dofs
+ .resize(dof_handler.tria->n_vertices() *
+ dof_handler.selected_fe->dofs_per_vertex,
+ DoFHandler<1,spacedim>::invalid_dof_index);
+
+ for (unsigned int i=0; i<dof_handler.tria->n_levels(); ++i)
+ {
+ dof_handler.levels
+ .push_back (new internal::DoFHandler::DoFLevel<1>);
+
+ dof_handler.levels.back()->dof_object.dofs
+ .resize (dof_handler.tria->n_raw_cells(i) *
+ dof_handler.selected_fe->dofs_per_line,
+ DoFHandler<1,spacedim>::invalid_dof_index);
+
+ dof_handler.levels.back()->cell_dof_indices_cache
+ .resize (dof_handler.tria->n_raw_cells(i) *
+ dof_handler.selected_fe->dofs_per_cell,
+ DoFHandler<1,spacedim>::invalid_dof_index);
+ }
+ }
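(Worked sizes for the 1d variant above, assuming FE_Q<1>(2), i.e. one dof per
vertex, one per line and three per cell: vertex_dofs receives n_vertices()
entries, each level's dof_object.dofs receives n_raw_cells(i) entries, and the
cell_dof_indices_cache receives 3*n_raw_cells(i) entries, all initialized to
invalid_dof_index until the numbering pass fills them in.)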
+
+
+ template <int spacedim>
+ static
+ void reserve_space (DoFHandler<2,spacedim> &dof_handler)
+ {
+ dof_handler.vertex_dofs
+ .resize(dof_handler.tria->n_vertices() *
+ dof_handler.selected_fe->dofs_per_vertex,
+ DoFHandler<2,spacedim>::invalid_dof_index);
+
+ for (unsigned int i=0; i<dof_handler.tria->n_levels(); ++i)
+ {
+ dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<2>);
+
+ dof_handler.levels.back()->dof_object.dofs
+ .resize (dof_handler.tria->n_raw_cells(i) *
+ dof_handler.selected_fe->dofs_per_quad,
+ DoFHandler<2,spacedim>::invalid_dof_index);
+
+ dof_handler.levels.back()->cell_dof_indices_cache
+ .resize (dof_handler.tria->n_raw_cells(i) *
+ dof_handler.selected_fe->dofs_per_cell,
+ DoFHandler<2,spacedim>::invalid_dof_index);
+ }
+
+ dof_handler.faces = new internal::DoFHandler::DoFFaces<2>;
+ dof_handler.faces->lines.dofs
+ .resize (dof_handler.tria->n_raw_lines() *
+ dof_handler.selected_fe->dofs_per_line,
+ DoFHandler<2,spacedim>::invalid_dof_index);
+ }
+
+
+ template <int spacedim>
+ static
+ void reserve_space (DoFHandler<3,spacedim> &dof_handler)
+ {
+ dof_handler.vertex_dofs
+ .resize(dof_handler.tria->n_vertices() *
+ dof_handler.selected_fe->dofs_per_vertex,
+ DoFHandler<3,spacedim>::invalid_dof_index);
+
+ for (unsigned int i=0; i<dof_handler.tria->n_levels(); ++i)
+ {
+ dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<3>);
+
+ dof_handler.levels.back()->dof_object.dofs
+ .resize (dof_handler.tria->n_raw_cells(i) *
+ dof_handler.selected_fe->dofs_per_hex,
+ DoFHandler<3,spacedim>::invalid_dof_index);
+
+ dof_handler.levels.back()->cell_dof_indices_cache
+ .resize (dof_handler.tria->n_raw_cells(i) *
+ dof_handler.selected_fe->dofs_per_cell,
+ DoFHandler<3,spacedim>::invalid_dof_index);
+ }
+ dof_handler.faces = new internal::DoFHandler::DoFFaces<3>;
+
+ dof_handler.faces->lines.dofs
+ .resize (dof_handler.tria->n_raw_lines() *
+ dof_handler.selected_fe->dofs_per_line,
+ DoFHandler<3,spacedim>::invalid_dof_index);
+ dof_handler.faces->quads.dofs
+ .resize (dof_handler.tria->n_raw_quads() *
+ dof_handler.selected_fe->dofs_per_quad,
+ DoFHandler<3,spacedim>::invalid_dof_index);
+ }
++
++ template<int spacedim>
++ static
++ void reserve_space_mg (DoFHandler<1, spacedim> &dof_handler)
++ {
++ Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation"));
++ dof_handler.clear_mg_space ();
++
++ const dealii::Triangulation<1, spacedim> &tria = dof_handler.get_tria ();
++ const unsigned int &dofs_per_line = dof_handler.get_fe ().dofs_per_line;
++ const unsigned int &n_levels = tria.n_levels ();
++
++ for (unsigned int i = 0; i < n_levels; ++i)
++ {
++ dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<1>);
++ dof_handler.mg_levels.back ()->dof_object.dofs = std::vector<unsigned int> (tria.n_raw_lines (i) * dofs_per_line, DoFHandler<1>::invalid_dof_index);
++ }
++
++ const unsigned int &n_vertices = tria.n_vertices ();
++
++ dof_handler.mg_vertex_dofs.resize (n_vertices);
++
++ std::vector<unsigned int> max_level (n_vertices, 0);
++ std::vector<unsigned int> min_level (n_vertices, n_levels);
++
++ for (typename dealii::Triangulation<1, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell)
++ {
++ const unsigned int level = cell->level ();
++
++ for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex)
++ {
++ const unsigned int vertex_index = cell->vertex_index (vertex);
++
++ if (min_level[vertex_index] > level)
++ min_level[vertex_index] = level;
++
++ if (max_level[vertex_index] < level)
++ max_level[vertex_index] = level;
+ }
-
- dof_handler.faces = new internal::DoFHandler::DoFFaces<2>;
- dof_handler.faces->lines.dofs
- .resize (dof_handler.tria->n_raw_lines() *
- dof_handler.selected_fe->dofs_per_line,
- DoFHandler<2,spacedim>::invalid_dof_index);
- }
-
-
- template <int spacedim>
- static
- void reserve_space (DoFHandler<3,spacedim> &dof_handler)
- {
- dof_handler.vertex_dofs
- .resize(dof_handler.tria->n_vertices() *
- dof_handler.selected_fe->dofs_per_vertex,
- DoFHandler<3,spacedim>::invalid_dof_index);
-
- for (unsigned int i=0; i<dof_handler.tria->n_levels(); ++i)
- {
- dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<3>);
-
- dof_handler.levels.back()->dof_object.dofs
- .resize (dof_handler.tria->n_raw_cells(i) *
- dof_handler.selected_fe->dofs_per_hex,
- DoFHandler<3,spacedim>::invalid_dof_index);
-
- dof_handler.levels.back()->cell_dof_indices_cache
- .resize (dof_handler.tria->n_raw_cells(i) *
- dof_handler.selected_fe->dofs_per_cell,
- DoFHandler<3,spacedim>::invalid_dof_index);
++ }
++
++ for (unsigned int vertex = 0; vertex < n_vertices; ++vertex)
++ if (tria.vertex_used (vertex))
++ {
++ Assert (min_level[vertex] < n_levels, ExcInternalError ());
++ Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ());
++ dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], dof_handler.get_fe ().dofs_per_vertex);
++ }
++
++ else
++ {
++ Assert (min_level[vertex] == n_levels, ExcInternalError ());
++ Assert (max_level[vertex] == 0, ExcInternalError ());
++ dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0);
++ }
++ }
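(A worked example of the vertex bookkeeping above, which the 2d and 3d variants
below repeat verbatim: a vertex whose adjacent cells live on levels 2, 3 and 4
ends up with min_level = 2 and max_level = 4, so init(2, 4, dofs_per_vertex)
reserves indices for three levels; an unused vertex keeps min_level = n_levels
and max_level = 0 and is initialized with init(1, 0, 0), a deliberately empty
range for which MGVertexDoFs::init allocates nothing.)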
++
++ template<int spacedim>
++ static
++ void reserve_space_mg (DoFHandler<2, spacedim> &dof_handler)
++ {
++ Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation"));
++ dof_handler.clear_mg_space ();
++
++ const dealii::FiniteElement<2, spacedim> &fe = dof_handler.get_fe ();
++ const dealii::Triangulation<2, spacedim> &tria = dof_handler.get_tria ();
++ const unsigned int &n_levels = tria.n_levels ();
++
++ for (unsigned int i = 0; i < n_levels; ++i)
++ {
++ dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<2>);
++ dof_handler.mg_levels.back ()->dof_object.dofs = std::vector<unsigned int> (tria.n_raw_quads (i) * fe.dofs_per_quad, DoFHandler<2>::invalid_dof_index);
++ }
++
++ dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<2>;
++ dof_handler.mg_faces->lines.dofs = std::vector<unsigned int> (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<2>::invalid_dof_index);
++
++ const unsigned int &n_vertices = tria.n_vertices ();
++
++ dof_handler.mg_vertex_dofs.resize (n_vertices);
++
++ std::vector<unsigned int> max_level (n_vertices, 0);
++ std::vector<unsigned int> min_level (n_vertices, n_levels);
++
++ for (typename dealii::Triangulation<2, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell)
++ {
++ const unsigned int level = cell->level ();
++
++ for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex)
++ {
++ const unsigned int vertex_index = cell->vertex_index (vertex);
++
++ if (min_level[vertex_index] > level)
++ min_level[vertex_index] = level;
++
++ if (max_level[vertex_index] < level)
++ max_level[vertex_index] = level;
+ }
- dof_handler.faces = new internal::DoFHandler::DoFFaces<3>;
-
- dof_handler.faces->lines.dofs
- .resize (dof_handler.tria->n_raw_lines() *
- dof_handler.selected_fe->dofs_per_line,
- DoFHandler<3,spacedim>::invalid_dof_index);
- dof_handler.faces->quads.dofs
- .resize (dof_handler.tria->n_raw_quads() *
- dof_handler.selected_fe->dofs_per_quad,
- DoFHandler<3,spacedim>::invalid_dof_index);
- }
-
- template<int spacedim>
- static
- void reserve_space_mg (DoFHandler<1, spacedim>& dof_handler) {
- Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation"));
- dof_handler.clear_mg_space ();
-
- const dealii::Triangulation<1, spacedim>& tria = dof_handler.get_tria ();
- const unsigned int& dofs_per_line = dof_handler.get_fe ().dofs_per_line;
- const unsigned int& n_levels = tria.n_levels ();
-
- for (unsigned int i = 0; i < n_levels; ++i) {
- dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<1>);
- dof_handler.mg_levels.back ()->dof_object.dofs = std::vector<unsigned int> (tria.n_raw_lines (i) * dofs_per_line, DoFHandler<1>::invalid_dof_index);
- }
-
- const unsigned int& n_vertices = tria.n_vertices ();
-
- dof_handler.mg_vertex_dofs.resize (n_vertices);
-
- std::vector<unsigned int> max_level (n_vertices, 0);
- std::vector<unsigned int> min_level (n_vertices, n_levels);
-
- for (typename dealii::Triangulation<1, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) {
- const unsigned int level = cell->level ();
-
- for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) {
- const unsigned int vertex_index = cell->vertex_index (vertex);
-
- if (min_level[vertex_index] > level)
- min_level[vertex_index] = level;
-
- if (max_level[vertex_index] < level)
- max_level[vertex_index] = level;
- }
- }
-
- for (unsigned int vertex = 0; vertex < n_vertices; ++vertex)
- if (tria.vertex_used (vertex)) {
- Assert (min_level[vertex] < n_levels, ExcInternalError ());
- Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ());
- dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], dof_handler.get_fe ().dofs_per_vertex);
- }
-
- else {
- Assert (min_level[vertex] == n_levels, ExcInternalError ());
- Assert (max_level[vertex] == 0, ExcInternalError ());
- dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0);
- }
- }
-
- template<int spacedim>
- static
- void reserve_space_mg (DoFHandler<2, spacedim>& dof_handler) {
- Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation"));
- dof_handler.clear_mg_space ();
-
- const dealii::FiniteElement<2, spacedim>& fe = dof_handler.get_fe ();
- const dealii::Triangulation<2, spacedim>& tria = dof_handler.get_tria ();
- const unsigned int& n_levels = tria.n_levels ();
-
- for (unsigned int i = 0; i < n_levels; ++i) {
- dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<2>);
- dof_handler.mg_levels.back ()->dof_object.dofs = std::vector<unsigned int> (tria.n_raw_quads (i) * fe.dofs_per_quad, DoFHandler<2>::invalid_dof_index);
- }
-
- dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<2>;
- dof_handler.mg_faces->lines.dofs = std::vector<unsigned int> (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<2>::invalid_dof_index);
-
- const unsigned int& n_vertices = tria.n_vertices ();
-
- dof_handler.mg_vertex_dofs.resize (n_vertices);
-
- std::vector<unsigned int> max_level (n_vertices, 0);
- std::vector<unsigned int> min_level (n_vertices, n_levels);
-
- for (typename dealii::Triangulation<2, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) {
- const unsigned int level = cell->level ();
-
- for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex) {
- const unsigned int vertex_index = cell->vertex_index (vertex);
-
- if (min_level[vertex_index] > level)
- min_level[vertex_index] = level;
-
- if (max_level[vertex_index] < level)
- max_level[vertex_index] = level;
- }
- }
-
- for (unsigned int vertex = 0; vertex < n_vertices; ++vertex)
- if (tria.vertex_used (vertex)) {
- Assert (min_level[vertex] < n_levels, ExcInternalError ());
- Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ());
- dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex);
- }
-
- else {
- Assert (min_level[vertex] == n_levels, ExcInternalError ());
- Assert (max_level[vertex] == 0, ExcInternalError ());
- dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0);
- }
- }
-
- template<int spacedim>
- static
- void reserve_space_mg (DoFHandler<3, spacedim>& dof_handler) {
- Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation"));
- dof_handler.clear_mg_space ();
-
- const dealii::FiniteElement<3, spacedim>& fe = dof_handler.get_fe ();
- const dealii::Triangulation<3, spacedim>& tria = dof_handler.get_tria ();
- const unsigned int& n_levels = tria.n_levels ();
-
- for (unsigned int i = 0; i < n_levels; ++i) {
- dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<3>);
- dof_handler.mg_levels.back ()->dof_object.dofs = std::vector<unsigned int> (tria.n_raw_hexs (i) * fe.dofs_per_hex, DoFHandler<3>::invalid_dof_index);
- }
-
- dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<3>;
- dof_handler.mg_faces->lines.dofs = std::vector<unsigned int> (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<3>::invalid_dof_index);
- dof_handler.mg_faces->quads.dofs = std::vector<unsigned int> (tria.n_raw_quads () * fe.dofs_per_quad, DoFHandler<3>::invalid_dof_index);
-
- const unsigned int& n_vertices = tria.n_vertices ();
-
- dof_handler.mg_vertex_dofs.resize (n_vertices);
-
- std::vector<unsigned int> max_level (n_vertices, 0);
- std::vector<unsigned int> min_level (n_vertices, n_levels);
-
- for (typename dealii::Triangulation<3, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) {
- const unsigned int level = cell->level ();
-
- for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex) {
- const unsigned int vertex_index = cell->vertex_index (vertex);
-
- if (min_level[vertex_index] > level)
- min_level[vertex_index] = level;
-
- if (max_level[vertex_index] < level)
- max_level[vertex_index] = level;
- }
- }
-
- for (unsigned int vertex = 0; vertex < n_vertices; ++vertex)
- if (tria.vertex_used (vertex)) {
- Assert (min_level[vertex] < n_levels, ExcInternalError ());
- Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ());
- dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex);
- }
-
- else {
- Assert (min_level[vertex] == n_levels, ExcInternalError ());
- Assert (max_level[vertex] == 0, ExcInternalError ());
- dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0);
- }
- }
-
- template<int spacedim>
- static
- types::global_dof_index distribute_dofs_on_cell (typename DoFHandler<1, spacedim>::cell_iterator& cell, types::global_dof_index next_free_dof) {
- const FiniteElement<1, spacedim>& fe = cell->get_fe ();
-
- if (fe.dofs_per_vertex > 0)
- for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) {
- typename DoFHandler<1, spacedim>::cell_iterator neighbor = cell->neighbor (vertex);
-
- if (neighbor.state () == IteratorState::valid)
- if (neighbor->user_flag_set () && (neighbor->level () == cell->level ())) {
- if (vertex == 0)
- for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
- cell->set_mg_vertex_dof_index (cell->level (), 0, dof, neighbor->mg_vertex_dof_index (cell->level (), 1, dof));
-
- else
- for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
- cell->set_mg_vertex_dof_index (cell->level (), 1, dof, neighbor->mg_vertex_dof_index (cell->level (), 0, dof));
-
- continue;
- }
-
- for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
- cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++);
- }
-
- if (fe.dofs_per_line > 0)
- for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
- cell->set_mg_dof_index (cell->level (), dof, next_free_dof++);
-
- cell->set_user_flag ();
- return next_free_dof;
- }
-
- template<int spacedim>
- static
- types::global_dof_index distribute_dofs_on_cell (typename DoFHandler<2, spacedim>::cell_iterator& cell, types::global_dof_index next_free_dof) {
- const FiniteElement<2, spacedim>& fe = cell->get_fe ();
-
- if (fe.dofs_per_vertex > 0)
- for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex)
- if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<2>::invalid_dof_index)
- for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
- cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++);
-
- if (fe.dofs_per_line > 0)
- for (unsigned int face = 0; face < GeometryInfo<2>::faces_per_cell; ++face) {
- typename DoFHandler<2, spacedim>::line_iterator line = cell->line (face);
-
- if (line->mg_dof_index (cell->level (), 0) == DoFHandler<2>::invalid_dof_index)
- for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
- line->set_mg_dof_index (cell->level (), dof, next_free_dof++);
- }
-
- if (fe.dofs_per_quad > 0)
- for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof)
- cell->set_mg_dof_index (cell->level (), dof, next_free_dof++);
-
- cell->set_user_flag ();
- return next_free_dof;
- }
-
- template<int spacedim>
- static
- types::global_dof_index distribute_dofs_on_cell (typename DoFHandler<3, spacedim>::cell_iterator& cell, types::global_dof_index next_free_dof) {
- const FiniteElement<3, spacedim>& fe = cell->get_fe ();
-
- if (fe.dofs_per_vertex > 0)
- for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex)
- if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<3>::invalid_dof_index)
- for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
- cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++);
-
- if (fe.dofs_per_line > 0)
- for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell; ++line) {
- typename DoFHandler<3, spacedim>::line_iterator line_it = cell->line (line);
-
- if (line_it->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index)
- for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
- line_it->set_mg_dof_index (cell->level (), dof, next_free_dof++);
- }
-
- if (fe.dofs_per_quad > 0)
- for (unsigned int face = 0; face < GeometryInfo<3>::quads_per_cell; ++face) {
- typename DoFHandler<3, spacedim>::quad_iterator quad = cell->quad (face);
-
- if (quad->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index)
- for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof)
- quad->set_mg_dof_index (cell->level (), dof, next_free_dof++);
- }
-
- if (fe.dofs_per_hex > 0)
- for (unsigned int dof = 0; dof < fe.dofs_per_hex; ++dof)
- cell->set_mg_dof_index (cell->level (), dof, next_free_dof++);
-
- cell->set_user_flag ();
- return next_free_dof;
- }
-
- template<int spacedim>
- static
- types::global_dof_index get_dof_index (const DoFHandler<1, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<1>& mg_level, internal::DoFHandler::DoFFaces<1>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) {
- return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index);
- }
-
- template<int spacedim>
- static
- types::global_dof_index get_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>&, internal::DoFHandler::DoFFaces<2>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) {
- return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index);
- }
-
- template<int spacedim>
- static
- types::global_dof_index get_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>& mg_level, internal::DoFHandler::DoFFaces<2>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>) {
- return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index);
- }
-
- template<int spacedim>
- static
- types::global_dof_index get_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) {
- return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index);
- }
-
- template<int spacedim>
- static
- types::global_dof_index get_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>) {
- return mg_faces.quads.get_dof_index (dof_handler, obj_index, fe_index, local_index);
- }
-
- template<int spacedim>
- static
- types::global_dof_index get_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>& mg_level, internal::DoFHandler::DoFFaces<3>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<3>) {
- return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index);
- }
-
- template<int spacedim>
- static
- void set_dof_index (const DoFHandler<1, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<1>& mg_level, internal::DoFHandler::DoFFaces<1>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<1>) {
- mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
- }
-
- template<int spacedim>
- static
- void set_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>&, internal::DoFHandler::DoFFaces<2>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<1>) {
- mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
- }
-
- template<int spacedim>
- static
- void set_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>& mg_level, internal::DoFHandler::DoFFaces<2>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<2>) {
- mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
- }
-
- template<int spacedim>
- static
- void set_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<1>) {
- mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
- }
-
- template<int spacedim>
- static
- void set_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<2>) {
- mg_faces.quads.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
- }
-
- template<int spacedim>
- static
- void set_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>& mg_level, internal::DoFHandler::DoFFaces<3>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<3>) {
- mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
- }
++ }
++
++ for (unsigned int vertex = 0; vertex < n_vertices; ++vertex)
++ if (tria.vertex_used (vertex))
++ {
++ Assert (min_level[vertex] < n_levels, ExcInternalError ());
++ Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ());
++ dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex);
++ }
++
++ else
++ {
++ Assert (min_level[vertex] == n_levels, ExcInternalError ());
++ Assert (max_level[vertex] == 0, ExcInternalError ());
++ dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0);
++ }
++ }
++
++ template<int spacedim>
++ static
++ void reserve_space_mg (DoFHandler<3, spacedim> &dof_handler)
++ {
++ Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation"));
++ dof_handler.clear_mg_space ();
++
++ const dealii::FiniteElement<3, spacedim> &fe = dof_handler.get_fe ();
++ const dealii::Triangulation<3, spacedim> &tria = dof_handler.get_tria ();
++ const unsigned int &n_levels = tria.n_levels ();
++
++ for (unsigned int i = 0; i < n_levels; ++i)
++ {
++ dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<3>);
++ dof_handler.mg_levels.back ()->dof_object.dofs = std::vector<unsigned int> (tria.n_raw_hexs (i) * fe.dofs_per_hex, DoFHandler<3>::invalid_dof_index);
++ }
++
++ dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<3>;
++ dof_handler.mg_faces->lines.dofs = std::vector<unsigned int> (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<3>::invalid_dof_index);
++ dof_handler.mg_faces->quads.dofs = std::vector<unsigned int> (tria.n_raw_quads () * fe.dofs_per_quad, DoFHandler<3>::invalid_dof_index);
++
++ const unsigned int &n_vertices = tria.n_vertices ();
++
++ dof_handler.mg_vertex_dofs.resize (n_vertices);
++
++ std::vector<unsigned int> max_level (n_vertices, 0);
++ std::vector<unsigned int> min_level (n_vertices, n_levels);
++
++ for (typename dealii::Triangulation<3, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell)
++ {
++ const unsigned int level = cell->level ();
++
++ for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex)
++ {
++ const unsigned int vertex_index = cell->vertex_index (vertex);
++
++ if (min_level[vertex_index] > level)
++ min_level[vertex_index] = level;
++
++ if (max_level[vertex_index] < level)
++ max_level[vertex_index] = level;
++ }
++ }
++
++ for (unsigned int vertex = 0; vertex < n_vertices; ++vertex)
++ if (tria.vertex_used (vertex))
++ {
++ Assert (min_level[vertex] < n_levels, ExcInternalError ());
++ Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ());
++ dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex);
++ }
++
++ else
++ {
++ Assert (min_level[vertex] == n_levels, ExcInternalError ());
++ Assert (max_level[vertex] == 0, ExcInternalError ());
++ dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0);
++ }
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index distribute_dofs_on_cell (typename DoFHandler<1, spacedim>::cell_iterator &cell, types::global_dof_index next_free_dof)
++ {
++ const FiniteElement<1, spacedim> &fe = cell->get_fe ();
++
++ if (fe.dofs_per_vertex > 0)
++ for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex)
++ {
++ typename DoFHandler<1, spacedim>::cell_iterator neighbor = cell->neighbor (vertex);
++
++ if (neighbor.state () == IteratorState::valid)
++ if (neighbor->user_flag_set () && (neighbor->level () == cell->level ()))
++ {
++ if (vertex == 0)
++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
++ cell->set_mg_vertex_dof_index (cell->level (), 0, dof, neighbor->mg_vertex_dof_index (cell->level (), 1, dof));
++
++ else
++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
++ cell->set_mg_vertex_dof_index (cell->level (), 1, dof, neighbor->mg_vertex_dof_index (cell->level (), 0, dof));
++
++ continue;
++ }
++
++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
++ cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++);
++ }
++
++ if (fe.dofs_per_line > 0)
++ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
++ cell->set_mg_dof_index (cell->level (), dof, next_free_dof++);
++
++ cell->set_user_flag ();
++ return next_free_dof;
++ }
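(Worked numbers for the neighbor check above: on a uniformly refined 1d level
with N cells and a Q1 element, every interior vertex is shared by two cells;
the second cell visited copies the index already assigned by its neighbor
instead of drawing a new one, so the level ends up with N+1 vertex dofs rather
than 2N, plus N*dofs_per_line line dofs for higher-order elements.)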
++
++ template<int spacedim>
++ static
++ types::global_dof_index distribute_dofs_on_cell (typename DoFHandler<2, spacedim>::cell_iterator &cell, types::global_dof_index next_free_dof)
++ {
++ const FiniteElement<2, spacedim> &fe = cell->get_fe ();
++
++ if (fe.dofs_per_vertex > 0)
++ for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex)
++ if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<2>::invalid_dof_index)
++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
++ cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++);
++
++ if (fe.dofs_per_line > 0)
++ for (unsigned int face = 0; face < GeometryInfo<2>::faces_per_cell; ++face)
++ {
++ typename DoFHandler<2, spacedim>::line_iterator line = cell->line (face);
++
++ if (line->mg_dof_index (cell->level (), 0) == DoFHandler<2>::invalid_dof_index)
++ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
++ line->set_mg_dof_index (cell->level (), dof, next_free_dof++);
++ }
++
++ if (fe.dofs_per_quad > 0)
++ for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof)
++ cell->set_mg_dof_index (cell->level (), dof, next_free_dof++);
++
++ cell->set_user_flag ();
++ return next_free_dof;
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index distribute_dofs_on_cell (typename DoFHandler<3, spacedim>::cell_iterator &cell, types::global_dof_index next_free_dof)
++ {
++ const FiniteElement<3, spacedim> &fe = cell->get_fe ();
++
++ if (fe.dofs_per_vertex > 0)
++ for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex)
++ if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<3>::invalid_dof_index)
++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof)
++ cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++);
++
++ if (fe.dofs_per_line > 0)
++ for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell; ++line)
++ {
++ typename DoFHandler<3, spacedim>::line_iterator line_it = cell->line (line);
++
++ if (line_it->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index)
++ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof)
++ line_it->set_mg_dof_index (cell->level (), dof, next_free_dof++);
++ }
++
++ if (fe.dofs_per_quad > 0)
++ for (unsigned int face = 0; face < GeometryInfo<3>::quads_per_cell; ++face)
++ {
++ typename DoFHandler<3, spacedim>::quad_iterator quad = cell->quad (face);
++
++ if (quad->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index)
++ for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof)
++ quad->set_mg_dof_index (cell->level (), dof, next_free_dof++);
++ }
++
++ if (fe.dofs_per_hex > 0)
++ for (unsigned int dof = 0; dof < fe.dofs_per_hex; ++dof)
++ cell->set_mg_dof_index (cell->level (), dof, next_free_dof++);
++
++ cell->set_user_flag ();
++ return next_free_dof;
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index get_dof_index (const DoFHandler<1, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<1> &mg_level, internal::DoFHandler::DoFFaces<1> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>)
++ {
++ return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index);
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index get_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &, internal::DoFHandler::DoFFaces<2> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>)
++ {
++ return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index);
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index get_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &mg_level, internal::DoFHandler::DoFFaces<2> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>)
++ {
++ return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index);
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index get_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>)
++ {
++ return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index);
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index get_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>)
++ {
++ return mg_faces.quads.get_dof_index (dof_handler, obj_index, fe_index, local_index);
++ }
++
++ template<int spacedim>
++ static
++ types::global_dof_index get_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &mg_level, internal::DoFHandler::DoFFaces<3> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<3>)
++ {
++ return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index);
++ }
++
++ template<int spacedim>
++ static
++ void set_dof_index (const DoFHandler<1, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<1> &mg_level, internal::DoFHandler::DoFFaces<1> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<1>)
++ {
++ mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
++ }
++
++ template<int spacedim>
++ static
++ void set_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &, internal::DoFHandler::DoFFaces<2> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<1>)
++ {
++ mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
++ }
++
++ template<int spacedim>
++ static
++ void set_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &mg_level, internal::DoFHandler::DoFFaces<2> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<2>)
++ {
++ mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
++ }
++
++ template<int spacedim>
++ static
++ void set_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<1>)
++ {
++ mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
++ }
++
++ template<int spacedim>
++ static
++ void set_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<2>)
++ {
++ mg_faces.quads.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
++ }
++
++ template<int spacedim>
++ static
++ void set_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &mg_level, internal::DoFHandler::DoFFaces<3> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index, const int2type<3>)
++ {
++ mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index);
++ }
};
}
}
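(The get_dof_index/set_dof_index overloads above are selected through their
last argument, an int2type<structdim> tag. A minimal, self-contained sketch of
that dispatch pattern, with all names local to the sketch rather than taken
from deal.II:)

    #include <iostream>

    template <int N> struct int2type {};   // empty tag type, one per dimension

    // one overload per structural dimension; the tag picks it at compile time
    void describe (const int2type<1>) { std::cout << "line storage\n"; }
    void describe (const int2type<2>) { std::cout << "quad storage\n"; }
    void describe (const int2type<3>) { std::cout << "hex storage\n"; }

    template <int structdim>
    void access_object ()
    {
      describe (int2type<structdim>());    // resolved statically, no runtime branching
    }

    int main ()
    {
      access_object<1> ();                 // prints "line storage"
      access_object<3> ();                 // prints "hex storage"
    }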
template<int dim, int spacedim>
DoFHandler<dim,spacedim>::DoFHandler (const Triangulation<dim,spacedim> &tria)
- :
- tria(&tria, typeid(*this).name()),
- selected_fe(0, typeid(*this).name()),
- faces(NULL),
- mg_faces (NULL)
+ :
+ tria(&tria, typeid(*this).name()),
+ selected_fe(0, typeid(*this).name()),
- faces(NULL)
++ faces(NULL),
++ mg_faces (NULL)
{
- // decide whether we need a
- // sequential or a parallel
- // distributed policy
+ // decide whether we need a
+ // sequential or a parallel
+ // distributed policy
if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
(&tria)
== 0)
template<int dim, int spacedim>
DoFHandler<dim,spacedim>::DoFHandler ()
- :
- tria(0, typeid(*this).name()),
- selected_fe(0, typeid(*this).name()),
- faces(NULL),
- mg_faces (NULL)
+ :
+ tria(0, typeid(*this).name()),
+ selected_fe(0, typeid(*this).name()),
- faces(NULL)
++ faces(NULL),
++ mg_faces (NULL)
{}
template <>
-unsigned int DoFHandler<1>::n_boundary_dofs (const FunctionMap &boundary_indicators) const
+types::global_dof_index DoFHandler<1>::n_boundary_dofs (const FunctionMap &boundary_indicators) const
{
- // check that only boundary
- // indicators 0 and 1 are allowed
- // in 1d
+ // check that only boundary
+ // indicators 0 and 1 are allowed
+ // in 1d
for (FunctionMap::const_iterator i=boundary_indicators.begin();
i!=boundary_indicators.end(); ++i)
Assert ((i->first == 0) || (i->first == 1),
template <>
-unsigned int DoFHandler<1>::n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const
+types::global_dof_index DoFHandler<1>::n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const
{
- // check that only boundary
- // indicators 0 and 1 are allowed
- // in 1d
+ // check that only boundary
+ // indicators 0 and 1 are allowed
+ // in 1d
for (std::set<types::boundary_id>::const_iterator i=boundary_indicators.begin();
i!=boundary_indicators.end(); ++i)
Assert ((*i == 0) || (*i == 1),
template <>
-unsigned int DoFHandler<1,2>::n_boundary_dofs (const FunctionMap &boundary_indicators) const
+types::global_dof_index DoFHandler<1,2>::n_boundary_dofs (const FunctionMap &boundary_indicators) const
{
- // check that only boundary
- // indicators 0 and 1 are allowed
- // in 1d
+ // check that only boundary
+ // indicators 0 and 1 are allowed
+ // in 1d
for (FunctionMap::const_iterator i=boundary_indicators.begin();
i!=boundary_indicators.end(); ++i)
Assert ((i->first == 0) || (i->first == 1),
template <>
-unsigned int DoFHandler<1,2>::n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const
+types::global_dof_index DoFHandler<1,2>::n_boundary_dofs (const std::set<types::boundary_id> &boundary_indicators) const
{
- // check that only boundary
- // indicators 0 and 1 are allowed
- // in 1d
+ // check that only boundary
+ // indicators 0 and 1 are allowed
+ // in 1d
for (std::set<types::boundary_id>::const_iterator i=boundary_indicators.begin();
i!=boundary_indicators.end(); ++i)
Assert ((*i == 0) || (*i == 1),
std::set<int> boundary_dofs;
const unsigned int dofs_per_face = get_fe().dofs_per_face;
- std::vector<unsigned int> dofs_on_face(dofs_per_face);
+ std::vector<types::global_dof_index> dofs_on_face(dofs_per_face);
- // loop over all faces of all cells
- // and see whether they are at a
- // boundary. note (i) that we visit
- // interior faces twice (which we
- // don't care about) but exterior
- // faces only once as is
- // appropriate, and (ii) that we
- // need not take special care of
- // single lines (using
- // @p{cell->has_boundary_lines}),
- // since we do not support
- // boundaries of dimension dim-2,
- // and so every boundary line is
- // also part of a boundary face.
+ // loop over all faces of all cells
+ // and see whether they are at a
+ // boundary. note (i) that we visit
+ // interior faces twice (which we
+ // don't care about) but exterior
+ // faces only once as is
+ // appropriate, and (ii) that we
+ // need not take special care of
+ // single lines (using
+ // @p{cell->has_boundary_lines}),
+ // since we do not support
+ // boundaries of dimension dim-2,
+ // and so every boundary line is
+ // also part of a boundary face.
active_cell_iterator cell = begin_active (),
- endc = end();
+ endc = end();
for (; cell!=endc; ++cell)
for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
if (cell->at_boundary(f))
std::set<int> boundary_dofs;
const unsigned int dofs_per_face = get_fe().dofs_per_face;
- std::vector<unsigned int> dofs_on_face(dofs_per_face);
+ std::vector<types::global_dof_index> dofs_on_face(dofs_per_face);
- // same as in the previous
- // function, but with an additional
- // check for the boundary indicator
+ // same as in the previous
+ // function, but with an additional
+ // check for the boundary indicator
active_cell_iterator cell = begin_active (),
- endc = end();
+ endc = end();
for (; cell!=endc; ++cell)
for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
if (cell->at_boundary(f)
std::set<int> boundary_dofs;
const unsigned int dofs_per_face = get_fe().dofs_per_face;
- std::vector<unsigned int> dofs_on_face(dofs_per_face);
+ std::vector<types::global_dof_index> dofs_on_face(dofs_per_face);
- // same as in the previous
- // function, but with a different
- // check for the boundary indicator
+ // same as in the previous
+ // function, but with a different
+ // check for the boundary indicator
active_cell_iterator cell = begin_active (),
- endc = end();
+ endc = end();
for (; cell!=endc; ++cell)
for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
if (cell->at_boundary(f)
}
- void DoFHandler<dim, spacedim>::distribute_mg_dofs (const FiniteElement<dim, spacedim>& fe) {
+template<int dim, int spacedim>
- const unsigned int& n_levels = (*tria).n_levels ();
++void DoFHandler<dim, spacedim>::distribute_mg_dofs (const FiniteElement<dim, spacedim> &fe)
++{
+ Assert ((dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>*> (&*tria) == 0), ExcMessage ("Invalid triangulation"));
+ distribute_dofs (fe);
+ reserve_space ();
+
- for (unsigned int level = 0; level < n_levels; ++level) {
- types::global_dof_index next_free_dof = 0;
++ const unsigned int &n_levels = (*tria).n_levels ();
+
+ mg_used_dofs.resize (n_levels, 0);
+
+ std::vector<bool> user_flags;
+
+ (*tria).save_user_flags (user_flags);
+ const_cast<Triangulation<dim, spacedim>&>(*tria).clear_user_flags ();
+
- for (cell_iterator cell = begin (level); cell != end (level); ++cell)
- next_free_dof = internal::DoFHandler::Implementation::distribute_dofs_on_cell<spacedim> (cell, next_free_dof);
++ for (unsigned int level = 0; level < n_levels; ++level)
++ {
++ types::global_dof_index next_free_dof = 0;
+
- mg_used_dofs[level] = next_free_dof;
- }
++ for (cell_iterator cell = begin (level); cell != end (level); ++cell)
++ next_free_dof = internal::DoFHandler::Implementation::distribute_dofs_on_cell<spacedim> (cell, next_free_dof);
+
- void DoFHandler<dim, spacedim>::reserve_space () {
++ mg_used_dofs[level] = next_free_dof;
++ }
+
+ const_cast<Triangulation<dim, spacedim>&>(*tria).load_user_flags (user_flags);
+ block_info_object.initialize (*this, true, true);
+}
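(A minimal usage sketch for the new entry point; the surrounding program setup
is ordinary deal.II code and only distribute_mg_dofs itself comes from the
patch above:)

    #include <deal.II/grid/tria.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/dofs/dof_handler.h>

    using namespace dealii;

    int main ()
    {
      Triangulation<2> triangulation;
      GridGenerator::hyper_cube (triangulation);
      triangulation.refine_global (2);      // several levels for the level loop

      FE_Q<2>       fe (1);
      DoFHandler<2> dof_handler (triangulation);

      // distributes the active dofs first (distribute_dofs is called
      // internally) and then numbers the level dofs level by level
      dof_handler.distribute_mg_dofs (fe);
    }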
+
+template<int dim, int spacedim>
- void DoFHandler<dim, spacedim>::clear_mg_space () {
++void DoFHandler<dim, spacedim>::reserve_space ()
++{
+ internal::DoFHandler::Implementation::reserve_space_mg (*this);
+}
+
+template<int dim, int spacedim>
++void DoFHandler<dim, spacedim>::clear_mg_space ()
++{
+ for (unsigned int i = 0; i < mg_levels.size (); ++i)
+ delete mg_levels[i];
+
+ mg_levels.clear ();
+ delete mg_faces;
+ mg_faces = NULL;
+
+ std::vector<MGVertexDoFs> tmp;
+
+ std::swap (mg_vertex_dofs, tmp);
+}
+
template<int dim, int spacedim>
void DoFHandler<dim,spacedim>::initialize_local_block_info ()
template<int dim, int spacedim>
void DoFHandler<dim,spacedim>::clear ()
{
- // release lock to old fe
+ // release lock to old fe
selected_fe = 0;
- // release memory
+ // release memory
clear_space ();
+ clear_mg_space ();
}
template <int dim, int spacedim>
void
DoFHandler<dim,spacedim>::
-renumber_dofs (const std::vector<unsigned int> &new_numbers)
+renumber_dofs (const std::vector<types::global_dof_index> &new_numbers)
{
Assert (new_numbers.size() == n_locally_owned_dofs(),
- ExcRenumberingIncomplete());
+ ExcRenumberingIncomplete());
#ifdef DEBUG
- // assert that the new indices are
- // consecutively numbered if we are
- // working on a single
- // processor. this doesn't need to
- // hold in the case of a parallel
- // mesh since we map the interval
- // [0...n_dofs()) into itself but
- // only globally, not on each
- // processor
+ // assert that the new indices are
+ // consecutively numbered if we are
+ // working on a single
+ // processor. this doesn't need to
+ // hold in the case of a parallel
+ // mesh since we map the interval
+ // [0...n_dofs()) into itself but
+ // only globally, not on each
+ // processor
if (n_locally_owned_dofs() == n_dofs())
{
- std::vector<unsigned int> tmp(new_numbers);
+ std::vector<types::global_dof_index> tmp(new_numbers);
std::sort (tmp.begin(), tmp.end());
- std::vector<unsigned int>::const_iterator p = tmp.begin();
- unsigned int i = 0;
+ std::vector<types::global_dof_index>::const_iterator p = tmp.begin();
+ types::global_dof_index i = 0;
for (; p!=tmp.end(); ++p, ++i)
- Assert (*p == i, ExcNewNumbersNotConsecutive(i));
+ Assert (*p == i, ExcNewNumbersNotConsecutive(i));
}
else
- for (unsigned int i=0; i<new_numbers.size(); ++i)
+ for (types::global_dof_index i=0; i<new_numbers.size(); ++i)
Assert (new_numbers[i] < n_dofs(),
- ExcMessage ("New DoF index is not less than the total number of dofs."));
+ ExcMessage ("New DoF index is not less than the total number of dofs."));
#endif
number_cache = policy->renumber_dofs (new_numbers, *this);
{
switch (dim)
{
- case 1:
- return get_fe().dofs_per_vertex;
- case 2:
- return (3*get_fe().dofs_per_vertex +
- 2*get_fe().dofs_per_line);
- case 3:
- // we need to take refinement of
- // one boundary face into
- // consideration here; in fact,
- // this function returns what
- // #max_coupling_between_dofs<2>
- // returns
- //
- // we assume here, that only four
- // faces meet at the boundary;
- // this assumption is not
- // justified and needs to be
- // fixed some time. fortunately,
- // ommitting it for now does no
- // harm since the matrix will cry
- // foul if its requirements are
- // not satisfied
- return (19*get_fe().dofs_per_vertex +
- 28*get_fe().dofs_per_line +
- 8*get_fe().dofs_per_quad);
- default:
- Assert (false, ExcNotImplemented());
- return numbers::invalid_unsigned_int;
+ case 1:
+ return get_fe().dofs_per_vertex;
+ case 2:
+ return (3*get_fe().dofs_per_vertex +
+ 2*get_fe().dofs_per_line);
+ case 3:
+ // we need to take refinement of
+ // one boundary face into
+ // consideration here; in fact,
+ // this function returns what
+ // #max_coupling_between_dofs<2>
+ // returns
+ //
+ // we assume here that only four
+ // faces meet at the boundary;
+ // this assumption is not
+ // justified and needs to be
+ // fixed some time. fortunately,
- // omitting it for now does no
++ // omitting it for now does no
+ // harm since the matrix will cry
+ // foul if its requirements are
+ // not satisfied
+ return (19*get_fe().dofs_per_vertex +
+ 28*get_fe().dofs_per_line +
+ 8*get_fe().dofs_per_quad);
+ default:
+ Assert (false, ExcNotImplemented());
+ return numbers::invalid_unsigned_int;
}
}
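(Worked numbers for the switch above: in 2d the boundary estimate is
3*dofs_per_vertex + 2*dofs_per_line, i.e. 3 for a Q1 element and 5 for Q2; in
3d it reuses the 2d interior count 19*dofs_per_vertex + 28*dofs_per_line +
8*dofs_per_quad, i.e. 19 for Q1 and 55 for Q2.)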
number_cache.clear ();
}
- types::global_dof_index DoFHandler<dim, spacedim>::get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const {
+template<int dim, int spacedim>
+template<int structdim>
- void DoFHandler<dim, spacedim>::set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index) const {
++types::global_dof_index DoFHandler<dim, spacedim>::get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const
++{
+ return internal::DoFHandler::Implementation::get_dof_index (*this, *this->mg_levels[obj_level], *this->mg_faces, obj_index, fe_index, local_index, internal::int2type<structdim> ());
+}
+
+template<int dim, int spacedim>
+template<int structdim>
- DoFHandler<dim, spacedim>::MGVertexDoFs::MGVertexDoFs (): coarsest_level (numbers::invalid_unsigned_int), finest_level (0), indices (0), indices_offset (0) {
++void DoFHandler<dim, spacedim>::set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const types::global_dof_index global_index) const
++{
+ internal::DoFHandler::Implementation::set_dof_index (*this, *this->mg_levels[obj_level], *this->mg_faces, obj_index, fe_index, local_index, global_index, internal::int2type<structdim> ());
+}
+
+template<int dim, int spacedim>
- DoFHandler<dim, spacedim>::MGVertexDoFs::~MGVertexDoFs () {
++DoFHandler<dim, spacedim>::MGVertexDoFs::MGVertexDoFs (): coarsest_level (numbers::invalid_unsigned_int), finest_level (0), indices (0), indices_offset (0)
++{
+}
+
+template<int dim, int spacedim>
- void DoFHandler<dim, spacedim>::MGVertexDoFs::init (const unsigned int cl, const unsigned int fl, const unsigned int dofs_per_vertex) {
- if (indices != 0) {
- delete[] indices;
- indices = 0;
- }
++DoFHandler<dim, spacedim>::MGVertexDoFs::~MGVertexDoFs ()
++{
+ delete[] indices;
+ delete[] indices_offset;
+}
+
+template<int dim, int spacedim>
- if (indices_offset != 0) {
- delete[] indices_offset;
- indices_offset = 0;
- }
++void DoFHandler<dim, spacedim>::MGVertexDoFs::init (const unsigned int cl, const unsigned int fl, const unsigned int dofs_per_vertex)
++{
++ if (indices != 0)
++ {
++ delete[] indices;
++ indices = 0;
++ }
+
- unsigned int DoFHandler<dim, spacedim>::MGVertexDoFs::get_coarsest_level () const {
++ if (indices_offset != 0)
++ {
++ delete[] indices_offset;
++ indices_offset = 0;
++ }
+
+ coarsest_level = cl;
+ finest_level = fl;
+
+ if (cl > fl)
+ return;
+
+ const unsigned int n_levels = finest_level - coarsest_level + 1;
+ const unsigned int n_indices = n_levels * dofs_per_vertex;
+
+ indices = new unsigned int[n_indices];
+ Assert (indices != 0, ExcNoMemory ());
+
+ for (unsigned int i = 0; i < n_indices; ++i)
+ indices[i] = DoFHandler<dim, spacedim>::invalid_dof_index;
+
+ indices_offset = new unsigned int[n_levels];
+ Assert (indices_offset != 0, ExcNoMemory ());
+
+ for (unsigned int i = 0; i < n_levels; ++i)
+ indices_offset[i] = i * dofs_per_vertex;
+}
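(Worked numbers for init: with cl = 1, fl = 3 and two dofs per vertex,
n_levels = 3 and n_indices = 6, so indices holds six invalid_dof_index entries
and indices_offset becomes {0, 2, 4}; presumably the accessors, which are not
part of this hunk, then locate the dofs of level l starting at
indices[indices_offset[l - coarsest_level]].)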
+
+template<int dim, int spacedim>
- unsigned int DoFHandler<dim, spacedim>::MGVertexDoFs::get_finest_level () const {
++unsigned int DoFHandler<dim, spacedim>::MGVertexDoFs::get_coarsest_level () const
++{
+ return coarsest_level;
+}
+
+template<int dim, int spacedim>
++unsigned int DoFHandler<dim, spacedim>::MGVertexDoFs::get_finest_level () const
++{
+ return finest_level;
+}
+
/*-------------- Explicit Instantiations -------------------------------*/
#include "dof_handler.inst"
struct Implementation
{
- /* -------------- distribute_dofs functionality ------------- */
-
- /**
- * Distribute dofs on the given cell,
- * with new dofs starting with index
- * @p next_free_dof. Return the next
- * unused index number.
- *
- * This function is excluded from the
- * @p distribute_dofs function since
- * it can not be implemented dimension
- * independent.
- */
- template <int spacedim>
- static
- types::global_dof_index
- distribute_dofs_on_cell (const DoFHandler<1,spacedim> &dof_handler,
- const typename DoFHandler<1,spacedim>::active_cell_iterator &cell,
- types::global_dof_index next_free_dof)
- {
+ /* -------------- distribute_dofs functionality ------------- */
+
+ /**
+ * Distribute dofs on the given cell,
+ * with new dofs starting with index
+ * @p next_free_dof. Return the next
+ * unused index number.
+ *
+ * This function is excluded from the
+ * @p distribute_dofs function since
+ * it can not be implemented dimension
+ * independent.
+ */
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ distribute_dofs_on_cell (const DoFHandler<1,spacedim> &dof_handler,
+ const typename DoFHandler<1,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
++ types::global_dof_index next_free_dof)
+ {
- // distribute dofs of vertices
- if (dof_handler.get_fe().dofs_per_vertex > 0)
- for (unsigned int v=0; v<GeometryInfo<1>::vertices_per_cell; ++v)
- {
- if (cell->vertex_dof_index (v,0) ==
- DoFHandler<1,spacedim>::invalid_dof_index)
- for (unsigned int d=0;
- d<dof_handler.get_fe().dofs_per_vertex; ++d)
- {
- Assert ((cell->vertex_dof_index (v,d) ==
- DoFHandler<1,spacedim>::invalid_dof_index),
- ExcInternalError());
- cell->set_vertex_dof_index (v, d, next_free_dof++);
- }
- else
- for (unsigned int d=0;
- d<dof_handler.get_fe().dofs_per_vertex; ++d)
- Assert ((cell->vertex_dof_index (v,d) !=
- DoFHandler<1,spacedim>::invalid_dof_index),
- ExcInternalError());
- }
+ // distribute dofs of vertices
+ if (dof_handler.get_fe().dofs_per_vertex > 0)
+ for (unsigned int v=0; v<GeometryInfo<1>::vertices_per_cell; ++v)
+ {
+ if (cell->vertex_dof_index (v,0) ==
+ DoFHandler<1,spacedim>::invalid_dof_index)
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ {
+ Assert ((cell->vertex_dof_index (v,d) ==
+ DoFHandler<1,spacedim>::invalid_dof_index),
+ ExcInternalError());
+ cell->set_vertex_dof_index (v, d, next_free_dof++);
+ }
+ else
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ Assert ((cell->vertex_dof_index (v,d) !=
+ DoFHandler<1,spacedim>::invalid_dof_index),
+ ExcInternalError());
+ }
- // dofs of line
- for (unsigned int d=0;
- d<dof_handler.get_fe().dofs_per_line; ++d)
- cell->set_dof_index (d, next_free_dof++);
+ // dofs of line
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_line; ++d)
+ cell->set_dof_index (d, next_free_dof++);
- // note that this cell has been
- // processed
- cell->set_user_flag ();
+ // note that this cell has been
+ // processed
+ cell->set_user_flag ();
- return next_free_dof;
- }
+ return next_free_dof;
+ }
- template <int spacedim>
- static
- types::global_dof_index
- distribute_dofs_on_cell (const DoFHandler<2,spacedim> &dof_handler,
- const typename DoFHandler<2,spacedim>::active_cell_iterator &cell,
- types::global_dof_index next_free_dof)
- {
- if (dof_handler.get_fe().dofs_per_vertex > 0)
- // number dofs on vertices
- for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
- // check whether dofs for this
- // vertex have been distributed
- // (only check the first dof)
- if (cell->vertex_dof_index(vertex, 0) == DoFHandler<2,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (vertex, d, next_free_dof++);
-
- // for the four sides
- if (dof_handler.get_fe().dofs_per_line > 0)
- for (unsigned int side=0; side<GeometryInfo<2>::faces_per_cell; ++side)
- {
- const typename DoFHandler<2,spacedim>::line_iterator
- line = cell->line(side);
-
- // distribute dofs if necessary:
- // check whether line dof is already
- // numbered (check only first dof)
- if (line->dof_index(0) == DoFHandler<2,spacedim>::invalid_dof_index)
- // if not: distribute dofs
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
- line->set_dof_index (d, next_free_dof++);
- }
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ distribute_dofs_on_cell (const DoFHandler<2,spacedim> &dof_handler,
+ const typename DoFHandler<2,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
++ types::global_dof_index next_free_dof)
+ {
+ if (dof_handler.get_fe().dofs_per_vertex > 0)
+ // number dofs on vertices
+ for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
+ // check whether dofs for this
+ // vertex have been distributed
+ // (only check the first dof)
+ if (cell->vertex_dof_index(vertex, 0) == DoFHandler<2,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof++);
+
+ // for the four sides
+ if (dof_handler.get_fe().dofs_per_line > 0)
+ for (unsigned int side=0; side<GeometryInfo<2>::faces_per_cell; ++side)
+ {
+ const typename DoFHandler<2,spacedim>::line_iterator
+ line = cell->line(side);
+
+ // distribute dofs if necessary:
+ // check whether line dof is already
+ // numbered (check only first dof)
+ if (line->dof_index(0) == DoFHandler<2,spacedim>::invalid_dof_index)
+ // if not: distribute dofs
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
+ line->set_dof_index (d, next_free_dof++);
+ }
- // dofs of quad
- if (dof_handler.get_fe().dofs_per_quad > 0)
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
- cell->set_dof_index (d, next_free_dof++);
+ // dofs of quad
+ if (dof_handler.get_fe().dofs_per_quad > 0)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
+ cell->set_dof_index (d, next_free_dof++);
- // note that this cell has been processed
- cell->set_user_flag ();
+ // note that this cell has been processed
+ cell->set_user_flag ();
- return next_free_dof;
- }
+ return next_free_dof;
+ }
- template <int spacedim>
- static
- types::global_dof_index
- distribute_dofs_on_cell (const DoFHandler<3,spacedim> &dof_handler,
- const typename DoFHandler<3,spacedim>::active_cell_iterator &cell,
- types::global_dof_index next_free_dof)
- {
- if (dof_handler.get_fe().dofs_per_vertex > 0)
- // number dofs on vertices
- for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
- // check whether dofs for this
- // vertex have been distributed
- // (only check the first dof)
- if (cell->vertex_dof_index(vertex, 0) == DoFHandler<3,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (vertex, d, next_free_dof++);
-
- // for the lines
- if (dof_handler.get_fe().dofs_per_line > 0)
- for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
- {
- const typename DoFHandler<3,spacedim>::line_iterator
- line = cell->line(l);
-
- // distribute dofs if necessary:
- // check whether line dof is already
- // numbered (check only first dof)
- if (line->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
- // if not: distribute dofs
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
- line->set_dof_index (d, next_free_dof++);
- }
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ distribute_dofs_on_cell (const DoFHandler<3,spacedim> &dof_handler,
+ const typename DoFHandler<3,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
++ types::global_dof_index next_free_dof)
+ {
+ if (dof_handler.get_fe().dofs_per_vertex > 0)
+ // number dofs on vertices
+ for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
+ // check whether dofs for this
+ // vertex have been distributed
+ // (only check the first dof)
+ if (cell->vertex_dof_index(vertex, 0) == DoFHandler<3,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof++);
+
+ // for the lines
+ if (dof_handler.get_fe().dofs_per_line > 0)
+ for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
+ {
+ const typename DoFHandler<3,spacedim>::line_iterator
+ line = cell->line(l);
+
+ // distribute dofs if necessary:
+ // check whether line dof is already
+ // numbered (check only first dof)
+ if (line->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
+ // if not: distribute dofs
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
+ line->set_dof_index (d, next_free_dof++);
+ }
- // for the quads
- if (dof_handler.get_fe().dofs_per_quad > 0)
- for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
- {
- const typename DoFHandler<3,spacedim>::quad_iterator
- quad = cell->quad(q);
-
- // distribute dofs if necessary:
- // check whether quad dof is already
- // numbered (check only first dof)
- if (quad->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
- // if not: distribute dofs
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
- quad->set_dof_index (d, next_free_dof++);
- }
+ // for the quads
+ if (dof_handler.get_fe().dofs_per_quad > 0)
+ for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
+ {
+ const typename DoFHandler<3,spacedim>::quad_iterator
+ quad = cell->quad(q);
+
+ // distribute dofs if necessary:
+ // check whether quad dof is already
+ // numbered (check only first dof)
+ if (quad->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
+ // if not: distribute dofs
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
+ quad->set_dof_index (d, next_free_dof++);
+ }
- // dofs of hex
- if (dof_handler.get_fe().dofs_per_hex > 0)
- for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_hex; ++d)
- cell->set_dof_index (d, next_free_dof++);
+ // dofs of hex
+ if (dof_handler.get_fe().dofs_per_hex > 0)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_hex; ++d)
+ cell->set_dof_index (d, next_free_dof++);
- // note that this cell has been
- // processed
- cell->set_user_flag ();
+ // note that this cell has been
+ // processed
+ cell->set_user_flag ();
- return next_free_dof;
- }
+ return next_free_dof;
+ }
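// The three distribute_dofs_on_cell() overloads above share one idiom: dofs on
// objects shared between cells (vertices, lines, quads) are handed out only if
// the object's first dof is still invalid_dof_index, cell-interior dofs are
// always new, and the running counter next_free_dof is threaded through and
// returned. A standalone sketch of that idiom with hypothetical types (plain
// C++, not deal.II API):

#include <cstddef>
#include <limits>
#include <vector>

typedef unsigned long long dof_index;
const dof_index invalid_dof_index = std::numeric_limits<dof_index>::max();

// Stand-in for a shared mesh object (e.g. a vertex) carrying a fixed
// number of dofs, all initially invalid.
struct DofObject
{
  std::vector<dof_index> dofs;
  explicit DofObject (const unsigned int n) : dofs (n, invalid_dof_index) {}
};

// Number the dofs of one object unless it was already handled from a
// neighboring cell (checking only the first dof, exactly as above), and
// return the next unused index.
dof_index distribute_dofs_on_object (DofObject &obj, dof_index next_free_dof)
{
  if (!obj.dofs.empty() && obj.dofs[0] == invalid_dof_index)
    for (std::size_t d = 0; d < obj.dofs.size(); ++d)
      obj.dofs[d] = next_free_dof++;
  return next_free_dof;
}

// Usage mirrors the cell loop in distribute_dofs() below:
//   dof_index next_free = 0;
//   for (std::size_t c = 0; c < objects.size(); ++c)
//     next_free = distribute_dofs_on_object (objects[c], next_free);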
- /**
- * Distribute degrees of
- * freedom on all cells, or
- * on cells with the
- * correct subdomain_id if
- * the corresponding
- * argument is not equal to
- * types::invalid_subdomain_id. Return
- * the next free dof index.
- */
- template <int dim, int spacedim>
- static
- types::global_dof_index
- distribute_dofs (const types::global_dof_index offset,
- const types::subdomain_id subdomain_id,
- DoFHandler<dim,spacedim> &dof_handler)
- {
- const dealii::Triangulation<dim,spacedim> & tria
- = dof_handler.get_tria();
- Assert (tria.n_levels() > 0, ExcMessage("Empty triangulation"));
-
- // Clear user flags because we will
- // need them. But first we save
- // them and make sure that we
- // restore them later such that at
- // the end of this function the
- // Triangulation will be in the
- // same state as it was at the
- // beginning of this function.
- std::vector<bool> user_flags;
- tria.save_user_flags(user_flags);
- const_cast<dealii::Triangulation<dim,spacedim> &>(tria).clear_user_flags ();
-
- types::global_dof_index next_free_dof = offset;
- typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
-
- for (; cell != endc; ++cell)
- if ((subdomain_id == types::invalid_subdomain_id)
- ||
- (cell->subdomain_id() == subdomain_id))
- next_free_dof
- = Implementation::distribute_dofs_on_cell (dof_handler, cell, next_free_dof);
-
- // update the cache used
- // for cell dof indices
- for (typename DoFHandler<dim,spacedim>::cell_iterator
- cell = dof_handler.begin(); cell != dof_handler.end(); ++cell)
- if (cell->subdomain_id() != types::artificial_subdomain_id)
- cell->update_cell_dof_indices_cache ();
-
- // finally restore the user flags
- const_cast<dealii::Triangulation<dim,spacedim> &>(tria).load_user_flags(user_flags);
-
- return next_free_dof;
- }
-
+ /**
+ * Distribute degrees of
+ * freedom on all cells, or
+ * on cells with the
+ * correct subdomain_id if
+ * the corresponding
+ * argument is not equal to
+ * types::invalid_subdomain_id. Return
- * the total number of dofs
- * returned.
++ * the next free dof index.
+ */
+ template <int dim, int spacedim>
+ static
- unsigned int
- distribute_dofs (const unsigned int offset,
++ types::global_dof_index
++ distribute_dofs (const types::global_dof_index offset,
+ const types::subdomain_id subdomain_id,
+ DoFHandler<dim,spacedim> &dof_handler)
+ {
+ const dealii::Triangulation<dim,spacedim> &tria
+ = dof_handler.get_tria();
+ Assert (tria.n_levels() > 0, ExcMessage("Empty triangulation"));
+
+ // Clear user flags because we will
+ // need them. But first we save
+ // them and make sure that we
+ // restore them later such that at
+ // the end of this function the
+ // Triangulation will be in the
+ // same state as it was at the
+ // beginning of this function.
+ std::vector<bool> user_flags;
+ tria.save_user_flags(user_flags);
+ const_cast<dealii::Triangulation<dim,spacedim> &>(tria).clear_user_flags ();
- /* --------------------- renumber_dofs functionality ---------------- */
-
-
- /**
- * Implementation of the
- * general template of same
- * name.
- *
- * If the second argument
- * has any elements set,
-     * then the elements of the
- * vector of new numbers do
- * not relate to the old
- * DoF number but instead
- * to the index of the old
- * DoF number within the
- * set of locally owned
- * DoFs.
- */
- template <int spacedim>
- static
- void
- renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- const IndexSet &,
- DoFHandler<1,spacedim> &dof_handler,
- const bool check_validity)
- {
- // note that we can not use cell
- // iterators in this function since
- // then we would renumber the dofs on
- // the interface of two cells more
- // than once. Anyway, this way it's
- // not only more correct but also
- // faster; note, however, that dof
- // numbers may be invalid_dof_index,
- // namely when the appropriate
- // vertex/line/etc is unused
- for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.vertex_dofs.begin();
- i!=dof_handler.vertex_dofs.end(); ++i)
- if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- else if (check_validity)
- // if index is
- // invalid_dof_index:
- // check if this one
- // really is unused
- Assert (dof_handler.get_tria()
- .vertex_used((i-dof_handler.vertex_dofs.begin()) /
- dof_handler.selected_fe->dofs_per_vertex)
- == false,
- ExcInternalError ());
-
- for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
- for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.levels[level]->dof_object.dofs.begin();
- i!=dof_handler.levels[level]->dof_object.dofs.end(); ++i)
- if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
-
- // update the cache
- // used for cell dof
- // indices
- for (typename DoFHandler<1,spacedim>::cell_iterator
- cell = dof_handler.begin();
- cell != dof_handler.end(); ++cell)
- cell->update_cell_dof_indices_cache ();
- }
- unsigned int next_free_dof = offset;
++ types::global_dof_index next_free_dof = offset;
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell != endc; ++cell)
+ if ((subdomain_id == types::invalid_subdomain_id)
+ ||
+ (cell->subdomain_id() == subdomain_id))
+ next_free_dof
+ = Implementation::distribute_dofs_on_cell (dof_handler, cell, next_free_dof);
+
+ // update the cache used
+ // for cell dof indices
+ for (typename DoFHandler<dim,spacedim>::cell_iterator
+ cell = dof_handler.begin(); cell != dof_handler.end(); ++cell)
+ if (cell->subdomain_id() != types::artificial_subdomain_id)
+ cell->update_cell_dof_indices_cache ();
+ // finally restore the user flags
+ const_cast<dealii::Triangulation<dim,spacedim> &>(tria).load_user_flags(user_flags);
- template <int spacedim>
- static
- void
- renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- const IndexSet &indices,
- DoFHandler<2,spacedim> &dof_handler,
- const bool check_validity)
- {
- // note that we can not use cell
- // iterators in this function since
- // then we would renumber the dofs on
- // the interface of two cells more
- // than once. Anyway, this way it's
- // not only more correct but also
- // faster; note, however, that dof
- // numbers may be invalid_dof_index,
- // namely when the appropriate
- // vertex/line/etc is unused
- for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.vertex_dofs.begin();
- i!=dof_handler.vertex_dofs.end(); ++i)
- if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
- *i = (indices.n_elements() == 0)?
- (new_numbers[*i]) :
- (new_numbers[indices.index_within_set(*i)]);
- else if (check_validity)
- // if index is invalid_dof_index:
- // check if this one really is
- // unused
- Assert (dof_handler.get_tria()
- .vertex_used((i-dof_handler.vertex_dofs.begin()) /
- dof_handler.selected_fe->dofs_per_vertex)
- == false,
- ExcInternalError ());
+ return next_free_dof;
+ }
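// distribute_dofs() above borrows the triangulation's user flags to mark
// processed cells and carefully restores them before returning. A minimal
// standalone sketch of that save/clear/restore pattern, written as an RAII
// helper (hypothetical FlagHolder/ScopedUserFlags names; the patch itself
// performs the three steps by hand via save_user_flags, clear_user_flags and
// load_user_flags):

#include <vector>

// Stand-in for the object owning the flags (one bool per cell).
struct FlagHolder
{
  std::vector<bool> user_flags;

  void save_user_flags (std::vector<bool> &out) const { out = user_flags; }
  void clear_user_flags () { user_flags.assign (user_flags.size(), false); }
  void load_user_flags (const std::vector<bool> &in) { user_flags = in; }
};

// Saves the flags on construction and restores them on destruction, so every
// return path leaves the flags exactly as they were -- the guarantee the
// function above provides explicitly.
class ScopedUserFlags
{
public:
  explicit ScopedUserFlags (FlagHolder &flags)
    : holder (flags)
  {
    holder.save_user_flags (saved);
    holder.clear_user_flags ();
  }

  ~ScopedUserFlags ()
  {
    holder.load_user_flags (saved);
  }

private:
  FlagHolder &holder;
  std::vector<bool> saved;
};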
- for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.faces->lines.dofs.begin();
- i!=dof_handler.faces->lines.dofs.end(); ++i)
- if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
- *i = ((indices.n_elements() == 0) ?
- new_numbers[*i] :
- new_numbers[indices.index_within_set(*i)]);
- for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
- {
- for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.levels[level]->dof_object.dofs.begin();
- i!=dof_handler.levels[level]->dof_object.dofs.end(); ++i)
- if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
- *i = ((indices.n_elements() == 0) ?
- new_numbers[*i] :
- new_numbers[indices.index_within_set(*i)]);
- }
+ /* --------------------- renumber_dofs functionality ---------------- */
+
+
+ /**
+ * Implementation of the
+ * general template of same
+ * name.
+ *
+ * If the second argument
+ * has any elements set,
+     * then the elements of the
+ * vector of new numbers do
+ * not relate to the old
+ * DoF number but instead
+ * to the index of the old
+ * DoF number within the
+ * set of locally owned
+ * DoFs.
+ */
+ template <int spacedim>
+ static
+ void
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ const IndexSet &,
+ DoFHandler<1,spacedim> &dof_handler,
+ const bool check_validity)
+ {
+ // note that we can not use cell
+ // iterators in this function since
+ // then we would renumber the dofs on
+ // the interface of two cells more
+ // than once. Anyway, this way it's
+ // not only more correct but also
+ // faster; note, however, that dof
+ // numbers may be invalid_dof_index,
+ // namely when the appropriate
+ // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.vertex_dofs.begin();
+ i!=dof_handler.vertex_dofs.end(); ++i)
+ if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
+ *i = new_numbers[*i];
+ else if (check_validity)
+ // if index is
+ // invalid_dof_index:
+ // check if this one
+ // really is unused
+ Assert (dof_handler.get_tria()
+ .vertex_used((i-dof_handler.vertex_dofs.begin()) /
+ dof_handler.selected_fe->dofs_per_vertex)
+ == false,
+ ExcInternalError ());
+
+ for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.levels[level]->dof_object.dofs.begin();
+ i!=dof_handler.levels[level]->dof_object.dofs.end(); ++i)
+ if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
+ *i = new_numbers[*i];
+
+ // update the cache
+ // used for cell dof
+ // indices
+ for (typename DoFHandler<1,spacedim>::cell_iterator
+ cell = dof_handler.begin();
+ cell != dof_handler.end(); ++cell)
+ cell->update_cell_dof_indices_cache ();
+ }
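// The documentation comment above describes two addressing modes for
// new_numbers: by the old global dof index itself, or -- when the IndexSet is
// non-empty -- by the position of the old index within the set of locally
// owned dofs. A standalone sketch of that lookup, emulating
// IndexSet::index_within_set() with a sorted vector (hypothetical helper
// names, plain C++; not the deal.II IndexSet class):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

typedef unsigned long long dof_index;

// Position of 'dof' inside the sorted list of locally owned dofs, the
// stand-in for IndexSet::index_within_set().
std::size_t position_within_set (const std::vector<dof_index> &locally_owned,
                                 const dof_index dof)
{
  const std::vector<dof_index>::const_iterator it
    = std::lower_bound (locally_owned.begin(), locally_owned.end(), dof);
  assert (it != locally_owned.end() && *it == dof);
  return static_cast<std::size_t>(it - locally_owned.begin());
}

// Apply the renumbering the same way the loops above do: with an empty set,
// new_numbers is addressed by the old dof index, otherwise by that index's
// position within the locally owned set.
dof_index renumber_one (const dof_index old_dof,
                        const std::vector<dof_index> &new_numbers,
                        const std::vector<dof_index> &locally_owned)
{
  return locally_owned.empty ()
         ? new_numbers[old_dof]
         : new_numbers[position_within_set (locally_owned, old_dof)];
}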
- // update the cache
- // used for cell dof
- // indices
- for (typename DoFHandler<2,spacedim>::cell_iterator
- cell = dof_handler.begin();
- cell != dof_handler.end(); ++cell)
- cell->update_cell_dof_indices_cache ();
- }
- template <int spacedim>
- static
- void
- renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- const IndexSet &indices,
- DoFHandler<3,spacedim> &dof_handler,
- const bool check_validity)
+ template <int spacedim>
+ static
+ void
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
++ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ const IndexSet &indices,
- DoFHandler<2,spacedim> &dof_handler,
++ DoFHandler<2,spacedim> &dof_handler,
+ const bool check_validity)
+ {
+ // note that we can not use cell
+ // iterators in this function since
+ // then we would renumber the dofs on
+ // the interface of two cells more
+ // than once. Anyway, this way it's
+ // not only more correct but also
+ // faster; note, however, that dof
+ // numbers may be invalid_dof_index,
+ // namely when the appropriate
+ // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.vertex_dofs.begin();
+ i!=dof_handler.vertex_dofs.end(); ++i)
+ if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
+ *i = (indices.n_elements() == 0)?
+ (new_numbers[*i]) :
+ (new_numbers[indices.index_within_set(*i)]);
+ else if (check_validity)
+ // if index is invalid_dof_index:
+ // check if this one really is
+ // unused
+ Assert (dof_handler.get_tria()
+ .vertex_used((i-dof_handler.vertex_dofs.begin()) /
+ dof_handler.selected_fe->dofs_per_vertex)
+ == false,
+ ExcInternalError ());
+
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.faces->lines.dofs.begin();
+ i!=dof_handler.faces->lines.dofs.end(); ++i)
+ if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+
+ for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
{
- // note that we can not use cell
- // iterators in this function since
- // then we would renumber the dofs on
- // the interface of two cells more
- // than once. Anyway, this way it's
- // not only more correct but also
- // faster; note, however, that dof
- // numbers may be invalid_dof_index,
- // namely when the appropriate
- // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
+ for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.vertex_dofs.begin();
- i!=dof_handler.vertex_dofs.end(); ++i)
- if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ i=dof_handler.levels[level]->dof_object.dofs.begin();
+ i!=dof_handler.levels[level]->dof_object.dofs.end(); ++i)
+ if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
*i = ((indices.n_elements() == 0) ?
new_numbers[*i] :
new_numbers[indices.index_within_set(*i)]);
- else if (check_validity)
- // if index is invalid_dof_index:
- // check if this one really is
- // unused
- Assert (dof_handler.get_tria()
- .vertex_used((i-dof_handler.vertex_dofs.begin()) /
- dof_handler.selected_fe->dofs_per_vertex)
- == false,
- ExcInternalError ());
+ }
- renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ // update the cache
+ // used for cell dof
+ // indices
+ for (typename DoFHandler<2,spacedim>::cell_iterator
+ cell = dof_handler.begin();
+ cell != dof_handler.end(); ++cell)
+ cell->update_cell_dof_indices_cache ();
+ }
+
+
+ template <int spacedim>
+ static
+ void
- for (std::vector<unsigned int>::iterator
++ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ const IndexSet &indices,
+ DoFHandler<3,spacedim> &dof_handler,
+ const bool check_validity)
+ {
+ // note that we can not use cell
+ // iterators in this function since
+ // then we would renumber the dofs on
+ // the interface of two cells more
+ // than once. Anyway, this way it's
+ // not only more correct but also
+ // faster; note, however, that dof
+ // numbers may be invalid_dof_index,
+ // namely when the appropriate
+ // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.vertex_dofs.begin();
+ i!=dof_handler.vertex_dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+ else if (check_validity)
+ // if index is invalid_dof_index:
+ // check if this one really is
+ // unused
+ Assert (dof_handler.get_tria()
+ .vertex_used((i-dof_handler.vertex_dofs.begin()) /
+ dof_handler.selected_fe->dofs_per_vertex)
+ == false,
+ ExcInternalError ());
+
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.faces->lines.dofs.begin();
+ i!=dof_handler.faces->lines.dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
- for (std::vector<unsigned int>::iterator
++ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.faces->quads.dofs.begin();
+ i!=dof_handler.faces->quads.dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+
+ for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
+ {
- i=dof_handler.faces->lines.dofs.begin();
- i!=dof_handler.faces->lines.dofs.end(); ++i)
- if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
- *i = ((indices.n_elements() == 0) ?
- new_numbers[*i] :
- new_numbers[indices.index_within_set(*i)]);
- for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.faces->quads.dofs.begin();
- i!=dof_handler.faces->quads.dofs.end(); ++i)
+ for (std::vector<types::global_dof_index>::iterator
+ i=dof_handler.levels[level]->dof_object.dofs.begin();
+ i!=dof_handler.levels[level]->dof_object.dofs.end(); ++i)
if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
*i = ((indices.n_elements() == 0) ?
new_numbers[*i] :
number_cache.locally_owned_dofs.compress();
number_cache.n_locally_owned_dofs_per_processor
- = std::vector<unsigned int> (1,
- number_cache.n_global_dofs);
+ = std::vector<types::global_dof_index> (1,
- number_cache.n_global_dofs);
++ number_cache.n_global_dofs);
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
number_cache.locally_owned_dofs.compress();
number_cache.n_locally_owned_dofs_per_processor
- = std::vector<unsigned int> (1,
- number_cache.n_global_dofs);
+ = std::vector<types::global_dof_index> (1,
- number_cache.n_global_dofs);
++ number_cache.n_global_dofs);
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
struct types
{
- /**
- * A list of tree+quadrant and
- * their dof indices. dofs is of
- * the form num_dofindices of
- * quadrant 0, followed by
- * num_dofindices indices,
- * num_dofindices of quadrant 1,
- * ...
- */
- struct cellinfo
- {
- std::vector<unsigned int> tree_index;
- std::vector<typename dealii::internal::p4est::types<dim>::quadrant> quadrants;
- std::vector<dealii::types::global_dof_index> dofs;
+ /**
+       * A list of tree+quadrant pairs
+       * and their dof indices. For each
+       * quadrant in turn, dofs stores the
+       * number of dof indices on that
+       * quadrant, followed by that many
+       * indices (quadrant 0 first, then
+       * quadrant 1, and so on).
+ */
+ struct cellinfo
+ {
+ std::vector<unsigned int> tree_index;
+ std::vector<typename dealii::internal::p4est::types<dim>::quadrant> quadrants;
- std::vector<unsigned int> dofs;
++ std::vector<dealii::types::global_dof_index> dofs;
- unsigned int bytes_for_buffer () const
- {
- return (sizeof(unsigned int) +
- tree_index.size() * sizeof(unsigned int) +
- quadrants.size() * sizeof(typename dealii::internal::p4est
- ::types<dim>::quadrant) +
- dofs.size() * sizeof(dealii::types::global_dof_index));
- }
+ unsigned int bytes_for_buffer () const
+ {
+ return (sizeof(unsigned int) +
+ tree_index.size() * sizeof(unsigned int) +
+ quadrants.size() * sizeof(typename dealii::internal::p4est
+ ::types<dim>::quadrant) +
- dofs.size() * sizeof(unsigned int));
++ dofs.size() * sizeof(dealii::types::global_dof_index));
+ }
- void pack_data (std::vector<char> &buffer) const
- {
- buffer.resize(bytes_for_buffer());
+ void pack_data (std::vector<char> &buffer) const
+ {
+ buffer.resize(bytes_for_buffer());
- char * ptr = &buffer[0];
+ char *ptr = &buffer[0];
- const unsigned int num_cells = tree_index.size();
- std::memcpy(ptr, &num_cells, sizeof(unsigned int));
- ptr += sizeof(unsigned int);
+ const unsigned int num_cells = tree_index.size();
+ std::memcpy(ptr, &num_cells, sizeof(unsigned int));
+ ptr += sizeof(unsigned int);
- std::memcpy(ptr,
- &tree_index[0],
- num_cells*sizeof(unsigned int));
- ptr += num_cells*sizeof(unsigned int);
+ std::memcpy(ptr,
+ &tree_index[0],
+ num_cells*sizeof(unsigned int));
+ ptr += num_cells*sizeof(unsigned int);
- std::memcpy(ptr,
- &quadrants[0],
- num_cells * sizeof(typename dealii::internal::p4est::
- types<dim>::quadrant));
- ptr += num_cells*sizeof(typename dealii::internal::p4est::types<dim>::
- quadrant);
+ std::memcpy(ptr,
+ &quadrants[0],
+ num_cells * sizeof(typename dealii::internal::p4est::
+ types<dim>::quadrant));
+ ptr += num_cells*sizeof(typename dealii::internal::p4est::types<dim>::
+ quadrant);
- std::memcpy(ptr,
- &dofs[0],
- dofs.size() * sizeof(dealii::types::global_dof_index));
- ptr += dofs.size() * sizeof(dealii::types::global_dof_index);
+ std::memcpy(ptr,
+ &dofs[0],
- dofs.size() * sizeof(unsigned int));
- ptr += dofs.size() * sizeof(unsigned int);
++ dofs.size() * sizeof(dealii::types::global_dof_index));
++ ptr += dofs.size() * sizeof(dealii::types::global_dof_index);
- Assert (ptr == &buffer[0]+buffer.size(),
- ExcInternalError());
+ Assert (ptr == &buffer[0]+buffer.size(),
+ ExcInternalError());
- }
- };
+ }
+ };
};
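// The cellinfo comment above defines the packing format of 'dofs': for each
// quadrant, a count followed by that many dof indices. The receiving code
// further down walks exactly this layout with 'dofs += 1 + dofs[0]'. A
// standalone sketch of building and walking that layout (plain C++,
// hypothetical function names):

#include <cstddef>
#include <vector>

typedef unsigned long long dof_index;

// Append one quadrant's dof indices in the documented format:
// [ num_dofindices, dof_0, ..., dof_{n-1} ].
void append_quadrant_dofs (std::vector<dof_index> &dofs,
                           const std::vector<dof_index> &cell_dofs)
{
  dofs.push_back (cell_dofs.size ());
  dofs.insert (dofs.end (), cell_dofs.begin (), cell_dofs.end ());
}

// Walk the packed array quadrant by quadrant; the index jump
// 'pos += 1 + dofs[pos]' is the same arithmetic as the pointer jump
// 'dofs += 1 + dofs[0]' used when unpacking the receive buffer.
std::size_t count_packed_quadrants (const std::vector<dof_index> &dofs)
{
  std::size_t n_quadrants = 0;
  for (std::size_t pos = 0; pos < dofs.size ();
       pos += 1 + static_cast<std::size_t>(dofs[pos]))
    ++n_quadrants;
  return n_quadrants;
}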
if (send_to.size() > 0)
{
- // this cell's dof_indices
- // need to be sent to
- // someone
+ // this cell's dof_indices
+ // need to be sent to
+ // someone
- std::vector<unsigned int>
+ std::vector<dealii::types::global_dof_index>
- local_dof_indices (dealii_cell->get_fe().dofs_per_cell);
+ local_dof_indices (dealii_cell->get_fe().dofs_per_cell);
dealii_cell->get_dof_indices (local_dof_indices);
for (std::set<dealii::types::subdomain_id>::iterator it=send_to.begin();
const typename dealii::internal::p4est::types<dim>::quadrant &p4est_cell,
const typename DoFHandler<dim,spacedim>::cell_iterator &dealii_cell,
const typename dealii::internal::p4est::types<dim>::quadrant &quadrant,
- dealii::types::global_dof_index * dofs)
- unsigned int *dofs)
++ dealii::types::global_dof_index *dofs)
{
if (internal::p4est::quadrant_is_equal<dim>(p4est_cell, quadrant))
{
Assert(!dealii_cell->has_children(), ExcInternalError());
Assert(dealii_cell->is_ghost(), ExcInternalError());
- // update dof indices of cell
+ // update dof indices of cell
- std::vector<unsigned int>
+ std::vector<dealii::types::global_dof_index>
- dof_indices (dealii_cell->get_fe().dofs_per_cell);
+ dof_indices (dealii_cell->get_fe().dofs_per_cell);
dealii_cell->update_cell_dof_indices_cache();
dealii_cell->get_dof_indices(dof_indices);
}
- // mark all own cells, that miss some
- // dof_data and collect the neighbors
- // that are going to send stuff to us
+  // mark all of our own cells that are missing
+  // some dof_data, and collect the neighbors
+  // that are going to send data to us
std::set<dealii::types::subdomain_id> senders;
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell, endc = dof_handler.end();
+ cell, endc = dof_handler.end();
for (cell = dof_handler.begin_active(); cell != endc; ++cell)
if (!cell->is_artificial())
memcpy(&cells, ptr, sizeof(unsigned int));
ptr+=sizeof(unsigned int);
- //TODO: reinterpret too evil?
- unsigned int * treeindex=reinterpret_cast<unsigned int*>(ptr);
- //reinterpret too evil?
++ //TODO: reinterpret too evil?
+ unsigned int *treeindex=reinterpret_cast<unsigned int *>(ptr);
ptr+=cells*sizeof(unsigned int);
- typename dealii::internal::p4est::types<dim>::quadrant * quadrant
+ typename dealii::internal::p4est::types<dim>::quadrant *quadrant
=reinterpret_cast<typename dealii::internal::p4est::types<dim>::quadrant *>(ptr);
ptr+=cells*sizeof(typename dealii::internal::p4est::types<dim>::quadrant);
- dealii::types::global_dof_index * dofs=reinterpret_cast<dealii::types::global_dof_index*>(ptr);
- unsigned int *dofs=reinterpret_cast<unsigned int *>(ptr);
++ dealii::types::global_dof_index *dofs=reinterpret_cast<dealii::types::global_dof_index *>(ptr);
- for (unsigned int c=0;c<cells;++c, dofs+=1+dofs[0])
+ for (unsigned int c=0; c<cells; ++c, dofs+=1+dofs[0])
{
typename DoFHandler<dim,spacedim>::cell_iterator
- cell (&dof_handler.get_tria(),
- 0,
- p4est_tree_to_coarse_cell_permutation[treeindex[c]],
- &dof_handler);
+ cell (&dof_handler.get_tria(),
+ 0,
+ p4est_tree_to_coarse_cell_permutation[treeindex[c]],
+ &dof_handler);
typename dealii::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
Assert (tr != 0, ExcInternalError());
const unsigned int
- n_cpus = Utilities::MPI::n_mpi_processes (tr->get_communicator());
+ n_cpus = Utilities::MPI::n_mpi_processes (tr->get_communicator());
- //* 1. distribute on own
- //* subdomain
+ //* 1. distribute on own
+ //* subdomain
- const unsigned int n_initial_local_dofs =
+ const dealii::types::global_dof_index n_initial_local_dofs =
Implementation::distribute_dofs (0, tr->locally_owned_subdomain(),
dof_handler);
- //* 2. iterate over ghostcells and
- //kill dofs that are not owned
- //by us
+ //* 2. iterate over ghostcells and
+ //kill dofs that are not owned
+ //by us
- std::vector<unsigned int> renumbering(n_initial_local_dofs);
+ std::vector<dealii::types::global_dof_index> renumbering(n_initial_local_dofs);
for (unsigned int i=0; i<renumbering.size(); ++i)
renumbering[i] = i;
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell != endc; ++cell)
if (cell->is_ghost() &&
}
- // make indices consecutive
+ // make indices consecutive
number_cache.n_locally_owned_dofs = 0;
- for (std::vector<unsigned int>::iterator it=renumbering.begin();
+ for (std::vector<dealii::types::global_dof_index>::iterator it=renumbering.begin();
it!=renumbering.end(); ++it)
if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
*it = number_cache.n_locally_owned_dofs++;
1, MPI_UNSIGNED,
tr->get_communicator());
- const unsigned int
+ const dealii::types::global_dof_index
- shift = std::accumulate (number_cache
- .n_locally_owned_dofs_per_processor.begin(),
- number_cache
- .n_locally_owned_dofs_per_processor.begin()
- + tr->locally_owned_subdomain(),
- static_cast<dealii::types::global_dof_index>(0));
+ shift = std::accumulate (number_cache
+ .n_locally_owned_dofs_per_processor.begin(),
+ number_cache
+ .n_locally_owned_dofs_per_processor.begin()
+ + tr->locally_owned_subdomain(),
- 0);
- for (std::vector<unsigned int>::iterator it=renumbering.begin();
++ static_cast<dealii::types::global_dof_index>(0));
+ for (std::vector<dealii::types::global_dof_index>::iterator it=renumbering.begin();
it!=renumbering.end(); ++it)
if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
(*it) += shift;
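// The shift applied in the loop above is simply the number of dofs owned by
// all lower-ranked processors, obtained via std::accumulate over
// n_locally_owned_dofs_per_processor. A small standalone sketch of that
// computation with made-up numbers (plain C++, hypothetical names):

#include <cassert>
#include <numeric>
#include <vector>

typedef unsigned long long dof_index;

// Offset of processor 'my_rank' into the global enumeration: the sum of the
// locally owned dof counts of all lower ranks, accumulated in 64-bit.
dof_index compute_shift (const std::vector<dof_index> &n_owned_per_processor,
                         const unsigned int my_rank)
{
  return std::accumulate (n_owned_per_processor.begin (),
                          n_owned_per_processor.begin () + my_rank,
                          static_cast<dof_index>(0));
}

// Example: counts {5, 3, 7} give shifts 0, 5 and 8, so processor 2 owns the
// contiguous global range [8, 8+7).
void shift_example ()
{
  std::vector<dof_index> counts;
  counts.push_back (5);
  counts.push_back (3);
  counts.push_back (7);
  assert (compute_shift (counts, 0) == 0);
  assert (compute_shift (counts, 1) == 5);
  assert (compute_shift (counts, 2) == 8);
}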
number_cache.locally_owned_dofs = IndexSet(number_cache.n_global_dofs);
number_cache.locally_owned_dofs
- .add_range(shift,
- shift+number_cache.n_locally_owned_dofs);
+ .add_range(shift,
+ shift+number_cache.n_locally_owned_dofs);
number_cache.locally_owned_dofs.compress();
- // fill global_dof_indexsets
+ // fill global_dof_indexsets
number_cache.locally_owned_dofs_per_processor.resize(n_cpus);
{
- unsigned int lshift = 0;
+ dealii::types::global_dof_index lshift = 0;
- for (unsigned int i=0;i<n_cpus;++i)
+ for (unsigned int i=0; i<n_cpus; ++i)
{
number_cache.locally_owned_dofs_per_processor[i]
= IndexSet(number_cache.n_global_dofs);
tr->load_user_flags(user_flags);
#ifdef DEBUG
- //check that we are really done
+ //check that we are really done
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell, endc = dof_handler.end();
+ cell, endc = dof_handler.end();
for (cell = dof_handler.begin_active(); cell != endc; ++cell)
if (!cell->is_artificial())
number_cache.locally_owned_dofs = IndexSet (dof_handler.n_dofs());
if (dof_handler.locally_owned_dofs().n_elements()>0)
- {
- std::vector<dealii::types::global_dof_index>::const_iterator it = new_numbers.begin();
- const unsigned int n_blocks = dof_handler.get_fe().n_blocks();
- std::vector<std::pair<dealii::types::global_dof_index,unsigned int> > block_indices(n_blocks);
- block_indices[0].first = *it++;
- block_indices[0].second = 1;
- unsigned int current_block = 0, n_filled_blocks = 1;
- for ( ; it != new_numbers.end(); ++it)
- {
- bool done = false;
+ {
- std::vector<unsigned int>::const_iterator it = new_numbers.begin();
++ std::vector<dealii::types::global_dof_index>::const_iterator it = new_numbers.begin();
+ const unsigned int n_blocks = dof_handler.get_fe().n_blocks();
- std::vector<std::pair<unsigned int,unsigned int> > block_indices(n_blocks);
++ std::vector<std::pair<dealii::types::global_dof_index,unsigned int> > block_indices(n_blocks);
+ block_indices[0].first = *it++;
+ block_indices[0].second = 1;
+ unsigned int current_block = 0, n_filled_blocks = 1;
+ for ( ; it != new_numbers.end(); ++it)
+ {
+ bool done = false;
+
+ // search from the current block onwards
+ // whether the next index is shifted by one
+ // from the previous one.
+ for (unsigned int i=0; i<n_filled_blocks; ++i)
+ if (*it == block_indices[current_block].first
+ +block_indices[current_block].second)
+ {
+ block_indices[current_block].second++;
+ done = true;
+ break;
+ }
+ else
+ {
+ if (current_block == n_filled_blocks-1)
+ current_block = 0;
+ else
+ ++current_block;
+ }
- // search from the current block onwards
- // whether the next index is shifted by one
- // from the previous one.
- for (unsigned int i=0; i<n_filled_blocks; ++i)
- if (*it == block_indices[current_block].first
- +block_indices[current_block].second)
- {
- block_indices[current_block].second++;
- done = true;
- break;
- }
- else
+ // could not find any contiguous range: need
+ // to add a new block if possible. Abort
+ // otherwise, which will add all elements
+ // individually to the IndexSet.
+ if (done == false)
{
- if (current_block == n_filled_blocks-1)
- current_block = 0;
+ if (n_filled_blocks < n_blocks)
+ {
+ block_indices[n_filled_blocks].first = *it;
+ block_indices[n_filled_blocks].second = 1;
+ current_block = n_filled_blocks;
+ ++n_filled_blocks;
+ }
else
- ++current_block;
+ break;
}
+ }
- // could not find any contiguous range: need
- // to add a new block if possible. Abort
- // otherwise, which will add all elements
- // individually to the IndexSet.
- if (done == false)
- {
- if (n_filled_blocks < n_blocks)
- {
- block_indices[n_filled_blocks].first = *it;
- block_indices[n_filled_blocks].second = 1;
- current_block = n_filled_blocks;
- ++n_filled_blocks;
- }
- else
- break;
- }
- }
-
- // check whether all indices could be assigned
- // to blocks. If yes, we can add the block
- // ranges to the IndexSet, otherwise we need
- // to go through the indices once again and
- // add each element individually (slow!)
- unsigned int sum = 0;
- for (unsigned int i=0; i<n_filled_blocks; ++i)
- sum += block_indices[i].second;
- if (sum == new_numbers.size())
+ // check whether all indices could be assigned
+ // to blocks. If yes, we can add the block
+ // ranges to the IndexSet, otherwise we need
+ // to go through the indices once again and
+ // add each element individually (slow!)
+ unsigned int sum = 0;
for (unsigned int i=0; i<n_filled_blocks; ++i)
- number_cache.locally_owned_dofs.add_range (block_indices[i].first,
- block_indices[i].first+
- block_indices[i].second);
- else
- for (it=new_numbers.begin() ; it != new_numbers.end(); ++it)
- number_cache.locally_owned_dofs.add_index (*it);
- }
+ sum += block_indices[i].second;
+ if (sum == new_numbers.size())
+ for (unsigned int i=0; i<n_filled_blocks; ++i)
+ number_cache.locally_owned_dofs.add_range (block_indices[i].first,
+ block_indices[i].first+
+ block_indices[i].second);
+ else
+ for (it=new_numbers.begin() ; it != new_numbers.end(); ++it)
+ number_cache.locally_owned_dofs.add_index (*it);
+ }
number_cache.locally_owned_dofs.compress();
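// The block-detection code above tries to cover the locally owned indices in
// new_numbers with at most n_blocks contiguous runs, so that the IndexSet can
// be filled with a few add_range() calls instead of one add_index() per dof.
// A standalone sketch of the same greedy run detection (plain C++; runs are
// returned as (first, length) pairs, and the hypothetical caller falls back
// to adding indices one by one when the function returns false):

#include <cstddef>
#include <utility>
#include <vector>

typedef unsigned long long dof_index;

bool find_contiguous_runs (const std::vector<dof_index> &indices,
                           const unsigned int max_blocks,
                           std::vector<std::pair<dof_index,dof_index> > &runs)
{
  runs.clear ();
  if (indices.empty ())
    return true;
  if (max_blocks == 0)
    return false;

  runs.push_back (std::make_pair (indices[0], static_cast<dof_index>(1)));
  std::size_t current = 0;

  for (std::size_t k = 1; k < indices.size (); ++k)
    {
      bool done = false;

      // search from the current run onwards for a run that the next index
      // extends by exactly one
      for (std::size_t tries = 0; tries < runs.size (); ++tries)
        {
          if (indices[k] == runs[current].first + runs[current].second)
            {
              ++runs[current].second;
              done = true;
              break;
            }
          current = (current + 1) % runs.size ();
        }

      // no existing run could be extended: open a new one if there is still
      // room, otherwise give up (the slow path mentioned above)
      if (done == false)
        {
          if (runs.size () < max_blocks)
            {
              runs.push_back (std::make_pair (indices[k],
                                              static_cast<dof_index>(1)));
              current = runs.size () - 1;
            }
          else
            return false;
        }
    }
  return true;
}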
dof_handler.n_locally_owned_dofs(),
ExcInternalError());
- // then also set this number
- // in our own copy
+ // then also set this number
+ // in our own copy
number_cache.n_locally_owned_dofs = dof_handler.n_locally_owned_dofs();
- // mark not locally active DoFs as
- // invalid
+ // mark not locally active DoFs as
+ // invalid
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell != endc; ++cell)
if (!cell->is_artificial())
template <class T>
class WrapDoFIterator : private T
{
- public:
- typedef typename T::AccessorType AccessorType;
+ public:
+ typedef typename T::AccessorType AccessorType;
- WrapDoFIterator (const T& t) : T(t) {}
+ WrapDoFIterator (const T &t) : T(t) {}
- void get_dof_indices (std::vector<types::global_dof_index>& v) const
- {
- (*this)->get_dof_indices(v);
- }
- void get_dof_indices (std::vector<unsigned int> &v) const
++ void get_dof_indices (std::vector<types::global_dof_index> &v) const
+ {
+ (*this)->get_dof_indices(v);
+ }
- template <class T2>
- bool operator != (const T2& i) const
- {
- return (! (T::operator==(i)));
- }
- // Allow access to these private operators of T
- using T::operator->;
- using T::operator++;
- using T::operator==;
+ template <class T2>
+ bool operator != (const T2 &i) const
+ {
+ return (! (T::operator==(i)));
+ }
+ // Allow access to these private operators of T
+ using T::operator->;
+ using T::operator++;
+ using T::operator==;
};
template <class T>
class WrapMGDoFIterator : private T
{
- public:
- typedef typename T::AccessorType AccessorType;
+ public:
+ typedef typename T::AccessorType AccessorType;
- WrapMGDoFIterator (const T& t) : T(t) {}
+ WrapMGDoFIterator (const T &t) : T(t) {}
- void get_dof_indices (std::vector<types::global_dof_index>& v) const
- {
- (*this)->get_mg_dof_indices(v);
- }
- void get_dof_indices (std::vector<unsigned int> &v) const
++ void get_dof_indices (std::vector<types::global_dof_index> &v) const
+ {
+ (*this)->get_mg_dof_indices(v);
+ }
- bool operator != (const WrapMGDoFIterator<T>& i) const
- {
- return (! (T::operator==(i)));
- }
- // Allow access to these
- // private operators of T
- using T::operator->;
- using T::operator++;
- using T::operator==;
+ bool operator != (const WrapMGDoFIterator<T> &i) const
+ {
+ return (! (T::operator==(i)));
+ }
+ // Allow access to these
+ // private operators of T
+ using T::operator->;
+ using T::operator++;
+ using T::operator==;
};
}
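// WrapDoFIterator and WrapMGDoFIterator above rely on private inheritance
// plus using-declarations: the wrapped iterator's interface is hidden, a few
// operators are re-exported verbatim, and get_dof_indices() is redirected. A
// minimal standalone sketch of that wrapping idiom with a hypothetical
// Counter class (plain C++):

// A small class with more interface than we want to expose.
class Counter
{
public:
  Counter () : value_ (0) {}
  Counter &operator++ () { ++value_; return *this; }
  bool operator== (const Counter &other) const { return value_ == other.value_; }
  int value () const { return value_; }
private:
  int value_;
};

// Privately inherit, then selectively re-export operators with
// using-declarations and add or rename the members we actually want --
// the same trick the two wrapper classes above play on dof iterators.
class WrappedCounter : private Counter
{
public:
  WrappedCounter (const Counter &c) : Counter (c) {}

  int current () const { return value (); }      // renamed accessor

  bool operator!= (const WrappedCounter &other) const
  {
    return ! (Counter::operator== (other));
  }

  // allow access to only these members of the private base
  using Counter::operator++;
  using Counter::operator==;
};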
template <class DH>
void create_graph (const DH &dof_handler,
const bool use_constraints,
- types::Graph &graph,
- types::property_map<types::Graph,types::vertex_degree_t>::type &graph_degree)
+ boosttypes::Graph &graph,
+ boosttypes::property_map<boosttypes::Graph,boosttypes::vertex_degree_t>::type &graph_degree)
{
{
- // create intermediate sparsity pattern
- // (faster than directly submitting
- // indices)
+ // create intermediate sparsity pattern
+ // (faster than directly submitting
+ // indices)
ConstraintMatrix constraints;
if (use_constraints)
DoFTools::make_hanging_node_constraints (dof_handler, constraints);
const bool reversed_numbering,
const bool use_constraints)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_Cuthill_McKee(renumbering, dof_handler, reversed_numbering,
use_constraints);
template <class DH>
void
- compute_Cuthill_McKee (std::vector<types::global_dof_index>& new_dof_indices,
- compute_Cuthill_McKee (std::vector<unsigned int> &new_dof_indices,
++ compute_Cuthill_McKee (std::vector<types::global_dof_index> &new_dof_indices,
const DH &dof_handler,
const bool reversed_numbering,
const bool use_constraints)
{
- types::Graph
+ boosttypes::Graph
- graph(dof_handler.n_dofs());
+ graph(dof_handler.n_dofs());
- types::property_map<types::Graph,types::vertex_degree_t>::type
+ boosttypes::property_map<boosttypes::Graph,boosttypes::vertex_degree_t>::type
- graph_degree;
+ graph_degree;
internal::create_graph (dof_handler, use_constraints, graph, graph_degree);
- types::property_map<types::Graph, types::vertex_index_t>::type
+ boosttypes::property_map<boosttypes::Graph, boosttypes::vertex_index_t>::type
- index_map = get(::boost::vertex_index, graph);
+ index_map = get(::boost::vertex_index, graph);
- std::vector<types::Vertex> inv_perm(num_vertices(graph));
+ std::vector<boosttypes::Vertex> inv_perm(num_vertices(graph));
if (reversed_numbering == false)
::boost::cuthill_mckee_ordering(graph, inv_perm.rbegin(),
const bool reversed_numbering,
const bool use_constraints)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_king_ordering(renumbering, dof_handler, reversed_numbering,
use_constraints);
template <class DH>
void
- compute_king_ordering (std::vector<types::global_dof_index>& new_dof_indices,
- compute_king_ordering (std::vector<unsigned int> &new_dof_indices,
++ compute_king_ordering (std::vector<types::global_dof_index> &new_dof_indices,
const DH &dof_handler,
const bool reversed_numbering,
const bool use_constraints)
{
- types::Graph
+ boosttypes::Graph
- graph(dof_handler.n_dofs());
+ graph(dof_handler.n_dofs());
- types::property_map<types::Graph,types::vertex_degree_t>::type
+ boosttypes::property_map<boosttypes::Graph,boosttypes::vertex_degree_t>::type
- graph_degree;
+ graph_degree;
internal::create_graph (dof_handler, use_constraints, graph, graph_degree);
- types::property_map<types::Graph, types::vertex_index_t>::type
+ boosttypes::property_map<boosttypes::Graph, boosttypes::vertex_index_t>::type
- index_map = get(::boost::vertex_index, graph);
+ index_map = get(::boost::vertex_index, graph);
- std::vector<types::Vertex> inv_perm(num_vertices(graph));
+ std::vector<boosttypes::Vertex> inv_perm(num_vertices(graph));
if (reversed_numbering == false)
::boost::king_ordering(graph, inv_perm.rbegin());
const bool reversed_numbering,
const bool use_constraints)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_minimum_degree(renumbering, dof_handler, reversed_numbering,
use_constraints);
template <class DH>
void
- compute_minimum_degree (std::vector<types::global_dof_index>& new_dof_indices,
- compute_minimum_degree (std::vector<unsigned int> &new_dof_indices,
++ compute_minimum_degree (std::vector<types::global_dof_index> &new_dof_indices,
const DH &dof_handler,
const bool reversed_numbering,
const bool use_constraints)
template <class DH>
void
- Cuthill_McKee (DH& dof_handler,
+ Cuthill_McKee (DH &dof_handler,
const bool reversed_numbering,
const bool use_constraints,
- const std::vector<unsigned int> &starting_indices)
+ const std::vector<types::global_dof_index> &starting_indices)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_Cuthill_McKee(renumbering, dof_handler, reversed_numbering,
use_constraints, starting_indices);
template <class DH>
void
- compute_Cuthill_McKee (std::vector<types::global_dof_index>& new_indices,
- const DH& dof_handler,
- compute_Cuthill_McKee (std::vector<unsigned int> &new_indices,
++ compute_Cuthill_McKee (std::vector<types::global_dof_index> &new_indices,
+ const DH &dof_handler,
const bool reversed_numbering,
const bool use_constraints,
- const std::vector<types::global_dof_index>& starting_indices)
- const std::vector<unsigned int> &starting_indices)
++ const std::vector<types::global_dof_index> &starting_indices)
{
- // make the connection graph. in
- // more than 2d use an intermediate
- // compressed sparsity pattern
-   // since we don't have very
- // good estimates for
- // max_couplings_between_dofs() in
- // 3d and this then leads to
- // excessive memory consumption
- //
- // note that if constraints are not
- // requested, then the
- // 'constraints' object will be
- // empty, and calling condense with
- // it is a no-op
+ // make the connection graph. in
+ // more than 2d use an intermediate
+ // compressed sparsity pattern
+   // since we don't have very
+ // good estimates for
+ // max_couplings_between_dofs() in
+ // 3d and this then leads to
+ // excessive memory consumption
+ //
+ // note that if constraints are not
+ // requested, then the
+ // 'constraints' object will be
+ // empty, and calling condense with
+ // it is a no-op
ConstraintMatrix constraints;
if (use_constraints)
DoFTools::make_hanging_node_constraints (dof_handler, constraints);
component_wise (DoFHandler<dim,spacedim> &dof_handler,
const std::vector<unsigned int> &component_order_arg)
{
- std::vector<unsigned int> renumbering (dof_handler.n_locally_owned_dofs(),
- DoFHandler<dim>::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering (dof_handler.n_locally_owned_dofs(),
- DoFHandler<dim>::invalid_dof_index);
++ DoFHandler<dim>::invalid_dof_index);
typedef
- internal::WrapDoFIterator<typename DoFHandler<dim,spacedim>
- ::active_cell_iterator>
- ITERATOR;
+ internal::WrapDoFIterator<typename DoFHandler<dim,spacedim>
+ ::active_cell_iterator>
+ ITERATOR;
typename DoFHandler<dim,spacedim>::active_cell_iterator
- istart = dof_handler.begin_active();
+ istart = dof_handler.begin_active();
ITERATOR start = istart;
const typename DoFHandler<dim,spacedim>::cell_iterator
- end = dof_handler.end();
+ end = dof_handler.end();
- const unsigned int result =
+ const types::global_dof_index result =
compute_component_wise<dim, spacedim, ITERATOR,
typename DoFHandler<dim,spacedim>::cell_iterator>
(renumbering, start, end, component_order_arg);
component_wise (hp::DoFHandler<dim> &dof_handler,
const std::vector<unsigned int> &component_order_arg)
{
- std::vector<unsigned int> renumbering (dof_handler.n_dofs(),
- hp::DoFHandler<dim>::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(),
- hp::DoFHandler<dim>::invalid_dof_index);
++ hp::DoFHandler<dim>::invalid_dof_index);
typedef
- internal::WrapDoFIterator<typename hp::DoFHandler<dim>::active_cell_iterator> ITERATOR;
+ internal::WrapDoFIterator<typename hp::DoFHandler<dim>::active_cell_iterator> ITERATOR;
typename hp::DoFHandler<dim>::active_cell_iterator
- istart = dof_handler.begin_active();
+ istart = dof_handler.begin_active();
ITERATOR start = istart;
const typename hp::DoFHandler<dim>::cell_iterator
- end = dof_handler.end();
+ end = dof_handler.end();
- const unsigned int result =
+ const types::global_dof_index result =
compute_component_wise<dim, dim, ITERATOR,
typename hp::DoFHandler<dim>::cell_iterator>(renumbering,
start, end,
const unsigned int level,
const std::vector<unsigned int> &component_order_arg)
{
- std::vector<unsigned int> renumbering (dof_handler.n_dofs(level),
- DoFHandler<dim>::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(level),
- DoFHandler<dim>::invalid_dof_index);
++ DoFHandler<dim>::invalid_dof_index);
typedef
- internal::WrapMGDoFIterator<typename MGDoFHandler<dim>::cell_iterator> ITERATOR;
+ internal::WrapMGDoFIterator<typename MGDoFHandler<dim>::cell_iterator> ITERATOR;
typename MGDoFHandler<dim>::cell_iterator
- istart =dof_handler.begin(level);
+ istart =dof_handler.begin(level);
ITERATOR start = istart;
typename MGDoFHandler<dim>::cell_iterator
- iend = dof_handler.end(level);
+ iend = dof_handler.end(level);
const ITERATOR end = iend;
- const unsigned int result =
+ const types::global_dof_index result =
compute_component_wise<dim, dim, ITERATOR, ITERATOR>(
renumbering, start, end, component_order_arg);
template <int dim, int spacedim, class ITERATOR, class ENDITERATOR>
- unsigned int
- compute_component_wise (std::vector<unsigned int> &new_indices,
+ types::global_dof_index
- compute_component_wise (std::vector<types::global_dof_index>& new_indices,
- const ITERATOR & start,
- const ENDITERATOR& end,
++ compute_component_wise (std::vector<types::global_dof_index> &new_indices,
+ const ITERATOR &start,
+ const ENDITERATOR &end,
const std::vector<unsigned int> &component_order_arg)
{
const hp::FECollection<dim,spacedim>
- // now concatenate all the
- // components in the order the user
- // desired to see
+ // now concatenate all the
+ // components in the order the user
+ // desired to see
- unsigned int next_free_index = 0;
+ types::global_dof_index next_free_index = 0;
for (unsigned int component=0; component<fe_collection.n_components(); ++component)
{
const typename std::vector<unsigned int>::const_iterator
namespace
{
- // helper function for hierarchical()
+ // helper function for hierarchical()
template <int dim, class iterator>
- unsigned int
+ types::global_dof_index
compute_hierarchical_recursive (
- unsigned int next_free,
+ types::global_dof_index next_free,
- std::vector<unsigned int>& new_indices,
- const iterator & cell,
- const IndexSet & locally_owned)
+ std::vector<unsigned int> &new_indices,
+ const iterator &cell,
+ const IndexSet &locally_owned)
{
if (cell->has_children())
{
void
hierarchical (DoFHandler<dim> &dof_handler)
{
- std::vector<unsigned int> renumbering (dof_handler.n_locally_owned_dofs(),
- DoFHandler<dim>::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering (dof_handler.n_locally_owned_dofs(),
- DoFHandler<dim>::invalid_dof_index);
++ DoFHandler<dim>::invalid_dof_index);
- typename DoFHandler<dim>::cell_iterator cell;
+ typename DoFHandler<dim>::cell_iterator cell;
- types::global_dof_index next_free = 0;
- const IndexSet locally_owned = dof_handler.locally_owned_dofs();
- unsigned int next_free = 0;
++ types::global_dof_index next_free = 0;
+ const IndexSet locally_owned = dof_handler.locally_owned_dofs();
- const parallel::distributed::Triangulation<dim> * tria
- = dynamic_cast<const parallel::distributed::Triangulation<dim>*>
- (&dof_handler.get_tria());
+ const parallel::distributed::Triangulation<dim> *tria
+ = dynamic_cast<const parallel::distributed::Triangulation<dim>*>
+ (&dof_handler.get_tria());
- if (tria)
+ if (tria)
{
#ifdef DEAL_II_USE_P4EST
//this is a distributed Triangulation. We need to traverse the coarse
template <class DH>
void
- sort_selected_dofs_back (DH& dof_handler,
- const std::vector<bool>& selected_dofs)
+ sort_selected_dofs_back (DH &dof_handler,
+ const std::vector<bool> &selected_dofs)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_sort_selected_dofs_back(renumbering, dof_handler, selected_dofs);
dof_handler.renumber_dofs(renumbering);
template <class DH>
void
- compute_sort_selected_dofs_back (std::vector<types::global_dof_index>& new_indices,
- const DH& dof_handler,
- const std::vector<bool>& selected_dofs)
- compute_sort_selected_dofs_back (std::vector<unsigned int> &new_indices,
++ compute_sort_selected_dofs_back (std::vector<types::global_dof_index> &new_indices,
+ const DH &dof_handler,
+ const std::vector<bool> &selected_dofs)
{
- const unsigned int n_dofs = dof_handler.n_dofs();
+ const types::global_dof_index n_dofs = dof_handler.n_dofs();
Assert (selected_dofs.size() == n_dofs,
ExcDimensionMismatch (selected_dofs.size(), n_dofs));
Assert (new_indices.size() == n_dofs,
ExcDimensionMismatch(new_indices.size(), n_dofs));
- const unsigned int n_selected_dofs = std::count (selected_dofs.begin(),
- selected_dofs.end(),
- false);
+ const types::global_dof_index n_selected_dofs = std::count (selected_dofs.begin(),
- selected_dofs.end(),
- false);
++ selected_dofs.end(),
++ false);
- unsigned int next_unselected = 0;
- unsigned int next_selected = n_selected_dofs;
- for (unsigned int i=0; i<n_dofs; ++i)
+ types::global_dof_index next_unselected = 0;
+ types::global_dof_index next_selected = n_selected_dofs;
+ for (types::global_dof_index i=0; i<n_dofs; ++i)
if (selected_dofs[i] == false)
{
new_indices[i] = next_unselected;
template <class DH>
void
- cell_wise_dg (DH& dof,
- const std::vector<typename DH::cell_iterator>& cells)
+ cell_wise_dg (DH &dof,
+ const std::vector<typename DH::cell_iterator> &cells)
{
- std::vector<unsigned int> renumbering(dof.n_dofs());
- std::vector<unsigned int> reverse(dof.n_dofs());
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs());
+ std::vector<types::global_dof_index> reverse(dof.n_dofs());
compute_cell_wise_dg(renumbering, reverse, dof, cells);
dof.renumber_dofs(renumbering);
template <class DH>
void
cell_wise (
- DH& dof,
- const std::vector<typename DH::cell_iterator>& cells)
+ DH &dof,
+ const std::vector<typename DH::cell_iterator> &cells)
{
- std::vector<unsigned int> renumbering(dof.n_dofs());
- std::vector<unsigned int> reverse(dof.n_dofs());
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs());
+ std::vector<types::global_dof_index> reverse(dof.n_dofs());
compute_cell_wise(renumbering, reverse, dof, cells);
dof.renumber_dofs(renumbering);
template <class DH>
void
compute_cell_wise_dg (
- std::vector<types::global_dof_index>& new_indices,
- std::vector<types::global_dof_index>& reverse,
- const DH& dof,
- const typename std::vector<typename DH::cell_iterator>& cells)
- std::vector<unsigned int> &new_indices,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const DH &dof,
+ const typename std::vector<typename DH::cell_iterator> &cells)
{
Assert(cells.size() == dof.get_tria().n_active_cells(),
ExcDimensionMismatch(cells.size(),
dof.get_tria().n_active_cells()));
- unsigned int n_global_dofs = dof.n_dofs();
+ types::global_dof_index n_global_dofs = dof.n_dofs();
- // Actually, we compute the
- // inverse of the reordering
- // vector, called reverse here.
- // Later, its inverse is computed
- // into new_indices, which is the
- // return argument.
+ // Actually, we compute the
+ // inverse of the reordering
+ // vector, called reverse here.
+ // Later, its inverse is computed
+ // into new_indices, which is the
+ // return argument.
Assert(new_indices.size() == n_global_dofs,
ExcDimensionMismatch(new_indices.size(), n_global_dofs));
}
Assert(global_index == n_global_dofs, ExcRenumberingIncomplete());
- for (types::global_dof_index i=0;i<reverse.size(); ++i)
- for (unsigned int i=0; i<reverse.size(); ++i)
++ for (types::global_dof_index i=0; i<reverse.size(); ++i)
new_indices[reverse[i]] = i;
}
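// Illustrative sketch, not part of the patch: the inversion performed by
// the final loop above, isolated into a standalone example. 'reverse'
// maps the new number i to the old dof reverse[i]; writing
// new_indices[reverse[i]] = i therefore yields the old-to-new map.
#include <cassert>
#include <cstddef>
#include <vector>

int main ()
{
  const std::size_t rev[3] = {2, 0, 1};              // new number -> old dof
  const std::vector<std::size_t> reverse (rev, rev+3);
  std::vector<std::size_t> new_indices (reverse.size());
  for (std::size_t i=0; i<reverse.size(); ++i)
    new_indices[reverse[i]] = i;                     // old dof -> new number
  assert (new_indices[2]==0 && new_indices[0]==1 && new_indices[1]==2);
  return 0;
}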
template <class DH>
void
compute_cell_wise (
- std::vector<types::global_dof_index>& new_indices,
- std::vector<types::global_dof_index>& reverse,
- const DH& dof,
- const typename std::vector<typename DH::cell_iterator>& cells)
- std::vector<unsigned int> &new_indices,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const DH &dof,
+ const typename std::vector<typename DH::cell_iterator> &cells)
{
Assert(cells.size() == dof.get_tria().n_active_cells(),
ExcDimensionMismatch(cells.size(),
dof.get_tria().n_active_cells()));
- unsigned int n_global_dofs = dof.n_dofs();
+ types::global_dof_index n_global_dofs = dof.n_dofs();
- // Actually, we compute the
- // inverse of the reordering
- // vector, called reverse here.
- // Later, irs inverse is computed
- // into new_indices, which is the
- // return argument.
+ // Actually, we compute the
+ // inverse of the reordering
+ // vector, called reverse here.
+ // Later, its inverse is computed
+ // into new_indices, which is the
+ // return argument.
Assert(new_indices.size() == n_global_dofs,
ExcDimensionMismatch(new_indices.size(), n_global_dofs));
Assert(reverse.size() == n_global_dofs,
ExcDimensionMismatch(reverse.size(), n_global_dofs));
- // For continuous elements, we must
- // make sure, that each dof is
- // reordered only once.
+ // For continuous elements, we must
+ // make sure that each dof is
+ // reordered only once.
std::vector<bool> already_sorted(n_global_dofs, false);
- std::vector<unsigned int> cell_dofs;
+ std::vector<types::global_dof_index> cell_dofs;
- unsigned int global_index = 0;
+ types::global_dof_index global_index = 0;
typename std::vector<typename DH::cell_iterator>::const_iterator cell;
}
Assert(global_index == n_global_dofs, ExcRenumberingIncomplete());
- for (types::global_dof_index i=0;i<reverse.size(); ++i)
- for (unsigned int i=0; i<reverse.size(); ++i)
++ for (types::global_dof_index i=0; i<reverse.size(); ++i)
new_indices[reverse[i]] = i;
}
template <int dim>
void cell_wise_dg (
- MGDoFHandler<dim>& dof,
+ MGDoFHandler<dim> &dof,
const unsigned int level,
- const typename std::vector<typename MGDoFHandler<dim>::cell_iterator>& cells)
+ const typename std::vector<typename MGDoFHandler<dim>::cell_iterator> &cells)
{
- std::vector<unsigned int> renumbering(dof.n_dofs(level));
- std::vector<unsigned int> reverse(dof.n_dofs(level));
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs(level));
+ std::vector<types::global_dof_index> reverse(dof.n_dofs(level));
compute_cell_wise_dg(renumbering, reverse, dof, level, cells);
dof.renumber_dofs(level, renumbering);
template <int dim>
void cell_wise (
- MGDoFHandler<dim>& dof,
+ MGDoFHandler<dim> &dof,
const unsigned int level,
- const typename std::vector<typename MGDoFHandler<dim>::cell_iterator>& cells)
+ const typename std::vector<typename MGDoFHandler<dim>::cell_iterator> &cells)
{
- std::vector<unsigned int> renumbering(dof.n_dofs(level));
- std::vector<unsigned int> reverse(dof.n_dofs(level));
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs(level));
+ std::vector<types::global_dof_index> reverse(dof.n_dofs(level));
compute_cell_wise(renumbering, reverse, dof, level, cells);
dof.renumber_dofs(level, renumbering);
template <int dim>
void compute_cell_wise_dg (
- std::vector<types::global_dof_index>& new_order,
- std::vector<types::global_dof_index>& reverse,
- const MGDoFHandler<dim>& dof,
- std::vector<unsigned int> &new_order,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_order,
++ std::vector<types::global_dof_index> &reverse,
+ const MGDoFHandler<dim> &dof,
const unsigned int level,
- const typename std::vector<typename MGDoFHandler<dim>::cell_iterator>& cells)
+ const typename std::vector<typename MGDoFHandler<dim>::cell_iterator> &cells)
{
Assert(cells.size() == dof.get_tria().n_cells(level),
ExcDimensionMismatch(cells.size(),
}
Assert(global_index == n_global_dofs, ExcRenumberingIncomplete());
- for (types::global_dof_index i=0;i<new_order.size(); ++i)
- for (unsigned int i=0; i<new_order.size(); ++i)
++ for (types::global_dof_index i=0; i<new_order.size(); ++i)
new_order[reverse[i]] = i;
}
template <int dim>
void compute_cell_wise (
- std::vector<types::global_dof_index>& new_order,
- std::vector<types::global_dof_index>& reverse,
- const MGDoFHandler<dim>& dof,
- std::vector<unsigned int> &new_order,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_order,
++ std::vector<types::global_dof_index> &reverse,
+ const MGDoFHandler<dim> &dof,
const unsigned int level,
- const typename std::vector<typename MGDoFHandler<dim>::cell_iterator>& cells)
+ const typename std::vector<typename MGDoFHandler<dim>::cell_iterator> &cells)
{
Assert(cells.size() == dof.get_tria().n_cells(level),
ExcDimensionMismatch(cells.size(),
}
Assert(global_index == n_global_dofs, ExcRenumberingIncomplete());
- for (types::global_dof_index i=0;i<new_order.size(); ++i)
- for (unsigned int i=0; i<new_order.size(); ++i)
++ for (types::global_dof_index i=0; i<new_order.size(); ++i)
new_order[reverse[i]] = i;
}
template <class DH, int dim>
void
- downstream_dg (DH& dof, const Point<dim>& direction)
+ downstream_dg (DH &dof, const Point<dim> &direction)
{
- std::vector<unsigned int> renumbering(dof.n_dofs());
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs());
compute_downstream_dg(renumbering, dof, direction);
dof.renumber_dofs(renumbering);
template <class DH, int dim>
void
- downstream (DH& dof, const Point<dim>& direction,
+ downstream (DH &dof, const Point<dim> &direction,
const bool dof_wise_renumbering)
{
- std::vector<unsigned int> renumbering(dof.n_dofs());
- std::vector<unsigned int> reverse(dof.n_dofs());
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs());
+ std::vector<types::global_dof_index> reverse(dof.n_dofs());
compute_downstream(renumbering, reverse, dof, direction,
dof_wise_renumbering);
template <class DH, int dim>
void
compute_downstream_dg (
- std::vector<types::global_dof_index>& new_indices,
- const DH& dof,
- const Point<dim>& direction)
- std::vector<unsigned int> &new_indices,
++ std::vector<types::global_dof_index> &new_indices,
+ const DH &dof,
+ const Point<dim> &direction)
{
std::vector<typename DH::cell_iterator>
- ordered_cells(dof.get_tria().n_active_cells());
+ ordered_cells(dof.get_tria().n_active_cells());
const CompareDownstream<typename DH::cell_iterator, dim> comparator(direction);
typename DH::active_cell_iterator begin = dof.begin_active();
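// Illustrative sketch, not part of the patch: what "downstream" ordering
// amounts to, reduced to sorting cell centers by their scalar product
// with the given direction (this is, in essence, what the comparator
// above encodes). The centers and the direction are made-up data.
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

int main ()
{
  const double direction[2] = {1., 0.};
  const double centers[3][2] = {{0.75,0.25}, {0.25,0.25}, {0.25,0.75}};
  // pair.first = <center,direction>, pair.second = original cell number
  std::vector<std::pair<double,unsigned int> > keys;
  for (unsigned int c=0; c<3; ++c)
    keys.push_back (std::make_pair (centers[c][0]*direction[0]
                                    + centers[c][1]*direction[1], c));
  std::sort (keys.begin(), keys.end());   // ascending scalar product
  for (unsigned int i=0; i<keys.size(); ++i)
    std::cout << "position " << i << ": cell " << keys[i].second << std::endl;
  // prints cells 1, 2, 0 -- the order in which they would be renumbered
  return 0;
}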
template <class DH, int dim>
void
compute_downstream_dg (
- std::vector<types::global_dof_index>& new_indices,
- std::vector<types::global_dof_index>& reverse,
- const DH& dof,
- const Point<dim>& direction)
- std::vector<unsigned int> &new_indices,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const DH &dof,
+ const Point<dim> &direction)
{
std::vector<typename DH::cell_iterator>
- ordered_cells(dof.get_tria().n_active_cells());
+ ordered_cells(dof.get_tria().n_active_cells());
const CompareDownstream<typename DH::cell_iterator, dim> comparator(direction);
typename DH::active_cell_iterator begin = dof.begin_active();
template <class DH, int dim>
void
compute_downstream (
- std::vector<types::global_dof_index>& new_indices,
- std::vector<types::global_dof_index>& reverse,
- const DH& dof,
- const Point<dim>& direction,
- std::vector<unsigned int> &new_indices,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const DH &dof,
+ const Point<dim> &direction,
const bool dof_wise_renumbering)
{
if (dof_wise_renumbering == false)
}
else
{
- // similar code as for
- // DoFTools::map_dofs_to_support_points, but
- // need to do this for general DH classes and
- // want to be able to sort the result
- // (otherwise, could use something like
- // DoFTools::map_support_points_to_dofs)
+ // similar code as for
+ // DoFTools::map_dofs_to_support_points, but
+ // need to do this for general DH classes and
+ // want to be able to sort the result
+ // (otherwise, could use something like
+ // DoFTools::map_support_points_to_dofs)
- const unsigned int n_dofs = dof.n_dofs();
- std::vector<std::pair<Point<dim>,unsigned int> > support_point_list
+ const types::global_dof_index n_dofs = dof.n_dofs();
+ std::vector<std::pair<Point<dim>,types::global_dof_index> > support_point_list
- (n_dofs);
+ (n_dofs);
const hp::FECollection<dim> fe_collection (dof.get_fe ());
Assert (fe_collection[0].has_support_points(),
template <int dim>
- void downstream_dg (MGDoFHandler<dim>& dof,
+ void downstream_dg (MGDoFHandler<dim> &dof,
const unsigned int level,
- const Point<dim>& direction)
- const Point<dim> &direction)
++ const Point<dim> &direction)
{
- std::vector<unsigned int> renumbering(dof.n_dofs(level));
- std::vector<unsigned int> reverse(dof.n_dofs(level));
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs(level));
+ std::vector<types::global_dof_index> reverse(dof.n_dofs(level));
compute_downstream_dg(renumbering, reverse, dof, level, direction);
dof.renumber_dofs(level, renumbering);
template <int dim>
- void downstream (MGDoFHandler<dim>& dof,
+ void downstream (MGDoFHandler<dim> &dof,
const unsigned int level,
- const Point<dim>& direction,
- const Point<dim> &direction,
++ const Point<dim> &direction,
const bool dof_wise_renumbering)
{
- std::vector<unsigned int> renumbering(dof.n_dofs(level));
- std::vector<unsigned int> reverse(dof.n_dofs(level));
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs(level));
+ std::vector<types::global_dof_index> reverse(dof.n_dofs(level));
compute_downstream(renumbering, reverse, dof, level, direction,
dof_wise_renumbering);
template <int dim>
void
compute_downstream_dg (
- std::vector<types::global_dof_index>& new_indices,
- std::vector<types::global_dof_index>& reverse,
- const MGDoFHandler<dim>& dof,
- std::vector<unsigned int> &new_indices,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const MGDoFHandler<dim> &dof,
const unsigned int level,
- const Point<dim>& direction)
+ const Point<dim> &direction)
{
std::vector<typename MGDoFHandler<dim>::cell_iterator>
- ordered_cells(dof.get_tria().n_cells(level));
+ ordered_cells(dof.get_tria().n_cells(level));
const CompareDownstream<typename MGDoFHandler<dim>::cell_iterator, dim>
- comparator(direction);
+ comparator(direction);
typename MGDoFHandler<dim>::cell_iterator begin = dof.begin(level);
typename MGDoFHandler<dim>::cell_iterator end = dof.end(level);
template <int dim>
void
compute_downstream (
- std::vector<types::global_dof_index>& new_indices,
- std::vector<types::global_dof_index>& reverse,
- const MGDoFHandler<dim>& dof,
- std::vector<unsigned int> &new_indices,
- std::vector<unsigned int> &reverse,
++ std::vector<types::global_dof_index> &new_indices,
++ std::vector<types::global_dof_index> &reverse,
+ const MGDoFHandler<dim> &dof,
const unsigned int level,
- const Point<dim>& direction,
+ const Point<dim> &direction,
const bool dof_wise_renumbering)
{
if (dof_wise_renumbering == false)
{
Assert (dof.get_fe().has_support_points(),
typename FiniteElement<dim>::ExcFEHasNoSupportPoints());
- const unsigned int n_dofs = dof.n_dofs(level);
- std::vector<std::pair<Point<dim>,unsigned int> > support_point_list
+ const types::global_dof_index n_dofs = dof.n_dofs(level);
+ std::vector<std::pair<Point<dim>,types::global_dof_index> > support_point_list
- (n_dofs);
+ (n_dofs);
Quadrature<dim> q_dummy(dof.get_fe().get_unit_support_points());
FEValues<dim,dim> fe_values (dof.get_fe(), q_dummy,
template <class DH, int dim>
void
clockwise_dg (
- DH& dof,
- const Point<dim>& center,
+ DH &dof,
+ const Point<dim> &center,
const bool counter)
{
- std::vector<unsigned int> renumbering(dof.n_dofs());
+ std::vector<types::global_dof_index> renumbering(dof.n_dofs());
compute_clockwise_dg(renumbering, dof, center, counter);
dof.renumber_dofs(renumbering);
template <class DH, int dim>
void
compute_clockwise_dg (
- std::vector<types::global_dof_index>& new_indices,
- const DH& dof,
- const Point<dim>& center,
- std::vector<unsigned int> &new_indices,
++ std::vector<types::global_dof_index> &new_indices,
+ const DH &dof,
+ const Point<dim> &center,
const bool counter)
{
std::vector<typename DH::cell_iterator>
template <class DH>
void
- random (DH& dof_handler)
+ random (DH &dof_handler)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_random(renumbering, dof_handler);
dof_handler.renumber_dofs(renumbering);
template <class DH>
void
compute_random (
- std::vector<types::global_dof_index>& new_indices,
- const DH& dof_handler)
- std::vector<unsigned int> &new_indices,
++ std::vector<types::global_dof_index> &new_indices,
+ const DH &dof_handler)
{
- const unsigned int n_dofs = dof_handler.n_dofs();
+ const types::global_dof_index n_dofs = dof_handler.n_dofs();
Assert(new_indices.size() == n_dofs,
ExcDimensionMismatch(new_indices.size(), n_dofs));
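// Illustrative sketch, not part of the patch: a random renumbering is
// nothing but a random permutation of 0..n_dofs-1, as in this standalone
// example (the library code above uses its own random source).
#include <algorithm>
#include <cstddef>
#include <vector>

int main ()
{
  std::vector<std::size_t> new_indices (8);
  for (std::size_t i=0; i<new_indices.size(); ++i)
    new_indices[i] = i;                       // identity: 0,1,...,7
  std::random_shuffle (new_indices.begin(), new_indices.end());
  // new_indices[i] is now the new number of dof i; each number occurs once
  return 0;
}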
void
subdomain_wise (DH &dof_handler)
{
- std::vector<unsigned int> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
+ std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(),
- DH::invalid_dof_index);
++ DH::invalid_dof_index);
compute_subdomain_wise(renumbering, dof_handler);
dof_handler.renumber_dofs(renumbering);
= *std::max_element (subdomain_association.begin(),
subdomain_association.end()) + 1;
- // then renumber the subdomains by first
- // looking at those belonging to subdomain
- // 0, then those of subdomain 1, etc. note
- // that the algorithm is stable, i.e. if
- // two dofs i,j have i<j and belong to the
- // same subdomain, then they will be in
- // this order also after reordering
+ // then renumber the subdomains by first
+ // looking at those belonging to subdomain
+ // 0, then those of subdomain 1, etc. note
+ // that the algorithm is stable, i.e. if
+ // two dofs i,j have i<j and belong to the
+ // same subdomain, then they will be in
+ // this order also after reordering
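+ //
+ // (tiny illustration: if subdomain_association were {1,0,0,1},
+ // then dofs 1 and 2 -- subdomain 0 -- receive the new numbers
+ // 0 and 1, and dofs 0 and 3 -- subdomain 1 -- receive 2 and 3,
+ // preserving the original order within each subdomain)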
std::fill (new_dof_indices.begin(), new_dof_indices.end(),
numbers::invalid_unsigned_int);
- unsigned int next_free_index = 0;
- for (unsigned int subdomain=0; subdomain<n_subdomains; ++subdomain)
- for (unsigned int i=0; i<n_dofs; ++i)
+ types::global_dof_index next_free_index = 0;
+ for (types::subdomain_id_t subdomain=0; subdomain<n_subdomains; ++subdomain)
+ for (types::global_dof_index i=0; i<n_dofs; ++i)
if (subdomain_association[i] == subdomain)
{
Assert (new_dof_indices[i] == numbers::invalid_unsigned_int,
const typename DH::cell_iterator cell_row = cell_iter->first;
const typename DH::cell_iterator cell_col = cell_iter->second;
- if (!cell_row->has_children() && !cell_col->has_children())
- {
- const unsigned int dofs_per_cell_row =
- cell_row->get_fe().dofs_per_cell;
- const unsigned int dofs_per_cell_col =
- cell_col->get_fe().dofs_per_cell;
- std::vector<types::global_dof_index>
- local_dof_indices_row(dofs_per_cell_row);
- std::vector<types::global_dof_index>
- local_dof_indices_col(dofs_per_cell_col);
- cell_row->get_dof_indices (local_dof_indices_row);
- cell_col->get_dof_indices (local_dof_indices_col);
- for (unsigned int i=0; i<dofs_per_cell_row; ++i)
- sparsity.add_entries (local_dof_indices_row[i],
- local_dof_indices_col.begin(),
- local_dof_indices_col.end());
- }
- else if (cell_row->has_children())
- {
- const std::vector<typename DH::active_cell_iterator >
- child_cells = GridTools::get_active_child_cells<DH> (cell_row);
- for (unsigned int i=0; i<child_cells.size(); i++)
- {
- const typename DH::active_cell_iterator
- cell_row_child = child_cells[i];
- const unsigned int dofs_per_cell_row =
- cell_row_child->get_fe().dofs_per_cell;
- const unsigned int dofs_per_cell_col =
- cell_col->get_fe().dofs_per_cell;
- std::vector<types::global_dof_index>
- local_dof_indices_row(dofs_per_cell_row);
- std::vector<types::global_dof_index>
- local_dof_indices_col(dofs_per_cell_col);
- cell_row_child->get_dof_indices (local_dof_indices_row);
- cell_col->get_dof_indices (local_dof_indices_col);
- for (unsigned int i=0; i<dofs_per_cell_row; ++i)
- sparsity.add_entries (local_dof_indices_row[i],
- local_dof_indices_col.begin(),
- local_dof_indices_col.end());
- }
- }
- else
- {
- std::vector<typename DH::active_cell_iterator>
- child_cells = GridTools::get_active_child_cells<DH> (cell_col);
- for (unsigned int i=0; i<child_cells.size(); i++)
- {
- const typename DH::active_cell_iterator
- cell_col_child = child_cells[i];
- const unsigned int dofs_per_cell_row =
- cell_row->get_fe().dofs_per_cell;
- const unsigned int dofs_per_cell_col =
- cell_col_child->get_fe().dofs_per_cell;
- std::vector<types::global_dof_index>
- local_dof_indices_row(dofs_per_cell_row);
- std::vector<types::global_dof_index>
- local_dof_indices_col(dofs_per_cell_col);
- cell_row->get_dof_indices (local_dof_indices_row);
- cell_col_child->get_dof_indices (local_dof_indices_col);
- for (unsigned int i=0; i<dofs_per_cell_row; ++i)
- sparsity.add_entries (local_dof_indices_row[i],
- local_dof_indices_col.begin(),
- local_dof_indices_col.end());
- }
- }
+ if (!cell_row->has_children() && !cell_col->has_children())
+ {
+ const unsigned int dofs_per_cell_row =
+ cell_row->get_fe().dofs_per_cell;
+ const unsigned int dofs_per_cell_col =
+ cell_col->get_fe().dofs_per_cell;
- std::vector<unsigned int>
++ std::vector<types::global_dof_index>
+ local_dof_indices_row(dofs_per_cell_row);
- std::vector<unsigned int>
++ std::vector<types::global_dof_index>
+ local_dof_indices_col(dofs_per_cell_col);
+ cell_row->get_dof_indices (local_dof_indices_row);
+ cell_col->get_dof_indices (local_dof_indices_col);
+ for (unsigned int i=0; i<dofs_per_cell_row; ++i)
+ sparsity.add_entries (local_dof_indices_row[i],
+ local_dof_indices_col.begin(),
+ local_dof_indices_col.end());
+ }
+ else if (cell_row->has_children())
+ {
+ const std::vector<typename DH::active_cell_iterator >
+ child_cells = GridTools::get_active_child_cells<DH> (cell_row);
+ for (unsigned int i=0; i<child_cells.size(); i++)
+ {
+ const typename DH::active_cell_iterator
+ cell_row_child = child_cells[i];
+ const unsigned int dofs_per_cell_row =
+ cell_row_child->get_fe().dofs_per_cell;
+ const unsigned int dofs_per_cell_col =
+ cell_col->get_fe().dofs_per_cell;
- std::vector<unsigned int>
++ std::vector<types::global_dof_index>
+ local_dof_indices_row(dofs_per_cell_row);
- std::vector<unsigned int>
++ std::vector<types::global_dof_index>
+ local_dof_indices_col(dofs_per_cell_col);
+ cell_row_child->get_dof_indices (local_dof_indices_row);
+ cell_col->get_dof_indices (local_dof_indices_col);
+ for (unsigned int i=0; i<dofs_per_cell_row; ++i)
+ sparsity.add_entries (local_dof_indices_row[i],
+ local_dof_indices_col.begin(),
+ local_dof_indices_col.end());
+ }
+ }
+ else
+ {
+ std::vector<typename DH::active_cell_iterator>
+ child_cells = GridTools::get_active_child_cells<DH> (cell_col);
+ for (unsigned int i=0; i<child_cells.size(); i++)
+ {
+ const typename DH::active_cell_iterator
+ cell_col_child = child_cells[i];
+ const unsigned int dofs_per_cell_row =
+ cell_row->get_fe().dofs_per_cell;
+ const unsigned int dofs_per_cell_col =
+ cell_col_child->get_fe().dofs_per_cell;
- std::vector<unsigned int>
++ std::vector<types::global_dof_index>
+ local_dof_indices_row(dofs_per_cell_row);
- std::vector<unsigned int>
++ std::vector<types::global_dof_index>
+ local_dof_indices_col(dofs_per_cell_col);
+ cell_row->get_dof_indices (local_dof_indices_row);
+ cell_col_child->get_dof_indices (local_dof_indices_col);
+ for (unsigned int i=0; i<dofs_per_cell_row; ++i)
+ sparsity.add_entries (local_dof_indices_row[i],
+ local_dof_indices_col.begin(),
+ local_dof_indices_col.end());
+ }
+ }
}
}
#ifdef DEBUG
if (sparsity.n_rows() != 0)
{
- types::global_dof_index max_element = 0;
- for (std::vector<types::global_dof_index>::const_iterator i=dof_to_boundary_mapping.begin();
- i!=dof_to_boundary_mapping.end(); ++i)
- if ((*i != DH::invalid_dof_index) &&
- (*i > max_element))
- max_element = *i;
- AssertDimension (max_element, sparsity.n_rows()-1);
- unsigned int max_element = 0;
- for (std::vector<unsigned int>::const_iterator i=dof_to_boundary_mapping.begin();
++ types::global_dof_index max_element = 0;
++ for (std::vector<types::global_dof_index>::const_iterator i=dof_to_boundary_mapping.begin();
+ i!=dof_to_boundary_mapping.end(); ++i)
+ if ((*i != DH::invalid_dof_index) &&
+ (*i > max_element))
+ max_element = *i;
+ AssertDimension (max_element, sparsity.n_rows()-1);
};
#endif
- std::vector<unsigned int> dofs_on_this_face;
+ std::vector<types::global_dof_index> dofs_on_this_face;
dofs_on_this_face.reserve (max_dofs_per_face(dof));
- // loop over all faces to check
- // whether they are at a
- // boundary. note that we need not
- // take special care of single
- // lines (using
- // @p{cell->has_boundary_lines}),
- // since we do not support
- // boundaries of dimension dim-2,
- // and so every boundary line is
- // also part of a boundary face.
+ // loop over all faces to check
+ // whether they are at a
+ // boundary. note that we need not
+ // take special care of single
+ // lines (using
+ // @p{cell->has_boundary_lines}),
+ // since we do not support
+ // boundaries of dimension dim-2,
+ // and so every boundary line is
+ // also part of a boundary face.
typename DH::active_cell_iterator cell = dof.begin_active(),
endc = dof.end();
for (; cell!=endc; ++cell)
while (!cell->active())
cell = cell->child(direction);
- const unsigned int dofs_per_vertex = cell->get_fe().dofs_per_vertex;
- std::vector<types::global_dof_index> boundary_dof_boundary_indices (dofs_per_vertex);
+ const unsigned int dofs_per_vertex = cell->get_fe().dofs_per_vertex;
- std::vector<unsigned int> boundary_dof_boundary_indices (dofs_per_vertex);
++ std::vector<types::global_dof_index> boundary_dof_boundary_indices (dofs_per_vertex);
- // next get boundary mapped dof
- // indices of boundary dofs
+ // next get boundary mapped dof
+ // indices of boundary dofs
for (unsigned int i=0; i<dofs_per_vertex; ++i)
boundary_dof_boundary_indices[i]
= dof_to_boundary_mapping[cell->vertex_dof_index(direction,i)];
#ifdef DEBUG
if (sparsity.n_rows() != 0)
{
- types::global_dof_index max_element = 0;
- for (std::vector<types::global_dof_index>::const_iterator i=dof_to_boundary_mapping.begin();
- i!=dof_to_boundary_mapping.end(); ++i)
- if ((*i != DH::invalid_dof_index) &&
- (*i > max_element))
- max_element = *i;
- AssertDimension (max_element, sparsity.n_rows()-1);
- unsigned int max_element = 0;
- for (std::vector<unsigned int>::const_iterator i=dof_to_boundary_mapping.begin();
++ types::global_dof_index max_element = 0;
++ for (std::vector<types::global_dof_index>::const_iterator i=dof_to_boundary_mapping.begin();
+ i!=dof_to_boundary_mapping.end(); ++i)
+ if ((*i != DH::invalid_dof_index) &&
+ (*i > max_element))
+ max_element = *i;
+ AssertDimension (max_element, sparsity.n_rows()-1);
};
#endif
{
const FiniteElement<DH::dimension,DH::space_dimension> &fe = dof.get_fe();
- std::vector<types::global_dof_index> dofs_on_this_cell(fe.dofs_per_cell);
- std::vector<types::global_dof_index> dofs_on_other_cell(fe.dofs_per_cell);
- std::vector<unsigned int> dofs_on_this_cell(fe.dofs_per_cell);
- std::vector<unsigned int> dofs_on_other_cell(fe.dofs_per_cell);
++ std::vector<types::global_dof_index> dofs_on_this_cell(fe.dofs_per_cell);
++ std::vector<types::global_dof_index> dofs_on_other_cell(fe.dofs_per_cell);
const Table<2,Coupling>
- int_dof_mask = dof_couplings_from_component_couplings(fe, int_mask),
- flux_dof_mask = dof_couplings_from_component_couplings(fe, flux_mask);
+ int_dof_mask = dof_couplings_from_component_couplings(fe, int_mask),
+ flux_dof_mask = dof_couplings_from_component_couplings(fe, flux_mask);
Table<2,bool> support_on_face(fe.dofs_per_cell,
GeometryInfo<DH::dimension>::faces_per_cell);
const dealii::hp::FECollection<dim,spacedim> &fe = dof.get_fe();
- std::vector<types::global_dof_index> dofs_on_this_cell(DoFTools::max_dofs_per_cell(dof));
- std::vector<types::global_dof_index> dofs_on_other_cell(DoFTools::max_dofs_per_cell(dof));
- std::vector<unsigned int> dofs_on_this_cell(DoFTools::max_dofs_per_cell(dof));
- std::vector<unsigned int> dofs_on_other_cell(DoFTools::max_dofs_per_cell(dof));
++ std::vector<types::global_dof_index> dofs_on_this_cell(DoFTools::max_dofs_per_cell(dof));
++ std::vector<types::global_dof_index> dofs_on_other_cell(DoFTools::max_dofs_per_cell(dof));
const std::vector<Table<2,Coupling> >
- int_dof_mask
+ int_dof_mask
= dof_couplings_from_component_couplings(fe, int_mask);
typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator
inline
bool
check_master_dof_list (const FullMatrix<double> &face_interpolation_matrix,
- const std::vector<types::global_dof_index> &master_dof_list)
- const std::vector<unsigned int> &master_dof_list)
++ const std::vector<types::global_dof_index> &master_dof_list)
{
const unsigned int N = master_dof_list.size();
(fe2.dofs_per_quad <= fe1.dofs_per_quad),
ExcInternalError());
- // the idea here is to designate as
- // many DoFs in fe1 per object
- // (vertex, line, quad) as master as
- // there are such dofs in fe2
- // (indices are int, because we want
- // to avoid the 'unsigned int < 0 is
- // always false warning for the cases
- // at the bottom in 1d and 2d)
- //
- // as mentioned in the paper, it is
- // not always easy to find a set of
- // master dofs that produces an
- // invertible matrix. to this end, we
- // check in each step whether the
- // matrix is still invertible and
- // simply discard this dof if the
- // matrix is not invertible anymore.
- //
- // the cases where we did have
- // trouble in the past were with
- // adding more quad dofs when Q3 and
- // Q4 elements meet at a refined face
- // in 3d (see the hp/crash_12 test
- // that tests that we can do exactly
- // this, and failed before we had
- // code to compensate for this
- // case). the other case are system
- // elements: if we have say a Q1Q2 vs
- // a Q2Q3 element, then we can't just
- // take all master dofs on a line
- // from a single base element, since
- // the shape functions of that base
- // element are independent of that of
- // the other one. this latter case
- // shows up when running
- // hp/hp_constraints_q_system_06
- std::vector<types::global_dof_index> master_dof_list;
- unsigned int index = 0;
- for (int v=0;
- v<static_cast<signed int>(GeometryInfo<dim>::vertices_per_face);
- ++v)
- {
- unsigned int dofs_added = 0;
- unsigned int i = 0;
- while (dofs_added < fe2.dofs_per_vertex)
- {
- // make sure that we
- // were able to find
- // a set of master
- // dofs and that the
- // code down below
- // didn't just reject
- // all our efforts
+ // the idea here is to designate as
+ // many DoFs in fe1 per object
+ // (vertex, line, quad) as master as
+ // there are such dofs in fe2
+ // (indices are int, because we want
+ // to avoid the 'unsigned int < 0 is
+ // always false' warning for the cases
+ // at the bottom in 1d and 2d)
+ //
+ // as mentioned in the paper, it is
+ // not always easy to find a set of
+ // master dofs that produces an
+ // invertible matrix. to this end, we
+ // check in each step whether the
+ // matrix is still invertible and
+ // simply discard this dof if the
+ // matrix is not invertible anymore.
+ //
+ // the cases where we did have
+ // trouble in the past were with
+ // adding more quad dofs when Q3 and
+ // Q4 elements meet at a refined face
+ // in 3d (see the hp/crash_12 test
+ // that tests that we can do exactly
+ // this, and failed before we had
+ // code to compensate for this
+ // case). the other case is system
+ // elements: if we have say a Q1Q2 vs
+ // a Q2Q3 element, then we can't just
+ // take all master dofs on a line
+ // from a single base element, since
+ // the shape functions of that base
+ // element are independent of that of
+ // the other one. this latter case
+ // shows up when running
+ // hp/hp_constraints_q_system_06
- std::vector<unsigned int> master_dof_list;
++ std::vector<types::global_dof_index> master_dof_list;
+ unsigned int index = 0;
+ for (int v=0;
+ v<static_cast<signed int>(GeometryInfo<dim>::vertices_per_face);
+ ++v)
+ {
+ unsigned int dofs_added = 0;
+ unsigned int i = 0;
+ while (dofs_added < fe2.dofs_per_vertex)
+ {
+ // make sure that we
+ // were able to find
+ // a set of master
+ // dofs and that the
+ // code down below
+ // didn't just reject
+ // all our efforts
Assert (i < fe1.dofs_per_vertex,
ExcInternalError());
- // tentatively push
- // this vertex dof
+ // tentatively push
+ // this vertex dof
master_dof_list.push_back (index+i);
- // then see what
- // happens. if it
- // succeeds, fine
+ // then see what
+ // happens. if it
+ // succeeds, fine
if (check_master_dof_list (face_interpolation_matrix,
master_dof_list)
== true)
AssertDimension (index, fe1.dofs_per_face);
AssertDimension (master_dof_list.size(), fe2.dofs_per_face);
- // finally copy the list into the
- // mask
- std::fill (master_dof_mask.begin(), master_dof_mask.end(), false);
- for (std::vector<types::global_dof_index>::const_iterator i=master_dof_list.begin();
- i!=master_dof_list.end(); ++i)
- master_dof_mask[*i] = true;
+ // finally copy the list into the
+ // mask
+ std::fill (master_dof_mask.begin(), master_dof_mask.end(), false);
- for (std::vector<unsigned int>::const_iterator i=master_dof_list.begin();
++ for (std::vector<types::global_dof_index>::const_iterator i=master_dof_list.begin();
+ i!=master_dof_list.end(); ++i)
+ master_dof_mask[*i] = true;
}
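// Illustrative sketch, not part of the patch: the greedy strategy the
// comment above describes, boiled down to a standalone example. We pick
// columns of a small 2x4 "face interpolation matrix" as master dofs one
// by one and keep a column only if the resulting square submatrix stays
// invertible; a column that would make it singular is discarded. The
// matrix entries are made up, and the real code checks invertibility at
// every step rather than only for the final square matrix.
#include <cmath>
#include <iostream>
#include <vector>

int main ()
{
  // rows: dofs of the coarser element, columns: candidate master dofs
  const double M[2][4] = {{1., 2., 2., 0.},
                          {1., 2., 3., 1.}};
  std::vector<unsigned int> masters;
  for (unsigned int col=0; col<4 && masters.size()<2; ++col)
    {
      std::vector<unsigned int> trial (masters);
      trial.push_back (col);
      if (trial.size() < 2)
        {
          masters = trial;               // not square yet, tentatively keep
          continue;
        }
      const double det = M[0][trial[0]]*M[1][trial[1]]
                         - M[0][trial[1]]*M[1][trial[0]];
      if (std::abs (det) > 1e-12)
        masters = trial;                 // submatrix invertible: keep column
      // otherwise discard the column, as described in the comment above
    }
  std::cout << "master columns:";
  for (unsigned int i=0; i<masters.size(); ++i)
    std::cout << ' ' << masters[i];
  std::cout << std::endl;                // prints "0 2": column 1 is
                                         // linearly dependent on column 0
  return 0;
}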
- /**
- * Copy constraints into a constraint
- * matrix object.
- *
- * This function removes zero
- * constraints and those, which
- * constrain a DoF which was
- * already eliminated in one of
- * the previous steps of the hp
- * hanging node procedure.
- *
- * It also suppresses very small
- * entries in the constraint matrix to
- * avoid making the sparsity pattern
- * fuller than necessary.
- */
+ /**
+ * Copy constraints into a constraint
+ * matrix object.
+ *
+ * This function removes zero
+ * constraints and those which
+ * constrain a DoF that was
+ * already eliminated in one of
+ * the previous steps of the hp
+ * hanging node procedure.
+ *
+ * It also suppresses very small
+ * entries in the constraint matrix to
+ * avoid making the sparsity pattern
+ * fuller than necessary.
+ */
void
- filter_constraints (const std::vector<unsigned int> &master_dofs,
- const std::vector<unsigned int> &slave_dofs,
+ filter_constraints (const std::vector<types::global_dof_index> &master_dofs,
- const std::vector<types::global_dof_index> &slave_dofs,
- const FullMatrix<double> &face_constraints,
- ConstraintMatrix &constraints)
++ const std::vector<types::global_dof_index> &slave_dofs,
+ const FullMatrix<double> &face_constraints,
+ ConstraintMatrix &constraints)
{
Assert (face_constraints.n () == master_dofs.size (),
ExcDimensionMismatch(master_dofs.size (),
void
- make_hp_hanging_node_constraints (const dealii::hp::DoFHandler<1> & /*dof_handler*/,
- ConstraintMatrix & /*constraints*/)
+ make_hp_hanging_node_constraints (const dealii::hp::DoFHandler<1> &/*dof_handler*/,
+ ConstraintMatrix &/*constraints*/)
{
- // we may have to compute
- // constraints for
- // vertices. gotta think about
- // that a bit more
+ // we may have to compute
+ // constraints for
+ // vertices. gotta think about
+ // that a bit more
//TODO[WB]: think about what to do here...
}
void
- make_oldstyle_hanging_node_constraints (const dealii::hp::DoFHandler<1> & /*dof_handler*/,
- ConstraintMatrix & /*constraints*/,
+ make_oldstyle_hanging_node_constraints (const dealii::hp::DoFHandler<1> &/*dof_handler*/,
+ ConstraintMatrix &/*constraints*/,
dealii::internal::int2type<1>)
{
- // we may have to compute
- // constraints for
- // vertices. gotta think about
- // that a bit more
+ // we may have to compute
+ // constraints for
+ // vertices. gotta think about
+ // that a bit more
//TODO[WB]: think about what to do here...
}
const unsigned int spacedim = DH::space_dimension;
- std::vector<unsigned int> dofs_on_mother;
- std::vector<unsigned int> dofs_on_children;
+ std::vector<types::global_dof_index> dofs_on_mother;
+ std::vector<types::global_dof_index> dofs_on_children;
- // loop over all lines; only on
- // lines there can be constraints.
- // We do so by looping over all
- // active cells and checking
- // whether any of the faces are
- // refined which can only be from
- // the neighboring cell because
- // this one is active. In that
- // case, the face is subject to
- // constraints
- //
- // note that even though we may
- // visit a face twice if the
- // neighboring cells are equally
- // refined, we can only visit each
- // face with hanging nodes once
+ // loop over all lines; only on
+ // lines there can be constraints.
+ // We do so by looping over all
+ // active cells and checking
+ // whether any of the faces are
+ // refined which can only be from
+ // the neighboring cell because
+ // this one is active. In that
+ // case, the face is subject to
+ // constraints
+ //
+ // note that even though we may
+ // visit a face twice if the
+ // neighboring cells are equally
+ // refined, we can only visit each
+ // face with hanging nodes once
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
const unsigned int dim = 3;
- std::vector<unsigned int> dofs_on_mother;
- std::vector<unsigned int> dofs_on_children;
+ std::vector<types::global_dof_index> dofs_on_mother;
+ std::vector<types::global_dof_index> dofs_on_children;
- // loop over all quads; only on
- // quads there can be constraints.
- // We do so by looping over all
- // active cells and checking
- // whether any of the faces are
- // refined which can only be from
- // the neighboring cell because
- // this one is active. In that
- // case, the face is subject to
- // constraints
- //
- // note that even though we may
- // visit a face twice if the
- // neighboring cells are equally
- // refined, we can only visit each
- // face with hanging nodes once
+ // loop over all quads; only on
+ // quads there can be constraints.
+ // We do so by looping over all
+ // active cells and checking
+ // whether any of the faces are
+ // refined which can only be from
+ // the neighboring cell because
+ // this one is active. In that
+ // case, the face is subject to
+ // constraints
+ //
+ // note that even though we may
+ // visit a face twice if the
+ // neighboring cells are equally
+ // refined, we can only visit each
+ // face with hanging nodes once
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
const unsigned int spacedim = DH::space_dimension;
- // a matrix to be used for
- // constraints below. declared
- // here and simply resized down
- // below to avoid permanent
- // re-allocation of memory
+ // a matrix to be used for
+ // constraints below. declared
+ // here and simply resized down
+ // below to avoid permanent
+ // re-allocation of memory
FullMatrix<double> constraint_matrix;
- // similarly have arrays that
- // will hold master and slave
- // dof numbers, as well as a
- // scratch array needed for the
- // complicated case below
+ // similarly have arrays that
+ // will hold master and slave
+ // dof numbers, as well as a
+ // scratch array needed for the
+ // complicated case below
- std::vector<unsigned int> master_dofs;
- std::vector<unsigned int> slave_dofs;
- std::vector<unsigned int> scratch_dofs;
+ std::vector<types::global_dof_index> master_dofs;
+ std::vector<types::global_dof_index> slave_dofs;
+ std::vector<types::global_dof_index> scratch_dofs;
- // caches for the face and
- // subface interpolation
- // matrices between different
- // (or the same) finite
- // elements. we compute them
- // only once, namely the first
- // time they are needed, and
- // then just reuse them
+ // caches for the face and
+ // subface interpolation
+ // matrices between different
+ // (or the same) finite
+ // elements. we compute them
+ // only once, namely the first
+ // time they are needed, and
+ // then just reuse them
Table<2,std_cxx1x::shared_ptr<FullMatrix<double> > >
- face_interpolation_matrices (n_finite_elements (dof_handler),
- n_finite_elements (dof_handler));
+ face_interpolation_matrices (n_finite_elements (dof_handler),
+ n_finite_elements (dof_handler));
Table<3,std_cxx1x::shared_ptr<FullMatrix<double> > >
- subface_interpolation_matrices (n_finite_elements (dof_handler),
- n_finite_elements (dof_handler),
- GeometryInfo<dim>::max_children_per_face);
-
- // similarly have a cache for
- // the matrices that are split
- // into their master and slave
- // parts, and for which the
- // master part is
- // inverted. these two matrices
- // are derived from the face
- // interpolation matrix as
- // described in the @ref hp_paper "hp paper"
+ subface_interpolation_matrices (n_finite_elements (dof_handler),
+ n_finite_elements (dof_handler),
+ GeometryInfo<dim>::max_children_per_face);
+
+ // similarly have a cache for
+ // the matrices that are split
+ // into their master and slave
+ // parts, and for which the
+ // master part is
+ // inverted. these two matrices
+ // are derived from the face
+ // interpolation matrix as
+ // described in the @ref hp_paper "hp paper"
Table<2,std_cxx1x::shared_ptr<std::pair<FullMatrix<double>,FullMatrix<double> > > >
- split_face_interpolation_matrices (n_finite_elements (dof_handler),
- n_finite_elements (dof_handler));
-
- // finally, for each pair of finite
- // elements, have a mask that states
- // which of the degrees of freedom on
- // the coarse side of a refined face
- // will act as master dofs.
+ split_face_interpolation_matrices (n_finite_elements (dof_handler),
+ n_finite_elements (dof_handler));
+
+ // finally, for each pair of finite
+ // elements, have a mask that states
+ // which of the degrees of freedom on
+ // the coarse side of a refined face
+ // will act as master dofs.
Table<2,std_cxx1x::shared_ptr<std::vector<bool> > >
- master_dof_masks (n_finite_elements (dof_handler),
- n_finite_elements (dof_handler));
-
- // loop over all faces
- //
- // note that even though we may
- // visit a face twice if the
- // neighboring cells are equally
- // refined, we can only visit each
- // face with hanging nodes once
+ master_dof_masks (n_finite_elements (dof_handler),
+ n_finite_elements (dof_handler));
+
+ // loop over all faces
+ //
+ // note that even though we may
+ // visit a face twice if the
+ // neighboring cells are equally
+ // refined, we can only visit each
+ // face with hanging nodes once
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
// then loop over all cells and do
// the work
- std::vector<unsigned int> indices;
+ std::vector<types::global_dof_index> indices;
for (typename DH::active_cell_iterator c=dof.begin_active();
- c!=dof.end(); ++ c)
+ c!=dof.end(); ++ c)
if (c->is_locally_owned())
{
const unsigned int fe_index = c->active_fe_index();
const ComponentMask &component_mask,
std::vector<bool> &selected_dofs)
{
- const FiniteElement<DH::dimension,DH::space_dimension>& fe = dof.get_fe();
- const FiniteElement<dim,spacedim> &fe = dof.get_fe();
++ const FiniteElement<DH::dimension,DH::space_dimension> &fe = dof.get_fe();
Assert(component_mask.represents_n_components(n_components(dof)),
ExcMessage ("The given component mask is not sized correctly to represent the "
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
local_selected_dofs[i] = component_mask[local_component_asssociation[i]];
- // then loop over all cells and do
- // work
+ // then loop over all cells and do
+ // the work
- std::vector<unsigned int> indices(fe.dofs_per_cell);
- typename MGDoFHandler<dim,spacedim>::cell_iterator c;
+ std::vector<types::global_dof_index> indices(fe.dofs_per_cell);
+ typename DH::cell_iterator c;
for (c = dof.begin(level) ; c != dof.end(level) ; ++ c)
{
c->get_mg_dof_indices(indices);
selected_dofs.clear ();
selected_dofs.set_size(dof_handler.n_dofs());
- // let's see whether we have to
- // check for certain boundary
- // indicators or whether we can
- // accept all
+ // let's see whether we have to
+ // check for certain boundary
+ // indicators or whether we can
+ // accept all
const bool check_boundary_indicator = (boundary_indicators.size() != 0);
- // also see whether we have to
- // check whether a certain vector
- // component is selected, or all
+ // also see whether we have to
+ // check whether a certain vector
+ // component is selected, or all
const bool check_vector_component
- = ((component_mask.represents_the_all_selected_mask() == false)
- ||
- (component_mask.n_selected_components(n_components(dof_handler)) !=
- n_components(dof_handler)));
+ = ((component_mask.represents_the_all_selected_mask() == false)
+ ||
+ (component_mask.n_selected_components(n_components(dof_handler)) !=
+ n_components(dof_handler)));
- std::vector<unsigned int> face_dof_indices;
+ std::vector<types::global_dof_index> face_dof_indices;
face_dof_indices.reserve (max_dofs_per_face(dof_handler));
- // now loop over all cells and
- // check whether their faces are at
- // the boundary. note that we need
- // not take special care of single
- // lines being at the boundary
- // (using
- // @p{cell->has_boundary_lines}),
- // since we do not support
- // boundaries of dimension dim-2,
- // and so every isolated boundary
- // line is also part of a boundary
- // face which we will be visiting
- // sooner or later
+ // now loop over all cells and
+ // check whether their faces are at
+ // the boundary. note that we need
+ // not take special care of single
+ // lines being at the boundary
+ // (using
+ // @p{cell->has_boundary_lines}),
+ // since we do not support
+ // boundaries of dimension dim-2,
+ // and so every isolated boundary
+ // line is also part of a boundary
+ // face which we will be visiting
+ // sooner or later
for (typename DH::active_cell_iterator cell=dof_handler.begin_active();
cell!=dof_handler.end(); ++cell)
- // only work on cells that are either locally owned or at
- // least ghost cells
+ // only work on cells that are either locally owned or at
+ // least ghost cells
if (cell->is_artificial() == false)
for (unsigned int face=0;
face<GeometryInfo<DH::dimension>::faces_per_cell; ++face)
const bool check_vector_component
= (component_mask.represents_the_all_selected_mask() == false);
- // clear and reset array by default
- // values
+ // clear and reset array by default
+ // values
selected_dofs.clear ();
selected_dofs.resize (dof_handler.n_dofs(), false);
- std::vector<unsigned int> cell_dof_indices;
+ std::vector<types::global_dof_index> cell_dof_indices;
cell_dof_indices.reserve (max_dofs_per_cell(dof_handler));
- // now loop over all cells and
- // check whether their faces are at
- // the boundary. note that we need
- // not take special care of single
- // lines being at the boundary
- // (using
- // @p{cell->has_boundary_lines}),
- // since we do not support
- // boundaries of dimension dim-2,
- // and so every isolated boundary
- // line is also part of a boundary
- // face which we will be visiting
- // sooner or later
+ // now loop over all cells and
+ // check whether their faces are at
+ // the boundary. note that we need
+ // not take special care of single
+ // lines being at the boundary
+ // (using
+ // @p{cell->has_boundary_lines}),
+ // since we do not support
+ // boundaries of dimension dim-2,
+ // and so every isolated boundary
+ // line is also part of a boundary
+ // face which we will be visiting
+ // sooner or later
for (typename DH::active_cell_iterator cell=dof_handler.begin_active();
cell!=dof_handler.end(); ++cell)
for (unsigned int face=0;
Assert(selected_dofs.size() == dof_handler.n_dofs(),
ExcDimensionMismatch(selected_dofs.size(), dof_handler.n_dofs()));
- // preset all values by false
+ // preset all values by false
std::fill_n (selected_dofs.begin(), dof_handler.n_dofs(), false);
- std::vector<unsigned int> local_dof_indices;
+ std::vector<types::global_dof_index> local_dof_indices;
local_dof_indices.reserve (max_dofs_per_cell(dof_handler));
- // this function is similar to the
- // make_sparsity_pattern function,
- // see there for more information
+ // this function is similar to the
+ // make_sparsity_pattern function,
+ // see there for more information
typename DH::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() == subdomain_id)
{
template <class DH>
void
- extract_locally_active_dofs (const DH & dof_handler,
- IndexSet & dof_set)
+ extract_locally_active_dofs (const DH &dof_handler,
+ IndexSet &dof_set)
{
- // collect all the locally owned dofs
+ // collect all the locally owned dofs
dof_set = dof_handler.locally_owned_dofs();
- // add the DoF on the adjacent ghost cells
- // to the IndexSet, cache them in a
- // set. need to check each dof manually
- // because we can't be sure that the dof
- // range of locally_owned_dofs is really
- // contiguous.
+ // add the DoF on the adjacent ghost cells
+ // to the IndexSet, cache them in a
+ // set. need to check each dof manually
+ // because we can't be sure that the dof
+ // range of locally_owned_dofs is really
+ // contiguous.
- std::vector<unsigned int> dof_indices;
- std::set<unsigned int> global_dof_indices;
+ std::vector<types::global_dof_index> dof_indices;
+ std::set<types::global_dof_index> global_dof_indices;
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
dof_indices.resize(cell->get_fe().dofs_per_cell);
cell->get_dof_indices(dof_indices);
- for (std::vector<types::global_dof_index>::iterator it=dof_indices.begin();
- it!=dof_indices.end();
- ++it)
- if (!dof_set.is_element(*it))
- global_dof_indices.insert(*it);
- }
- for (std::vector<unsigned int>::iterator it=dof_indices.begin();
++ for (std::vector<types::global_dof_index>::iterator it=dof_indices.begin();
+ it!=dof_indices.end();
+ ++it)
+ if (!dof_set.is_element(*it))
+ global_dof_indices.insert(*it);
+ }
dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
template <class DH>
void
- extract_locally_relevant_dofs (const DH & dof_handler,
- IndexSet & dof_set)
+ extract_locally_relevant_dofs (const DH &dof_handler,
+ IndexSet &dof_set)
{
- // collect all the locally owned dofs
+ // collect all the locally owned dofs
dof_set = dof_handler.locally_owned_dofs();
- // add the DoF on the adjacent ghost cells
- // to the IndexSet, cache them in a
- // set. need to check each dof manually
- // because we can't be sure that the dof
- // range of locally_owned_dofs is really
- // contiguous.
+ // add the DoF on the adjacent ghost cells
+ // to the IndexSet, cache them in a
+ // set. need to check each dof manually
+ // because we can't be sure that the dof
+ // range of locally_owned_dofs is really
+ // contiguous.
- std::vector<unsigned int> dof_indices;
- std::set<unsigned int> global_dof_indices;
+ std::vector<types::global_dof_index> dof_indices;
+ std::set<types::global_dof_index> global_dof_indices;
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
dof_indices.resize(cell->get_fe().dofs_per_cell);
cell->get_dof_indices(dof_indices);
- for (std::vector<types::global_dof_index>::iterator it=dof_indices.begin();
- it!=dof_indices.end();
- ++it)
- if (!dof_set.is_element(*it))
- global_dof_indices.insert(*it);
- }
- for (std::vector<unsigned int>::iterator it=dof_indices.begin();
++ for (std::vector<types::global_dof_index>::iterator it=dof_indices.begin();
+ it!=dof_indices.end();
+ ++it)
+ if (!dof_set.is_element(*it))
+ global_dof_indices.insert(*it);
+ }
dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
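// Illustrative fragment, not part of the patch: a typical call site for
// the function above in a parallel program; it assumes a DoFHandler
// object 'dof_handler' that has been distributed elsewhere. The resulting
// IndexSet is what one would then use to size ghosted vectors.
//
//   IndexSet locally_relevant_dofs;
//   DoFTools::extract_locally_relevant_dofs (dof_handler,
//                                            locally_relevant_dofs);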
std::fill_n (subdomain_association.begin(), dof_handler.n_dofs(),
types::invalid_subdomain_id);
- std::vector<unsigned int> local_dof_indices;
+ std::vector<types::global_dof_index> local_dof_indices;
local_dof_indices.reserve (max_dofs_per_cell(dof_handler));
- // pseudo-randomly assign variables
- // which lie on the interface
- // between subdomains to each of
- // the two or more
+ // pseudo-randomly assign variables
+ // which lie on the interface
+ // between subdomains to each of
+ // the two or more adjacent subdomains
bool coin_flip = true;
- // loop over all cells and record
- // which subdomain a DoF belongs
- // to. toss a coin in case it is on
- // an interface
+ // loop over all cells and record
+ // which subdomain a DoF belongs
+ // to. toss a coin in case it is on
+ // an interface
typename DH::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
Assert (cell->is_artificial() == false,
IndexSet index_set (dof_handler.n_dofs());
- std::vector<unsigned int> local_dof_indices;
+ std::vector<types::global_dof_index> local_dof_indices;
local_dof_indices.reserve (max_dofs_per_cell(dof_handler));
- // first generate an unsorted list of all
- // indices which we fill from the back. could
- // also insert them directly into the
- // IndexSet, but that inserts indices in the
- // middle, which is an O(n^2) algorithm and
- // hence too expensive. Could also use
- // std::set, but that is in general more
- // expensive than a vector
+ // first generate an unsorted list of all
+ // indices which we fill from the back. could
+ // also insert them directly into the
+ // IndexSet, but that inserts indices in the
+ // middle, which is an O(n^2) algorithm and
+ // hence too expensive. Could also use
+ // std::set, but that is in general more
+ // expensive than a vector
- std::vector<unsigned int> subdomain_indices;
+ std::vector<types::global_dof_index> subdomain_indices;
typename DH::active_cell_iterator
- cell = dof_handler.begin_active(),
- endc = dof_handler.end();
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
for (; cell!=endc; ++cell)
if ((cell->is_artificial() == false)
&&
// component's index
template <int dim, int spacedim>
void
- resolve_components (const FiniteElement<dim,spacedim>&fe,
- const std::vector<unsigned char> &dofs_by_component,
- const std::vector<unsigned int> &target_component,
- const bool only_once,
- std::vector<types::global_dof_index> &dofs_per_component,
- unsigned int &component)
+ resolve_components (const FiniteElement<dim,spacedim> &fe,
+ const std::vector<unsigned char> &dofs_by_component,
- const std::vector<unsigned int> &target_component,
++ const std::vector<unsigned int> &target_component,
+ const bool only_once,
- std::vector<unsigned int> &dofs_per_component,
++ std::vector<types::global_dof_index> &dofs_per_component,
+ unsigned int &component)
{
- for (unsigned int b=0;b<fe.n_base_elements();++b)
+ for (unsigned int b=0; b<fe.n_base_elements(); ++b)
{
- const FiniteElement<dim,spacedim>& base = fe.base_element(b);
- // Dimension of base element
+ const FiniteElement<dim,spacedim> &base = fe.base_element(b);
+ // Number of components of the base element
unsigned int d = base.n_components();
- for (unsigned int m=0;m<fe.element_multiplicity(b);++m)
+ for (unsigned int m=0; m<fe.element_multiplicity(b); ++m)
{
if (base.n_base_elements() > 1)
resolve_components(base, dofs_by_component, target_component,
template <int dim, int spacedim>
void
- resolve_components (const hp::FECollection<dim,spacedim>&fe_collection,
+ resolve_components (const hp::FECollection<dim,spacedim> &fe_collection,
const std::vector<unsigned char> &dofs_by_component,
- const std::vector<unsigned int> &target_component,
+ const std::vector<unsigned int> &target_component,
const bool only_once,
- std::vector<unsigned int> &dofs_per_component,
+ std::vector<types::global_dof_index> &dofs_per_component,
unsigned int &component)
{
- // assert that all elements in the collection have the same
- // structure (base elements and multiplicity, components per base
- // element) and then simply call the function above
+ // assert that all elements in the collection have the same
+ // structure (base elements and multiplicity, components per base
+ // element) and then simply call the function above
for (unsigned int fe=1; fe<fe_collection.size(); ++fe)
{
Assert (fe_collection[fe].n_components() == fe_collection[0].n_components(),
template <class DH>
void
count_dofs_per_component (
- const DH & dof_handler,
- std::vector<types::global_dof_index>& dofs_per_component,
+ const DH &dof_handler,
- std::vector<unsigned int> &dofs_per_component,
++ std::vector<types::global_dof_index> &dofs_per_component,
bool only_once,
std::vector<unsigned int> target_component)
{
template <int dim, int spacedim>
void
count_dofs_per_component (const DoFHandler<dim,spacedim> &dof_handler,
- std::vector<types::global_dof_index> &dofs_per_component,
- std::vector<unsigned int> target_component)
- std::vector<unsigned int> &dofs_per_component,
++ std::vector<types::global_dof_index> &dofs_per_component,
+ std::vector<unsigned int> target_component)
{
count_dofs_per_component (dof_handler, dofs_per_component,
false, target_component);
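// Illustrative fragment, not part of the patch: how one might call the
// function above for a vector-valued element, e.g. to report how many
// dofs belong to each vector component ('dof_handler' is assumed to
// exist and to use such an element):
//
//   std::vector<types::global_dof_index>
//     dofs_per_component (dof_handler.get_fe().n_components());
//   DoFTools::count_dofs_per_component (dof_handler, dofs_per_component);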
const unsigned int n_fine_dofs = weight_mapping.size();
dealii::Vector<double> global_parameter_representation (n_fine_dofs);
- typename dealii::DoFHandler<dim,spacedim>::active_cell_iterator cell;
- std::vector<types::global_dof_index> parameter_dof_indices (coarse_fe.dofs_per_cell);
+ typename dealii::DoFHandler<dim,spacedim>::active_cell_iterator cell;
- std::vector<unsigned int> parameter_dof_indices (coarse_fe.dofs_per_cell);
++ std::vector<types::global_dof_index> parameter_dof_indices (coarse_fe.dofs_per_cell);
for (cell=begin; cell!=end; ++cell)
{
unsigned int n_parameters_on_fine_grid=0;
if (true)
{
- // have a flag for each dof on
- // the fine grid and set it
- // to true if this is an
- // interesting dof. finally count
- // how many true's there
- std::vector<bool> dof_is_interesting (fine_grid.n_dofs(), false);
- std::vector<types::global_dof_index> local_dof_indices (fine_fe.dofs_per_cell);
+ // have a flag for each dof on
+ // the fine grid and set it
+ // to true if this is an
+ // interesting dof. finally count
+ // how many trues there are
+ std::vector<bool> dof_is_interesting (fine_grid.n_dofs(), false);
- std::vector<unsigned int> local_dof_indices (fine_fe.dofs_per_cell);
++ std::vector<types::global_dof_index> local_dof_indices (fine_fe.dofs_per_cell);
for (typename dealii::DoFHandler<dim,spacedim>::active_cell_iterator
- cell=fine_grid.begin_active();
+ cell=fine_grid.begin_active();
cell!=fine_grid.end(); ++cell)
{
cell->get_dof_indices (local_dof_indices);
weight_mapping.clear ();
weight_mapping.resize (n_fine_dofs, -1);
- if (true)
- {
- std::vector<types::global_dof_index> local_dof_indices(fine_fe.dofs_per_cell);
- unsigned int next_free_index=0;
- for (typename dealii::DoFHandler<dim,spacedim>::active_cell_iterator
- cell=fine_grid.begin_active();
- cell != fine_grid.end(); ++cell)
- {
- cell->get_dof_indices (local_dof_indices);
- for (unsigned int i=0; i<fine_fe.dofs_per_cell; ++i)
-
- // if this DoF is a
- // parameter dof and has
- // not yet been numbered,
- // then do so
+ if (true)
+ {
- std::vector<unsigned int> local_dof_indices(fine_fe.dofs_per_cell);
++ std::vector<types::global_dof_index> local_dof_indices(fine_fe.dofs_per_cell);
+ unsigned int next_free_index=0;
+ for (typename dealii::DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=fine_grid.begin_active();
+ cell != fine_grid.end(); ++cell)
+ {
+ cell->get_dof_indices (local_dof_indices);
+ for (unsigned int i=0; i<fine_fe.dofs_per_cell; ++i)
++
+ // if this DoF is a
+ // parameter dof and has
+ // not yet been numbered,
+ // then do so
if ((fine_fe.system_to_component_index(i).first == fine_component) &&
(weight_mapping[local_dof_indices[i]] == -1))
{
template <class DH>
void
map_dof_to_boundary_indices (const DH &dof_handler,
- std::vector<types::global_dof_index> &mapping)
- std::vector<unsigned int> &mapping)
++ std::vector<types::global_dof_index> &mapping)
{
Assert (&dof_handler.get_fe() != 0, ExcNoFESelected());
{
namespace
{
- template <class DH>
+ template<class DH>
void
- map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> & mapping,
+ map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
- const DH &dof_handler,
- std::map<unsigned int,Point<DH::space_dimension> > &support_points)
+ const DH &dof_handler,
+ std::map<types::global_dof_index,Point<DH::space_dimension> > &support_points)
{
const unsigned int dim = DH::dimension;
const unsigned int spacedim = DH::space_dimension;
typename DH::active_cell_iterator cell =
dof_handler.begin_active(), endc = dof_handler.end();
- std::vector<types::global_dof_index> local_dof_indices;
- for (; cell != endc; ++cell)
- // only work on locally relevant cells
- if (cell->is_artificial() == false)
- {
- hp_fe_values.reinit(cell);
- const FEValues<dim, spacedim> &fe_values = hp_fe_values.get_present_fe_values();
- std::vector<unsigned int> local_dof_indices;
++ std::vector<types::global_dof_index> local_dof_indices;
+ for (; cell != endc; ++cell)
+ // only work on locally relevant cells
+ if (cell->is_artificial() == false)
+ {
+ hp_fe_values.reinit(cell);
+ const FEValues<dim, spacedim> &fe_values = hp_fe_values.get_present_fe_values();
local_dof_indices.resize(cell->get_fe().dofs_per_cell);
cell->get_dof_indices(local_dof_indices);
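
For reference, the public entry point backed by these helpers can be used as in the following sketch; the mapping and element picked here are illustrative assumptions, while the vector-of-points signature is the one being reformatted in these hunks:

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/mapping_q1.h>

#include <iostream>
#include <vector>

int main ()
{
  using namespace dealii;

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (2);

  FE_Q<2>       fe (1);
  DoFHandler<2> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  // one support point per global DoF
  MappingQ1<2>           mapping;
  std::vector<Point<2> > support_points (dof_handler.n_dofs());
  DoFTools::map_dofs_to_support_points (mapping, dof_handler, support_points);

  std::cout << "DoF 0 sits at " << support_points[0] << std::endl;
}
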
template <class DH>
void
- map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> & mapping,
+ map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
- const DH &dof_handler,
- std::vector<Point<DH::space_dimension> > &support_points)
+ const DH &dof_handler,
+ std::vector<Point<DH::space_dimension> > &support_points)
{
- // get the data in the form of the map as above
- std::map<types::global_dof_index,Point<DH::space_dimension> > x_support_points;
- map_dofs_to_support_points(mapping, dof_handler, x_support_points);
+ // get the data in the form of the map as above
- std::map<unsigned int,Point<DH::space_dimension> > x_support_points;
++ std::map<types::global_dof_index,Point<DH::space_dimension> > x_support_points;
+ map_dofs_to_support_points(mapping, dof_handler, x_support_points);
- // now convert from the map to the linear vector. make sure every
- // entry really appeared in the map
+ // now convert from the map to the linear vector. make sure every
+ // entry really appeared in the map
for (unsigned int i=0; i<dof_handler.n_dofs(); ++i)
{
Assert (x_support_points.find(i) != x_support_points.end(),
Assert (component_mask.n_selected_components(n_components) > 0,
VectorTools::ExcNoComponentSelected());
- // a field to store the indices
+ // a field to store the indices
- std::vector<unsigned int> face_dofs;
+ std::vector<types::global_dof_index> face_dofs;
face_dofs.reserve (max_dofs_per_face(dof));
typename DH<dim,spacedim>::active_cell_iterator
template <class DH, class Sparsity>
void make_cell_patches(
- Sparsity& block_list,
- const DH& dof_handler,
+ Sparsity &block_list,
+ const DH &dof_handler,
const unsigned int level,
- const std::vector<bool>& selected_dofs,
+ const std::vector<bool> &selected_dofs,
- unsigned int offset)
+ types::global_dof_index offset)
{
typename DH::cell_iterator cell;
typename DH::cell_iterator endc = dof_handler.end(level);
template <int dim, int spacedim>
void
FE_DGPNonparametric<dim,spacedim>::fill_fe_values (
- const Mapping<dim,spacedim>&,
- const typename Triangulation<dim,spacedim>::cell_iterator&,
- const Quadrature<dim>&,
- typename Mapping<dim,spacedim>::InternalDataBase&,
- typename Mapping<dim,spacedim>::InternalDataBase& fedata,
- FEValuesData<dim,spacedim>&data,
+ const Mapping<dim,spacedim> &,
+ const typename Triangulation<dim,spacedim>::cell_iterator &,
+ const Quadrature<dim> &,
+ typename Mapping<dim,spacedim>::InternalDataBase &,
+ typename Mapping<dim,spacedim>::InternalDataBase &fedata,
+ FEValuesData<dim,spacedim> &data,
- CellSimilarity::Similarity & /*cell_similarity*/) const
+ CellSimilarity::Similarity &/*cell_similarity*/) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
Assert (dynamic_cast<InternalData *> (&fedata) != 0,
ExcInternalError());
InternalData &fe_data = static_cast<InternalData &> (fedata);
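
The "convert data object to internal data" pattern above recurs throughout these hunks: a dynamic_cast inside an assertion documents and checks the expected dynamic type in debug mode, and the subsequent static_cast performs the actual, cheap conversion. A small standalone illustration of the idiom, with invented class names:

#include <cassert>
#include <iostream>

struct InternalDataBase
{
  virtual ~InternalDataBase () {}
};

struct InternalData : InternalDataBase
{
  int n_shape_functions;
};

void fill_values (InternalDataBase &fedata)
{
  // in debug mode, verify that the caller really handed us an
  // InternalData object; in optimized mode the check disappears
  assert (dynamic_cast<InternalData *> (&fedata) != 0);

  // the conversion itself is then done with the cheaper static_cast
  InternalData &fe_data = static_cast<InternalData &> (fedata);
  std::cout << fe_data.n_shape_functions << std::endl;
}

int main ()
{
  InternalData data;
  data.n_shape_functions = 4;
  fill_values (data);
}
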
template <int dim>
std::vector<std::pair<unsigned int, unsigned int> >
FE_Nothing<dim> ::
-hp_vertex_dof_identities (const FiniteElement<dim> & /*fe_other*/) const
+hp_vertex_dof_identities (const FiniteElement<dim> &/*fe_other*/) const
{
- // the FE_Nothing has no
- // degrees of freedom, so there
- // are no equivalencies to be
- // recorded
- return std::vector<std::pair<unsigned int, unsigned int> > ();
+ // the FE_Nothing has no
+ // degrees of freedom, so there
+ // are no equivalencies to be
+ // recorded
+ return std::vector<std::pair<unsigned int, unsigned int> > ();
}
template <int dim>
std::vector<std::pair<unsigned int, unsigned int> >
FE_Nothing<dim> ::
-hp_line_dof_identities (const FiniteElement<dim> & /*fe_other*/) const
+hp_line_dof_identities (const FiniteElement<dim> &/*fe_other*/) const
{
- // the FE_Nothing has no
- // degrees of freedom, so there
- // are no equivalencies to be
- // recorded
- return std::vector<std::pair<unsigned int, unsigned int> > ();
+ // the FE_Nothing has no
+ // degrees of freedom, so there
+ // are no equivalencies to be
+ // recorded
+ return std::vector<std::pair<unsigned int, unsigned int> > ();
}
template <int dim>
std::vector<std::pair<unsigned int, unsigned int> >
FE_Nothing<dim> ::
-hp_quad_dof_identities (const FiniteElement<dim> & /*fe_other*/) const
+hp_quad_dof_identities (const FiniteElement<dim> &/*fe_other*/) const
{
- // the FE_Nothing has no
- // degrees of freedom, so there
- // are no equivalencies to be
- // recorded
- return std::vector<std::pair<unsigned int, unsigned int> > ();
+ // the FE_Nothing has no
+ // degrees of freedom, so there
+ // are no equivalencies to be
+ // recorded
+ return std::vector<std::pair<unsigned int, unsigned int> > ();
}
template <int dim>
void
FE_Nothing<dim>::
-get_face_interpolation_matrix (const FiniteElement<dim> & /*source_fe*/,
+get_face_interpolation_matrix (const FiniteElement<dim> &/*source_fe*/,
FullMatrix<double> &interpolation_matrix) const
{
- // since this element has no face dofs, the
- // interpolation matrix is necessarily empty
+ // since this element has no face dofs, the
+ // interpolation matrix is necessarily empty
Assert (interpolation_matrix.m() == 0,
ExcDimensionMismatch (interpolation_matrix.m(),
FE_Nothing<dim>::
get_subface_interpolation_matrix (const FiniteElement<dim> & /*source_fe*/,
const unsigned int /*index*/,
- FullMatrix<double> &interpolation_matrix) const
+ FullMatrix<double> &interpolation_matrix) const
{
- // since this element has no face dofs, the
- // interpolation matrix is necessarily empty
+ // since this element has no face dofs, the
+ // interpolation matrix is necessarily empty
Assert (interpolation_matrix.m() == 0,
ExcDimensionMismatch (interpolation_matrix.m(),
}
template <>
-void FE_Q<1>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/)
+void FE_Q<1>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/)
{
- // no faces in 1d, so nothing to do
+ // no faces in 1d, so nothing to do
}
template <>
}
template <>
-void FE_Q<1,2>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/)
+void FE_Q<1,2>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/)
{
- // no faces in 1d, so nothing to do
+ // no faces in 1d, so nothing to do
}
template <>
}
template <>
-void FE_Q<1,3>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/)
+void FE_Q<1,3>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/)
{
- // no faces in 1d, so nothing to do
+ // no faces in 1d, so nothing to do
}
template <int dim, int spacedim>
template <int dim, int spacedim>
FESystem<dim,spacedim>::FESystem (
- const std::vector<const FiniteElement<dim,spacedim>*> &fes,
+ const std::vector<const FiniteElement<dim,spacedim>*> &fes,
const std::vector<unsigned int> &multiplicities)
- :
- FiniteElement<dim,spacedim> (multiply_dof_numbers(fes, multiplicities),
- compute_restriction_is_additive_flags (fes, multiplicities),
- compute_nonzero_components(fes, multiplicities)),
- base_elements(count_nonzeros(multiplicities))
+ :
+ FiniteElement<dim,spacedim> (multiply_dof_numbers(fes, multiplicities),
+ compute_restriction_is_additive_flags (fes, multiplicities),
+ compute_nonzero_components(fes, multiplicities)),
+ base_elements(count_nonzeros(multiplicities))
{
initialize(fes, multiplicities);
}
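
The constructor reformatted above takes a vector of element pointers together with their multiplicities. A brief usage sketch of that constructor; the particular sub-elements combined here are an illustrative assumption:

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_system.h>

#include <iostream>
#include <vector>

int main ()
{
  using namespace dealii;

  // (Q2)^2 x DGQ1, e.g. a velocity-pressure pair
  FE_Q<2>   q2 (2);
  FE_DGQ<2> dgq1 (1);

  std::vector<const FiniteElement<2>*> fes;
  std::vector<unsigned int>            multiplicities;
  fes.push_back (&q2);    multiplicities.push_back (2);
  fes.push_back (&dgq1);  multiplicities.push_back (1);

  FESystem<2> fe (fes, multiplicities);
  std::cout << fe.get_name () << " has "
            << fe.n_components () << " components" << std::endl;
}
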
template <int dim, int spacedim>
Tensor<1,dim>
FESystem<dim,spacedim>::shape_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const
- const Point<dim> &p,
++ const Point<dim> &p,
+ const unsigned int component) const
{
Assert (i<this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert (component < this->n_components(),
template <int dim, int spacedim>
Tensor<2,dim>
FESystem<dim,spacedim>::shape_grad_grad_component (const unsigned int i,
- const Point<dim> &p,
- const unsigned int component) const
- const Point<dim> &p,
++ const Point<dim> &p,
+ const unsigned int component) const
{
Assert (i<this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert (component < this->n_components(),
u2.compress();
touch_count.compress();
- // if we work on parallel distributed
- // vectors, we have to ensure, that we only
- // work on dofs this processor owns.
+ // if we work on parallel distributed
+ // vectors, we have to ensure that we only
+ // work on dofs this processor owns.
IndexSet locally_owned_dofs = dof2.locally_owned_dofs();
- // when a discontinuous element is
- // interpolated to a continuous
- // one, we take the mean values.
- // for parallel vectors check,
- // if this component is owned by
- // this processor.
+ // when a discontinuous element is
+ // interpolated to a continuous
+ // one, we take the mean values.
+ // for parallel vectors, check
+ // if this component is owned by
+ // this processor.
- for (unsigned int i=0; i<dof2.n_dofs(); ++i)
+ for (types::global_dof_index i=0; i<dof2.n_dofs(); ++i)
if (locally_owned_dofs.is_element(i))
{
Assert(touch_count(i)!=0, ExcInternalError());
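
The loop above restricts the averaging to DoFs this process owns, using the IndexSet returned by locally_owned_dofs(). A standalone illustration of that membership test; the index range used here is an arbitrary stand-in:

#include <deal.II/base/types.h>
#include <deal.II/base/index_set.h>

#include <iostream>

int main ()
{
  using namespace dealii;

  IndexSet locally_owned (100);      // an index space of size 100
  locally_owned.add_range (20, 40);  // pretend this process owns [20,40)

  // visit only the locally owned entries, as the interpolation loop does
  for (types::global_dof_index i = 0; i < locally_owned.size (); ++i)
    if (locally_owned.is_element (i))
      {
        // ... work on entry i, e.g. divide the accumulated value by
        // its touch count ...
      }

  std::cout << locally_owned.n_elements ()
            << " locally owned indices" << std::endl;
}
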
template <int dim, int spacedim>
class FEValuesBase<dim,spacedim>::CellIteratorBase
{
- public:
- /**
- * Destructor. Made virtual
- * since we store only
- * pointers to the base
- * class.
- */
- virtual ~CellIteratorBase ();
-
- /**
- * Conversion operator to an
- * iterator for
- * triangulations. This
- * conversion is implicit for
- * the original iterators,
- * since they are derived
- * classes. However, since
- * here we have kind of a
- * parallel class hierarchy,
- * we have to have a
- * conversion operator.
- */
- virtual
- operator typename Triangulation<dim,spacedim>::cell_iterator () const = 0;
-
- /**
- * Return the number of
- * degrees of freedom the DoF
- * handler object has to
- * which the iterator belongs
- * to.
- */
- virtual
- types::global_dof_index
- n_dofs_for_dof_handler () const = 0;
+ public:
+ /**
+ * Destructor. Made virtual
+ * since we store only
+ * pointers to the base
+ * class.
+ */
+ virtual ~CellIteratorBase ();
+
+ /**
+ * Conversion operator to an
+ * iterator for
+ * triangulations. This
+ * conversion is implicit for
+ * the original iterators,
+ * since they are derived
+ * classes. However, since
+ * here we have kind of a
+ * parallel class hierarchy,
+ * we have to have a
+ * conversion operator.
+ */
+ virtual
+ operator typename Triangulation<dim,spacedim>::cell_iterator () const = 0;
+
+ /**
+ * Return the number of
+ * degrees of freedom the DoF
+ * handler object has to
+ * which the iterator belongs
+ * to.
+ */
+ virtual
- unsigned int
++ types::global_dof_index
+ n_dofs_for_dof_handler () const = 0;
#include "fe_values.decl.1.inst"
template <typename CI>
class FEValuesBase<dim,spacedim>::CellIterator : public FEValuesBase<dim,spacedim>::CellIteratorBase
{
- public:
- /**
- * Constructor. Take an
- * iterator and store it in
- * this class.
- */
- CellIterator (const CI &cell);
-
- /**
- * Conversion operator to an
- * iterator for
- * triangulations. This
- * conversion is implicit for
- * the original iterators,
- * since they are derived
- * classes. However, since
- * here we have kind of a
- * parallel class hierarchy,
- * we have to have a
- * conversion operator.
- */
- virtual
- operator typename Triangulation<dim,spacedim>::cell_iterator () const;
-
- /**
- * Return the number of
- * degrees of freedom the DoF
- * handler object has to
- * which the iterator belongs
- * to.
- */
- virtual
- types::global_dof_index
- n_dofs_for_dof_handler () const;
+ public:
+ /**
+ * Constructor. Take an
+ * iterator and store it in
+ * this class.
+ */
+ CellIterator (const CI &cell);
+
+ /**
+ * Conversion operator to an
+ * iterator for
+ * triangulations. This
+ * conversion is implicit for
+ * the original iterators,
+ * since they are derived
+ * classes. However, since
+ * here we have kind of a
+ * parallel class hierarchy,
+ * we have to have a
+ * conversion operator.
+ */
+ virtual
+ operator typename Triangulation<dim,spacedim>::cell_iterator () const;
+
+ /**
+ * Return the number of
+ * degrees of freedom the DoF
+ * handler object has to
+ * which the iterator belongs
+ * to.
+ */
+ virtual
- unsigned int
++ types::global_dof_index
+ n_dofs_for_dof_handler () const;
#include "fe_values.decl.2.inst"
template <int dim, int spacedim>
class FEValuesBase<dim,spacedim>::TriaCellIterator : public FEValuesBase<dim,spacedim>::CellIteratorBase
{
- public:
- /**
- * Constructor. Take an
- * iterator and store it in
- * this class.
- */
- TriaCellIterator (const typename Triangulation<dim,spacedim>::cell_iterator &cell);
-
- /**
- * Conversion operator to an
- * iterator for
- * triangulations. This
- * conversion is implicit for
- * the original iterators,
- * since they are derived
- * classes. However, since
- * here we have kind of a
- * parallel class hierarchy,
- * we have to have a
- * conversion operator. Here,
- * the conversion is trivial,
- * from and to the same time.
- */
- virtual
- operator typename Triangulation<dim,spacedim>::cell_iterator () const;
-
- /**
- * Implement the respective
- * function of the base
- * class. Since this is not
- * possible, we just raise an
- * error.
- */
- virtual
- types::global_dof_index
- n_dofs_for_dof_handler () const;
+ public:
+ /**
+ * Constructor. Take an
+ * iterator and store it in
+ * this class.
+ */
+ TriaCellIterator (const typename Triangulation<dim,spacedim>::cell_iterator &cell);
+
+ /**
+ * Conversion operator to an
+ * iterator for
+ * triangulations. This
+ * conversion is implicit for
+ * the original iterators,
+ * since they are derived
+ * classes. However, since
+ * here we have kind of a
+ * parallel class hierarchy,
+ * we have to have a
+ * conversion operator. Here,
+ * the conversion is trivial,
+ * from and to the same type.
+ */
+ virtual
+ operator typename Triangulation<dim,spacedim>::cell_iterator () const;
+
+ /**
+ * Implement the respective
+ * function of the base
+ * class. Since this is not
+ * possible, we just raise an
+ * error.
+ */
+ virtual
- unsigned int
++ types::global_dof_index
+ n_dofs_for_dof_handler () const;
#include "fe_values.decl.2.inst"
template <int dim, int spacedim>
template <class InputVector, typename number>
void FEValuesBase<dim,spacedim>::get_function_values (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
std::vector<number> &values) const
{
Assert (this->update_flags & update_values, ExcAccessToUninitializedField());
template <int dim, int spacedim>
template <class InputVector, typename number>
void FEValuesBase<dim,spacedim>::get_function_values (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Vector<number> >& values) const
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Vector<number> > &values) const
{
- // One value per quadrature point
+ // One value per quadrature point
Assert (n_quadrature_points == values.size(),
ExcDimensionMismatch(values.size(), n_quadrature_points));
template <int dim, int spacedim>
template <class InputVector>
void FEValuesBase<dim,spacedim>::get_function_values (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
VectorSlice<std::vector<std::vector<double> > > values,
bool quadrature_points_fastest) const
{
template <int dim, int spacedim>
template <class InputVector>
void FEValuesBase<dim,spacedim>::get_function_gradients (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
std::vector<Tensor<1,spacedim> > &gradients) const
{
Assert (this->update_flags & update_gradients, ExcAccessToUninitializedField());
template <int dim, int spacedim>
template <class InputVector>
void FEValuesBase<dim,spacedim>::get_function_gradients (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
VectorSlice<std::vector<std::vector<Tensor<1,spacedim> > > > gradients,
bool quadrature_points_fastest) const
{
template <int dim, int spacedim>
template <class InputVector>
void FEValuesBase<dim,spacedim>::get_function_hessians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
std::vector<Tensor<2,spacedim> > &hessians) const
{
Assert (this->update_flags & update_second_derivatives, ExcAccessToUninitializedField());
template <int dim, int spacedim>
template <class InputVector>
void FEValuesBase<dim, spacedim>::get_function_hessians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
VectorSlice<std::vector<std::vector<Tensor<2,spacedim> > > > hessians,
bool quadrature_points_fastest) const
{
template <int dim, int spacedim>
template <class InputVector, typename number>
void FEValuesBase<dim,spacedim>::get_function_laplacians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
std::vector<number> &laplacians) const
{
Assert (this->update_flags & update_hessians, ExcAccessToUninitializedField());
template <int dim, int spacedim>
template <class InputVector, typename number>
void FEValuesBase<dim,spacedim>::get_function_laplacians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<Vector<number> >& laplacians) const
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<Vector<number> > &laplacians) const
{
- // One value per quadrature point
+ // One value per quadrature point
Assert (n_quadrature_points == laplacians.size(),
ExcDimensionMismatch(laplacians.size(), n_quadrature_points));
template <int dim, int spacedim>
template <class InputVector, typename number>
void FEValuesBase<dim,spacedim>::get_function_laplacians (
- const InputVector& fe_function,
- const VectorSlice<const std::vector<types::global_dof_index> >& indices,
- std::vector<std::vector<number> >& laplacians,
+ const InputVector &fe_function,
- const VectorSlice<const std::vector<unsigned int> > &indices,
++ const VectorSlice<const std::vector<types::global_dof_index> > &indices,
+ std::vector<std::vector<number> > &laplacians,
bool quadrature_points_fastest) const
{
const unsigned int n_components = fe->n_components();
typename Mapping<dim, spacedim>::InternalDataBase &mapping_data,
std::vector<Point<dim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,dim> > &boundary_forms,
+ std::vector<Tensor<1,dim> > &boundary_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0,
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
+ InternalData &data = static_cast<InternalData &> (mapping_data);
compute_fill (cell, face_no, invalid_face_number,
CellSimilarity::none,
typename Mapping<dim, spacedim>::InternalDataBase &mapping_data,
std::vector<Point<dim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,dim> > &boundary_forms,
+ std::vector<Tensor<1,dim> > &boundary_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0, ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0, ExcInternalError());
+ InternalData &data = static_cast<InternalData &> (mapping_data);
compute_fill (cell, face_no, sub_no, CellSimilarity::none,
data,
typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
std::vector<Point<spacedim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &exterior_forms,
+ std::vector<Tensor<1,spacedim> > &exterior_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0,
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
-
- // check whether this cell needs
- // the full mapping or can be
- // treated by a reduced Q1 mapping,
- // e.g. if the cell is entirely in
- // the interior of the domain. note
- // that it is not sufficient to ask
- // whether the present _face_ is in
- // the interior, as the mapping on
- // the face depends on the mapping
- // of the cell, which in turn
- // depends on the fact whether
- // _any_ of the faces of this cell
- // is at the boundary, not only the
- // present face
+ InternalData &data = static_cast<InternalData &> (mapping_data);
+
+ // check whether this cell needs
+ // the full mapping or can be
+ // treated by a reduced Q1 mapping,
+ // e.g. if the cell is entirely in
+ // the interior of the domain. note
+ // that it is not sufficient to ask
+ // whether the present _face_ is in
+ // the interior, as the mapping on
+ // the face depends on the mapping
+ // of the cell, which in turn
+ // depends on whether
+ // _any_ of the faces of this cell
+ // is at the boundary, not only the
+ // present face
data.use_mapping_q1_on_current_cell=!(use_mapping_q_on_all_cells
|| cell->has_boundary_lines());
template<int dim, int spacedim>
void
MappingQ<dim,spacedim>::fill_fe_subface_values (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
- const unsigned int face_no,
- const unsigned int sub_no,
- const Quadrature<dim-1> &q,
- typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
- std::vector<Point<spacedim> > &quadrature_points,
- std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &exterior_forms,
- std::vector<Point<spacedim> > &normal_vectors) const
+ const unsigned int face_no,
+ const unsigned int sub_no,
+ const Quadrature<dim-1> &q,
+ typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
+ std::vector<Point<spacedim> > &quadrature_points,
+ std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &exterior_forms,
++ std::vector<Tensor<1,spacedim> > &exterior_forms,
+ std::vector<Point<spacedim> > &normal_vectors) const
{
- // convert data object to internal
- // data for this class. fails with
- // an exception if that is not
- // possible
- Assert (dynamic_cast<InternalData*> (&mapping_data) != 0,
+ // convert data object to internal
+ // data for this class. fails with
+ // an exception if that is not
+ // possible
+ Assert (dynamic_cast<InternalData *> (&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&> (mapping_data);
-
- // check whether this cell needs
- // the full mapping or can be
- // treated by a reduced Q1 mapping,
- // e.g. if the cell is entirely in
- // the interior of the domain. note
- // that it is not sufficient to ask
- // whether the present _face_ is in
- // the interior, as the mapping on
- // the face depends on the mapping
- // of the cell, which in turn
- // depends on the fact whether
- // _any_ of the faces of this cell
- // is at the boundary, not only the
- // present face
+ InternalData &data = static_cast<InternalData &> (mapping_data);
+
+ // check whether this cell needs
+ // the full mapping or can be
+ // treated by a reduced Q1 mapping,
+ // e.g. if the cell is entirely in
+ // the interior of the domain. note
+ // that it is not sufficient to ask
+ // whether the present _face_ is in
+ // the interior, as the mapping on
+ // the face depends on the mapping
+ // of the cell, which in turn
+ // depends on whether
+ // _any_ of the faces of this cell
+ // is at the boundary, not only the
+ // present face
data.use_mapping_q1_on_current_cell=!(use_mapping_q_on_all_cells
|| cell->has_boundary_lines());
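
The flag checked above is fixed at construction time of the mapping. A short, hedged sketch of the two construction variants (the polynomial degree is an arbitrary choice):

#include <deal.II/fe/mapping_q.h>

int main ()
{
  using namespace dealii;

  // default: only cells touching the curved boundary get the full
  // degree-2 mapping, interior cells fall back to a Q1 mapping
  MappingQ<2> boundary_only (2);

  // force the full mapping everywhere, so the check above always
  // selects the higher-order code path
  MappingQ<2> everywhere (2, /*use_mapping_q_on_all_cells=*/true);
}
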
typename Mapping<dim,spacedim>::InternalDataBase &mapping_data,
std::vector<Point<spacedim> > &quadrature_points,
std::vector<double> &JxW_values,
- std::vector<Tensor<1,spacedim> > &boundary_forms,
+ std::vector<Tensor<1,spacedim> > &boundary_forms,
std::vector<Point<spacedim> > &normal_vectors) const
{
- // ensure that the following cast
- // is really correct:
+ // ensure that the following cast
+ // is really correct:
Assert (dynamic_cast<InternalData *>(&mapping_data) != 0,
ExcInternalError());
- InternalData &data = static_cast<InternalData&>(mapping_data);
+ InternalData &data = static_cast<InternalData &>(mapping_data);
const unsigned int n_q_points = q.size();
template <int dim, class EulerVectorType, int spacedim>
MappingQ1Eulerian<dim, EulerVectorType, spacedim>::
-MappingQ1Eulerian (const EulerVectorType &euler_transform_vectors,
+MappingQ1Eulerian (const EulerVectorType &euler_transform_vectors,
const DoFHandler<dim,spacedim> &shiftmap_dof_handler)
- :
- euler_transform_vectors(&euler_transform_vectors),
- shiftmap_dof_handler(&shiftmap_dof_handler)
+ :
+ euler_transform_vectors(&euler_transform_vectors),
+ shiftmap_dof_handler(&shiftmap_dof_handler)
{}
void
GridGenerator::moebius (
- Triangulation<3>& tria,
- Triangulation<3> &tria,
++ Triangulation<3> &tria,
const unsigned int n_cells,
const unsigned int n_rotations,
const double R,
void
- GridGenerator::torus (Triangulation<2,3>& tria,
-GridGenerator::torus (Triangulation<2,3> &tria,
++GridGenerator::torus (Triangulation<2,3> &tria,
const double R,
const double r)
{
template<>
void
GridGenerator::parallelogram (
- Triangulation<2>& tria,
- const Tensor<2,2>& corners,
- Triangulation<2> &tria,
++ Triangulation<2> &tria,
+ const Tensor<2,2> &corners,
const bool colorize)
{
std::vector<Point<2> > vertices (GeometryInfo<2>::vertices_per_cell);
template <int dim, int spacedim>
-void GridIn<dim, spacedim>::debug_output_grid (const std::vector<CellData<dim> > & /*cells*/,
- const std::vector<Point<spacedim> > & /*vertices*/,
- std::ostream & /*out*/)
+void GridIn<dim, spacedim>::debug_output_grid (const std::vector<CellData<dim> > &/*cells*/,
- const std::vector<Point<spacedim> > &/*vertices*/,
- std::ostream &/*out*/)
++ const std::vector<Point<spacedim> > &/*vertices*/,
++ std::ostream &/*out*/)
{
Assert (false, ExcNotImplemented());
}
const unsigned int s1,
const unsigned int s2,
const unsigned int s3,
- const CellData<2> &cd)
+ const CellData<2> &cd)
- :
- original_cell_data (cd)
+ :
+ original_cell_data (cd)
{
v[0] = v0;
v[1] = v1;
<< arg1 << " and " << arg2 << " is multiply set.");
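
The long documentation comment in the following hunk explains why the per-dimension code lives in a separate Implementation struct: member functions of a class template cannot be partially specialized, but member templates of a helper struct can be overloaded on the dim part while staying generic in spacedim. A minimal standalone sketch of that pattern; the class names here are invented, not deal.II's:

#include <iostream>

template <int dim, int spacedim> struct Tria;   // forward declaration

struct Impl
{
  // overload resolution on the first template argument replaces the
  // (illegal) partial specialization Tria<1,spacedim>::create()
  template <int spacedim>
  static void create (Tria<1,spacedim> &)
  {
    std::cout << "1d code path" << std::endl;
  }

  template <int spacedim>
  static void create (Tria<2,spacedim> &)
  {
    std::cout << "2d code path" << std::endl;
  }
};

template <int dim, int spacedim>
struct Tria
{
  void create ()
  {
    Impl::create (*this);
  }
};

int main ()
{
  Tria<1,2> t12;  t12.create ();   // picks the 1d overload
  Tria<2,2> t22;  t22.create ();   // picks the 2d overload
}
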
- /**
- * A class into which we put many of the functions that implement
- * functionality of the Triangulation class. The main reason for this
- * class is as follows: the majority of the functions in Triangulation
- * need to be implemented differently for dim==1, dim==2, and
- * dim==3. However, their implementation is largly independent of the
- * spacedim template parameter. So we would like to write things like
- *
- * template <int spacedim>
- * void Triangulation<1,spacedim>::create_triangulation (...) {...}
- *
- * Unfortunately, C++ doesn't allow this: member functions of class
- * templates have to be either not specialized at all, or fully
- * specialized. No partial specialization is allowed. One possible
- * solution would be to just duplicate the bodies of the functions and
- * have equally implemented functions
- *
- * template <>
- * void Triangulation<1,1>::create_triangulation (...) {...}
- *
- * template <>
- * void Triangulation<1,2>::create_triangulation (...) {...}
- *
- * but that is clearly an unsatisfactory solution. Rather, what we do
- * is introduce the current Implementation class in which we can write
- * these functions as member templates over spacedim, i.e. we can have
- *
- * template <int dim_, int spacedim_>
- * template <int spacedim>
- * void Triangulation<dim_,spacedim_>::Implementation::
- * create_triangulation (...,
- * Triangulation<1,spacedim> &tria ) {...}
- *
- * The outer template parameters are here unused, only the inner
- * ones are of real interest.
- *
- * One may ask why we put these functions into an class rather
- * than an anonymous namespace, for example?
- *
- * First, these implementation functions need to be friends of the
- * Triangulation class. It is simpler to make the entire class a friend
- * rather than listing all members of an implementation namespace as
- * friends of the Triangulation class (there is no such thing as a "friend
- * namespace XXX" directive).
- *
- * Ideally, we would make this class a member class of the
- * Triangulation<dim,spacedim> class, since then our implementation functions
- * have immediate access to the typedefs and static functions of the
- * surrounding Triangulation class. I.e., we do not have to write "typename
- * Triangulation<dim,spacedim>::active_cell_iterator" but can write
- * "active_cell_iterator" right away. This is, in fact, the way it was
- * implemented first, but we ran into a bug in gcc4.0:
- * @code
- * class Triangulation {
- * struct Implementation;
- * friend class TriaAccessor;
- * };
- *
- * class TriaAccessor {
- * struct Implementation;
- * friend class Triangulation;
- * };
- * @endcode
- *
- * Here, friendship (per C++ standard) is supposed to extend to all members of
- * the befriended class, including its 'Implementation' member class. But gcc4.0
- * gets this wrong: the members of Triangulation::Implementation are not friends
- * of TriaAccessor and the other way around. Ideally, one would fix this by
- * saying
- * @code
- * class Triangulation {
- * struct Implementation;
- * friend class TriaAccessor;
- * friend class TriaAccessor::Implementation; // **
- * };
- *
- * class TriaAccessor {
- * struct Implementation;
- * friend class Triangulation;
- * friend class Triangulation::Implementation;
- * };
- * @endcode
- * but that's not legal because in ** we don't know yet that TriaAccessor has
- * a member class Implementation and so we can't make it a friend. The only
- * way forward at this point was to make Implementation a class in the
- * internal namespace so that we can forward declare it and make it a friend
- * of the respective other outer class -- not quite what we wanted but the
- * only way I could see to make it work...
- */
+ /**
+ * A class into which we put many of the functions that implement
+ * functionality of the Triangulation class. The main reason for this
+ * class is as follows: the majority of the functions in Triangulation
+ * need to be implemented differently for dim==1, dim==2, and
+ * dim==3. However, their implementation is largely independent of the
+ * spacedim template parameter. So we would like to write things like
+ *
+ * template <int spacedim>
+ * void Triangulation<1,spacedim>::create_triangulation (...) {...}
+ *
+ * Unfortunately, C++ doesn't allow this: member functions of class
+ * templates have to be either not specialized at all, or fully
+ * specialized. No partial specialization is allowed. One possible
+ * solution would be to just duplicate the bodies of the functions and
+ * have equally implemented functions
+ *
+ * template <>
+ * void Triangulation<1,1>::create_triangulation (...) {...}
+ *
+ * template <>
+ * void Triangulation<1,2>::create_triangulation (...) {...}
+ *
+ * but that is clearly an unsatisfactory solution. Rather, what we do
+ * is introduce the current Implementation class in which we can write
+ * these functions as member templates over spacedim, i.e. we can have
+ *
+ * template <int dim_, int spacedim_>
+ * template <int spacedim>
+ * void Triangulation<dim_,spacedim_>::Implementation::
+ * create_triangulation (...,
+ * Triangulation<1,spacedim> &tria ) {...}
+ *
+ * The outer template parameters are here unused, only the inner
+ * ones are of real interest.
+ *
+ * One may ask why we put these functions into a class rather
+ * than an anonymous namespace, for example?
+ *
+ * First, these implementation functions need to be friends of the
+ * Triangulation class. It is simpler to make the entire class a friend
+ * rather than listing all members of an implementation namespace as
+ * friends of the Triangulation class (there is no such thing as a "friend
+ * namespace XXX" directive).
+ *
+ * Ideally, we would make this class a member class of the
+ * Triangulation<dim,spacedim> class, since then our implementation functions
+ * have immediate access to the typedefs and static functions of the
+ * surrounding Triangulation class. I.e., we do not have to write "typename
+ * Triangulation<dim,spacedim>::active_cell_iterator" but can write
+ * "active_cell_iterator" right away. This is, in fact, the way it was
+ * implemented first, but we ran into a bug in gcc4.0:
+ * @code
+ * class Triangulation {
+ * struct Implementation;
+ * friend class TriaAccessor;
+ * };
+ *
+ * class TriaAccessor {
+ * struct Implementation;
+ * friend class Triangulation;
+ * };
+ * @endcode
+ *
+ * Here, friendship (per C++ standard) is supposed to extend to all members of
+ * the befriended class, including its 'Implementation' member class. But gcc4.0
+ * gets this wrong: the members of Triangulation::Implementation are not friends
+ * of TriaAccessor and the other way around. Ideally, one would fix this by
+ * saying
+ * @code
+ * class Triangulation {
+ * struct Implementation;
+ * friend class TriaAccessor;
+ * friend class TriaAccessor::Implementation; // **
+ * };
+ *
+ * class TriaAccessor {
+ * struct Implementation;
+ * friend class Triangulation;
+ * friend class Triangulation::Implementation;
+ * };
+ * @endcode
+ * but that's not legal because in ** we don't know yet that TriaAccessor has
+ * a member class Implementation and so we can't make it a friend. The only
+ * way forward at this point was to make Implementation a class in the
+ * internal namespace so that we can forward declare it and make it a friend
+ * of the respective other outer class -- not quite what we wanted but the
+ * only way I could see to make it work...
+ */
struct Implementation
{
- /**
- * For a given Triangulation, update the
- * number cache for lines. For 1d, we have
- * to deal with the fact that lines have
- * levels, whereas for higher dimensions
- * they do not.
- *
- * The second argument indicates
- * for how many levels the
- * Triangulation has objects,
- * though the highest levels need
- * not contain active cells if they
- * have previously all been
- * coarsened away.
- */
- template <int dim, int spacedim>
- static
- void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
- const unsigned int level_objects,
- internal::Triangulation::NumberCache<1> &number_cache)
- {
- typedef
- typename Triangulation<dim,spacedim>::line_iterator line_iterator;
- typedef
- typename Triangulation<dim,spacedim>::active_line_iterator active_line_iterator;
-
- number_cache.n_levels = 0;
- if (level_objects > 0)
- // find the last level
- // on which there are
- // used cells
- for (unsigned int level=0; level<level_objects; ++level)
- if (triangulation.begin(level) !=
- triangulation.end(level))
- number_cache.n_levels = level+1;
-
- // no cells at all?
- Assert (number_cache.n_levels > 0, ExcInternalError());
-
- ///////////////////////////////////
- // update the number of lines
- // on the different levels in
- // the cache
- number_cache.n_lines_level.resize (number_cache.n_levels);
- number_cache.n_lines = 0;
-
- number_cache.n_active_lines_level.resize (number_cache.n_levels);
- number_cache.n_active_lines = 0;
-
- // for 1d, lines have levels so take
- // count the objects per level and
- // globally
- if (dim == 1)
- {
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count lines on this level
- number_cache.n_lines_level[level] = 0;
-
- line_iterator line = triangulation.begin_line (level),
- endc = (level == number_cache.n_levels-1 ?
- line_iterator(triangulation.end_line()) :
- triangulation.begin_line (level+1));
- for (; line!=endc; ++line)
- ++number_cache.n_lines_level[level];
-
- // update total number of lines
- number_cache.n_lines += number_cache.n_lines_level[level];
- }
-
- // do the update for the number of
- // active lines as well
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count lines on this level
- number_cache.n_active_lines_level[level] = 0;
-
- active_line_iterator line = triangulation.begin_active_line (level),
- endc = triangulation.end_line ();
- for (; (line!=endc) && (line->level() == static_cast<signed int>(level)); ++line)
- ++number_cache.n_active_lines_level[level];
-
- // update total number of lines
- number_cache.n_active_lines += number_cache.n_active_lines_level[level];
- }
- }
- else
- {
- // for dim>1, there are no
- // levels for lines
- {
- line_iterator line = triangulation.begin_line (),
- endc = triangulation.end_line();
- for (; line!=endc; ++line)
- ++number_cache.n_lines;
- }
-
- {
- active_line_iterator line = triangulation.begin_active_line (),
- endc = triangulation.end_line();
- for (; line!=endc; ++line)
- ++number_cache.n_active_lines;
- }
- }
- }
-
- /**
- * For a given Triangulation, update the
- * number cache for quads. For 2d, we have
- * to deal with the fact that quads have
- * levels, whereas for higher dimensions
- * they do not.
- *
- * The second argument indicates
- * for how many levels the
- * Triangulation has objects,
- * though the highest levels need
- * not contain active cells if they
- * have previously all been
- * coarsened away.
- *
- * At the beginning of the function, we call the
- * respective function to update the number
- * cache for lines.
- */
- template <int dim, int spacedim>
- static
- void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
- const unsigned int level_objects,
- internal::Triangulation::NumberCache<2> &number_cache)
- {
- // update lines and n_levels
- compute_number_cache (triangulation,
- level_objects,
- static_cast<internal::Triangulation::NumberCache<1>&>
- (number_cache));
-
- typedef
- typename Triangulation<dim,spacedim>::quad_iterator quad_iterator;
- typedef
- typename Triangulation<dim,spacedim>::active_quad_iterator active_quad_iterator;
-
- ///////////////////////////////////
- // update the number of quads
- // on the different levels in
- // the cache
- number_cache.n_quads_level.resize (number_cache.n_levels);
- number_cache.n_quads = 0;
-
- number_cache.n_active_quads_level.resize (number_cache.n_levels);
- number_cache.n_active_quads = 0;
-
- // for 2d, quads have levels so take
- // count the objects per level and
- // globally
- if (dim == 2)
- {
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count quads on this level
- number_cache.n_quads_level[level] = 0;
-
- quad_iterator quad = triangulation.begin_quad (level),
- endc = (level == number_cache.n_levels-1 ?
- quad_iterator(triangulation.end_quad()) :
- triangulation.begin_quad (level+1));
- for (; quad!=endc; ++quad)
- ++number_cache.n_quads_level[level];
-
- // update total number of quads
- number_cache.n_quads += number_cache.n_quads_level[level];
- }
-
- // do the update for the number of
- // active quads as well
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count quads on this level
- number_cache.n_active_quads_level[level] = 0;
-
- active_quad_iterator quad = triangulation.begin_active_quad (level),
- endc = triangulation.end_quad ();
- for (; (quad!=endc) && (quad->level() == static_cast<signed int>(level)); ++quad)
- ++number_cache.n_active_quads_level[level];
-
- // update total number of quads
- number_cache.n_active_quads += number_cache.n_active_quads_level[level];
- }
- }
- else
- {
- // for dim>2, there are no
- // levels for quads
- {
- quad_iterator quad = triangulation.begin_quad (),
- endc = triangulation.end_quad();
- for (; quad!=endc; ++quad)
- ++number_cache.n_quads;
- }
-
- {
- active_quad_iterator quad = triangulation.begin_active_quad (),
- endc = triangulation.end_quad();
- for (; quad!=endc; ++quad)
- ++number_cache.n_active_quads;
- }
- }
- }
-
- /**
- * For a given Triangulation, update the
- * number cache for hexes. For 3d, we have
- * to deal with the fact that hexes have
- * levels, whereas for higher dimensions
- * they do not.
- *
- * The second argument indicates
- * for how many levels the
- * Triangulation has objects,
- * though the highest levels need
- * not contain active cells if they
- * have previously all been
- * coarsened away.
- *
- * At the end of the function, we call the
- * respective function to update the number
- * cache for quads, which will in turn call
- * the respective function for lines.
- */
- template <int dim, int spacedim>
- static
- void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
- const unsigned int level_objects,
- internal::Triangulation::NumberCache<3> &number_cache)
- {
- // update quads, lines and n_levels
- compute_number_cache (triangulation,
- level_objects,
- static_cast<internal::Triangulation::NumberCache<2>&>
- (number_cache));
-
- typedef
- typename Triangulation<dim,spacedim>::hex_iterator hex_iterator;
- typedef
- typename Triangulation<dim,spacedim>::active_hex_iterator active_hex_iterator;
-
- ///////////////////////////////////
- // update the number of hexes
- // on the different levels in
- // the cache
- number_cache.n_hexes_level.resize (number_cache.n_levels);
- number_cache.n_hexes = 0;
-
- number_cache.n_active_hexes_level.resize (number_cache.n_levels);
- number_cache.n_active_hexes = 0;
-
- // for 3d, hexes have levels so take
- // count the objects per level and
- // globally
- if (dim == 3)
- {
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count hexes on this level
- number_cache.n_hexes_level[level] = 0;
-
- hex_iterator hex = triangulation.begin_hex (level),
- endc = (level == number_cache.n_levels-1 ?
- hex_iterator(triangulation.end_hex()) :
- triangulation.begin_hex (level+1));
- for (; hex!=endc; ++hex)
- ++number_cache.n_hexes_level[level];
-
- // update total number of hexes
- number_cache.n_hexes += number_cache.n_hexes_level[level];
- }
-
- // do the update for the number of
- // active hexes as well
- for (unsigned int level=0; level<number_cache.n_levels; ++level)
- {
- // count hexes on this level
- number_cache.n_active_hexes_level[level] = 0;
-
- active_hex_iterator hex = triangulation.begin_active_hex (level),
- endc = triangulation.end_hex ();
- for (; (hex!=endc) && (hex->level() == static_cast<signed int>(level)); ++hex)
- ++number_cache.n_active_hexes_level[level];
-
- // update total number of hexes
- number_cache.n_active_hexes += number_cache.n_active_hexes_level[level];
- }
- }
- else
- {
- // for dim>3, there are no
- // levels for hexs
- {
- hex_iterator hex = triangulation.begin_hex (),
- endc = triangulation.end_hex();
- for (; hex!=endc; ++hex)
- ++number_cache.n_hexes;
- }
-
- {
- active_hex_iterator hex = triangulation.begin_active_hex (),
- endc = triangulation.end_hex();
- for (; hex!=endc; ++hex)
- ++number_cache.n_active_hexes;
- }
- }
- }
-
-
- /**
- * Create a triangulation from
- * given data. This function does
- * this work for 1-dimensional
- * triangulations independently
- * of the actual space dimension.
- */
- template <int spacedim>
- static
- void
- create_triangulation (const std::vector<Point<spacedim> > &v,
- const std::vector<CellData<1> > &cells,
- const SubCellData &/*subcelldata*/,
- Triangulation<1,spacedim> &triangulation)
+ /**
+ * For a given Triangulation, update the
+ * number cache for lines. For 1d, we have
+ * to deal with the fact that lines have
+ * levels, whereas for higher dimensions
+ * they do not.
+ *
+ * The second argument indicates
+ * for how many levels the
+ * Triangulation has objects,
+ * though the highest levels need
+ * not contain active cells if they
+ * have previously all been
+ * coarsened away.
+ */
+ template <int dim, int spacedim>
+ static
+ void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
+ const unsigned int level_objects,
+ internal::Triangulation::NumberCache<1> &number_cache)
+ {
+ typedef
+ typename Triangulation<dim,spacedim>::line_iterator line_iterator;
+ typedef
+ typename Triangulation<dim,spacedim>::active_line_iterator active_line_iterator;
+
+ number_cache.n_levels = 0;
+ if (level_objects > 0)
+ // find the last level
+ // on which there are
+ // used cells
+ for (unsigned int level=0; level<level_objects; ++level)
+ if (triangulation.begin(level) !=
+ triangulation.end(level))
+ number_cache.n_levels = level+1;
+
+ // no cells at all?
+ Assert (number_cache.n_levels > 0, ExcInternalError());
+
+ ///////////////////////////////////
+ // update the number of lines
+ // on the different levels in
+ // the cache
+ number_cache.n_lines_level.resize (number_cache.n_levels);
+ number_cache.n_lines = 0;
+
+ number_cache.n_active_lines_level.resize (number_cache.n_levels);
+ number_cache.n_active_lines = 0;
+
+ // for 1d, lines have levels, so
+ // count the objects per level and
+ // globally
+ if (dim == 1)
{
- AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
- AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
-
- // note: since no boundary
- // information can be given in one
- // dimension, the @p{subcelldata}
- // field is ignored. (only used for
- // error checking, which is a good
- // idea in any case)
- const unsigned int dim=1;
-
- // copy vertices
- triangulation.vertices = v;
- triangulation.vertices_used = std::vector<bool> (v.size(), true);
-
- // store the indices of the lines
- // which are adjacent to a given
- // vertex
- std::vector<std::vector<int> > lines_at_vertex (v.size());
-
- // reserve enough space
- triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
- triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
- triangulation.levels[0]->cells.reserve_space (0,cells.size());
-
- // make up cells
- typename Triangulation<dim,spacedim>::raw_line_iterator
- next_free_line = triangulation.begin_raw_line ();
- for (unsigned int cell=0; cell<cells.size(); ++cell)
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
{
- while (next_free_line->used())
- ++next_free_line;
-
- next_free_line->set (internal::Triangulation
- ::TriaObject<1> (cells[cell].vertices[0],
- cells[cell].vertices[1]));
- next_free_line->set_used_flag ();
- next_free_line->set_material_id (cells[cell].material_id);
- next_free_line->clear_user_data ();
- next_free_line->set_subdomain_id (0);
-
- // note that this cell is
- // adjacent to these vertices
- lines_at_vertex[cells[cell].vertices[0]].push_back (cell);
- lines_at_vertex[cells[cell].vertices[1]].push_back (cell);
+ // count lines on this level
+ number_cache.n_lines_level[level] = 0;
+
+ line_iterator line = triangulation.begin_line (level),
+ endc = (level == number_cache.n_levels-1 ?
+ line_iterator(triangulation.end_line()) :
+ triangulation.begin_line (level+1));
+ for (; line!=endc; ++line)
+ ++number_cache.n_lines_level[level];
+
+ // update total number of lines
+ number_cache.n_lines += number_cache.n_lines_level[level];
}
+ // do the update for the number of
+ // active lines as well
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count lines on this level
+ number_cache.n_active_lines_level[level] = 0;
+
+ active_line_iterator line = triangulation.begin_active_line (level),
+ endc = triangulation.end_line ();
+ for (; (line!=endc) && (line->level() == static_cast<signed int>(level)); ++line)
+ ++number_cache.n_active_lines_level[level];
- // some security tests
+ // update total number of lines
+ number_cache.n_active_lines += number_cache.n_active_lines_level[level];
+ }
+ }
+ else
+ {
+ // for dim>1, there are no
+ // levels for lines
{
- unsigned int boundary_nodes = 0;
- for (unsigned int i=0; i<lines_at_vertex.size(); ++i)
- switch (lines_at_vertex[i].size())
- {
- case 1:
- // this vertex has only
- // one adjacent line
- ++boundary_nodes;
- break;
- case 2:
- break;
- default:
- // a node must have one
- // or two adjacent
- // lines
- AssertThrow (false, ExcInternalError());
- }
+ line_iterator line = triangulation.begin_line (),
+ endc = triangulation.end_line();
+ for (; line!=endc; ++line)
+ ++number_cache.n_lines;
+ }
- // assert there are no more
- // than two boundary
- // nodes. note that if the
- // space dimension is
- // bigger than 1, then we
- // can have fewer than 2
- // nodes (for example a
- // ring of cells -- no end
- // points at all)
- AssertThrow (((spacedim == 1) && (boundary_nodes == 2))
- ||
- (spacedim > 1),
- ExcMessage("The Triangulation has too many end points"));
+ {
+ active_line_iterator line = triangulation.begin_active_line (),
+ endc = triangulation.end_line();
+ for (; line!=endc; ++line)
+ ++number_cache.n_active_lines;
}
+ }
+ }
+ /**
+ * For a given Triangulation, update the
+ * number cache for quads. For 2d, we have
+ * to deal with the fact that quads have
+ * levels, whereas for higher dimensions
+ * they do not.
+ *
+ * The second argument indicates
+ * for how many levels the
+ * Triangulation has objects,
+ * though the highest levels need
+ * not contain active cells if they
+ * have previously all been
+ * coarsened away.
+ *
+ * At the beginning of the function, we call the
+ * respective function to update the number
+ * cache for lines.
+ */
+ template <int dim, int spacedim>
+ static
+ void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
+ const unsigned int level_objects,
+ internal::Triangulation::NumberCache<2> &number_cache)
+ {
+ // update lines and n_levels
+ compute_number_cache (triangulation,
+ level_objects,
+ static_cast<internal::Triangulation::NumberCache<1>&>
+ (number_cache));
+
+ typedef
+ typename Triangulation<dim,spacedim>::quad_iterator quad_iterator;
+ typedef
+ typename Triangulation<dim,spacedim>::active_quad_iterator active_quad_iterator;
+
+ ///////////////////////////////////
+ // update the number of quads
+ // on the different levels in
+ // the cache
+ number_cache.n_quads_level.resize (number_cache.n_levels);
+ number_cache.n_quads = 0;
+
+ number_cache.n_active_quads_level.resize (number_cache.n_levels);
+ number_cache.n_active_quads = 0;
+
+ // for 2d, quads have levels, so
+ // count the objects per level and
+ // globally
+ if (dim == 2)
+ {
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count quads on this level
+ number_cache.n_quads_level[level] = 0;
+
+ quad_iterator quad = triangulation.begin_quad (level),
+ endc = (level == number_cache.n_levels-1 ?
+ quad_iterator(triangulation.end_quad()) :
+ triangulation.begin_quad (level+1));
+ for (; quad!=endc; ++quad)
+ ++number_cache.n_quads_level[level];
+
+ // update total number of quads
+ number_cache.n_quads += number_cache.n_quads_level[level];
+ }
+ // do the update for the number of
+ // active quads as well
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count quads on this level
+ number_cache.n_active_quads_level[level] = 0;
- // update neighborship info
- typename Triangulation<dim,spacedim>::active_line_iterator
- line = triangulation.begin_active_line ();
- // for all lines
- for (; line!=triangulation.end(); ++line)
- // for each of the two vertices
- for (unsigned int vertex=0; vertex<GeometryInfo<dim>::vertices_per_cell; ++vertex)
- // if first cell adjacent to
- // this vertex is the present
- // one, then the neighbor is
- // the second adjacent cell and
- // vice versa
- if (lines_at_vertex[line->vertex_index(vertex)][0] == line->index())
- if (lines_at_vertex[line->vertex_index(vertex)].size() == 2)
- {
- const typename Triangulation<dim,spacedim>::cell_iterator
- neighbor (&triangulation,
- 0, // level
- lines_at_vertex[line->vertex_index(vertex)][1]);
- line->set_neighbor (vertex, neighbor);
- }
- else
- // no second adjacent cell
- // entered -> cell at
- // boundary
- line->set_neighbor (vertex, triangulation.end());
- else
- // present line is not first
- // adjacent one -> first
- // adjacent one is neighbor
- {
- const typename Triangulation<dim,spacedim>::cell_iterator
- neighbor (&triangulation,
- 0, // level
- lines_at_vertex[line->vertex_index(vertex)][0]);
- line->set_neighbor (vertex, neighbor);
- }
+ active_quad_iterator quad = triangulation.begin_active_quad (level),
+ endc = triangulation.end_quad ();
+ for (; (quad!=endc) && (quad->level() == static_cast<signed int>(level)); ++quad)
+ ++number_cache.n_active_quads_level[level];
- // finally set the
- // vertex_to_boundary_id_map_1d
- // map
- triangulation.vertex_to_boundary_id_map_1d->clear();
- for (typename Triangulation<dim,spacedim>::active_cell_iterator
- cell = triangulation.begin_active();
- cell != triangulation.end(); ++cell)
- for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
- if (cell->at_boundary(f))
- (*triangulation
- .vertex_to_boundary_id_map_1d)[cell->face(f)->vertex_index()]
- = f;
+ // update total number of quads
+ number_cache.n_active_quads += number_cache.n_active_quads_level[level];
+ }
}
+ else
+ {
+ // for dim>2, there are no
+ // levels for quads
+ {
+ quad_iterator quad = triangulation.begin_quad (),
+ endc = triangulation.end_quad();
+ for (; quad!=endc; ++quad)
+ ++number_cache.n_quads;
+ }
+ {
+ active_quad_iterator quad = triangulation.begin_active_quad (),
+ endc = triangulation.end_quad();
+ for (; quad!=endc; ++quad)
+ ++number_cache.n_active_quads;
+ }
+ }
+ }
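+
+ // A brief sketch, assuming the standard accessors, of what the quad
+ // cache above backs: in 2d the quads are the cells, so the per-level
+ // counts stored here are what n_active_cells(level) reports, and the
+ // global counts are returned by n_quads()/n_active_quads():
+ //
+ //   Triangulation<2> tria;
+ //   GridGenerator::hyper_cube (tria);
+ //   tria.refine_global (3);
+ //   const unsigned int n_quads        = tria.n_quads();
+ //   const unsigned int n_active_quads = tria.n_active_quads();
+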
- /**
- * Create a triangulation from
- * given data. This function does
- * this work for 2-dimensional
- * triangulations independently
- * of the actual space dimension.
- */
- template <int spacedim>
- static
- void
- create_triangulation (const std::vector<Point<spacedim> > &v,
- const std::vector<CellData<2> > &cells,
- const SubCellData &subcelldata,
- Triangulation<2,spacedim> &triangulation)
+ /**
+ * For a given Triangulation, update the
+ * number cache for hexes. For 3d, we have
+ * to deal with the fact that hexes have
+ * levels, whereas for higher dimensions
+ * they do not.
+ *
+ * The second argument indicates
+ * for how many levels the
+ * Triangulation has objects,
+ * though the highest levels need
+ * not contain active cells if they
+ * have previously all been
+ * coarsened away.
+ *
+ * At the beginning of the function, we call the
+ * respective function to update the number
+ * cache for quads, which will in turn call
+ * the respective function for lines.
+ */
+ template <int dim, int spacedim>
+ static
+ void compute_number_cache (const Triangulation<dim,spacedim> &triangulation,
+ const unsigned int level_objects,
+ internal::Triangulation::NumberCache<3> &number_cache)
+ {
+ // update quads, lines and n_levels
+ compute_number_cache (triangulation,
+ level_objects,
+ static_cast<internal::Triangulation::NumberCache<2>&>
+ (number_cache));
+
+ typedef
+ typename Triangulation<dim,spacedim>::hex_iterator hex_iterator;
+ typedef
+ typename Triangulation<dim,spacedim>::active_hex_iterator active_hex_iterator;
+
+ ///////////////////////////////////
+ // update the number of hexes
+ // on the different levels in
+ // the cache
+ number_cache.n_hexes_level.resize (number_cache.n_levels);
+ number_cache.n_hexes = 0;
+
+ number_cache.n_active_hexes_level.resize (number_cache.n_levels);
+ number_cache.n_active_hexes = 0;
+
+ // for 3d, hexes have levels, so
+ // count the objects per level and
+ // globally
+ if (dim == 3)
{
- AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
- AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
-
- const unsigned int dim=2;
-
- // copy vertices
- triangulation.vertices = v;
- triangulation.vertices_used = std::vector<bool> (v.size(), true);
-
- // make up a list of the needed
- // lines each line is a pair of
- // vertices. The list is kept
- // sorted and it is guaranteed that
- // each line is inserted only once.
- // While the key of such an entry
- // is the pair of vertices, the
- // thing it points to is an
- // iterator pointing to the line
- // object itself. In the first run,
- // these iterators are all invalid
- // ones, but they are filled
- // afterwards
- std::map<std::pair<int,int>,
- typename Triangulation<dim,spacedim>::line_iterator> needed_lines;
- for (unsigned int cell=0; cell<cells.size(); ++cell)
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
{
- for (unsigned int vertex=0; vertex<4; ++vertex)
- AssertThrow (cells[cell].vertices[vertex] < triangulation.vertices.size(),
- ExcInvalidVertexIndex (cell, cells[cell].vertices[vertex],
- triangulation.vertices.size()));
-
- for (unsigned int line=0; line<GeometryInfo<dim>::faces_per_cell; ++line)
- {
- // given a line vertex number
- // (0,1) on a specific line we
- // get the cell vertex number
- // (0-4) through the
- // line_to_cell_vertices
- // function
- std::pair<int,int> line_vertices(
- cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
- cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)]);
-
- // assert that the line was
- // not already inserted in
- // reverse order. This
- // happens in spite of the
- // vertex rotation above,
- // if the sense of the cell
- // was incorrect.
- //
- // Here is what usually
- // happened when this
- // exception is thrown:
- // consider these two cells
- // and the vertices
- // 3---4---5
- // | | |
- // 0---1---2
- // If in the input vector
- // the two cells are given
- // with vertices <0 1 4 3>
- // and <4 1 2 5>, in the
- // first cell the middle
- // line would have
- // direction 1->4, while in
- // the second it would be
- // 4->1. This will cause
- // the exception.
- AssertThrow (needed_lines.find(std::make_pair(line_vertices.second,
- line_vertices.first))
- ==
- needed_lines.end(),
- ExcGridHasInvalidCell(cell));
-
- // insert line, with
- // invalid iterator if line
- // already exists, then
- // nothing bad happens here
- needed_lines[line_vertices] = triangulation.end_line();
- }
+ // count hexes on this level
+ number_cache.n_hexes_level[level] = 0;
+
+ hex_iterator hex = triangulation.begin_hex (level),
+ endc = (level == number_cache.n_levels-1 ?
+ hex_iterator(triangulation.end_hex()) :
+ triangulation.begin_hex (level+1));
+ for (; hex!=endc; ++hex)
+ ++number_cache.n_hexes_level[level];
+
+ // update total number of hexes
+ number_cache.n_hexes += number_cache.n_hexes_level[level];
}
+ // do the update for the number of
+ // active hexes as well
+ for (unsigned int level=0; level<number_cache.n_levels; ++level)
+ {
+ // count hexes on this level
+ number_cache.n_active_hexes_level[level] = 0;
+
+ active_hex_iterator hex = triangulation.begin_active_hex (level),
+ endc = triangulation.end_hex ();
+ for (; (hex!=endc) && (hex->level() == static_cast<signed int>(level)); ++hex)
+ ++number_cache.n_active_hexes_level[level];
- // check that every vertex has at
- // least two adjacent lines
+ // update total number of hexes
+ number_cache.n_active_hexes += number_cache.n_active_hexes_level[level];
+ }
+ }
+ else
+ {
+ // for dim>3, there are no
+ // levels for hexes
{
- std::vector<unsigned short int> vertex_touch_count (v.size(), 0);
- typename std::map<std::pair<int,int>,
- typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
- for (i=needed_lines.begin(); i!=needed_lines.end(); i++)
- {
- // touch the vertices of
- // this line
- ++vertex_touch_count[i->first.first];
- ++vertex_touch_count[i->first.second];
- }
+ hex_iterator hex = triangulation.begin_hex (),
+ endc = triangulation.end_hex();
+ for (; hex!=endc; ++hex)
+ ++number_cache.n_hexes;
+ }
- // assert minimum touch count
- // is at least two. if not so,
- // then clean triangulation and
- // exit with an exception
- AssertThrow (* (std::min_element(vertex_touch_count.begin(),
- vertex_touch_count.end())) >= 2,
- ExcGridHasInvalidVertices());
+ {
+ active_hex_iterator hex = triangulation.begin_active_hex (),
+ endc = triangulation.end_hex();
+ for (; hex!=endc; ++hex)
+ ++number_cache.n_active_hexes;
}
+ }
+ }
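+
+ // A hedged sketch of the cascade described in the comment above:
+ // filling the hex cache also refreshes the quad and line caches, so
+ // after any refinement the following counts are all consistent
+ // (assuming the usual public accessors):
+ //
+ //   Triangulation<3> tria;
+ //   GridGenerator::hyper_cube (tria);
+ //   tria.refine_global (1);
+ //   tria.n_hexes();   // all cells on all levels
+ //   tria.n_quads();   // all faces
+ //   tria.n_lines();   // all edges
+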
- // reserve enough space
- triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
- triangulation.faces = new internal::Triangulation::TriaFaces<dim>;
- triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
- triangulation.faces->lines.reserve_space (0,needed_lines.size());
- triangulation.levels[0]->cells.reserve_space (0,cells.size());
- // make up lines
- {
- typename Triangulation<dim,spacedim>::raw_line_iterator
- line = triangulation.begin_raw_line();
- typename std::map<std::pair<int,int>,
- typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
- for (i = needed_lines.begin();
- line!=triangulation.end_line(); ++line, ++i)
+ /**
+ * Create a triangulation from
+ * given data. This function does
+ * this work for 1-dimensional
+ * triangulations independently
+ * of the actual space dimension.
+ */
+ template <int spacedim>
+ static
+ void
+ create_triangulation (const std::vector<Point<spacedim> > &v,
+ const std::vector<CellData<1> > &cells,
- const SubCellData & /*subcelldata*/,
++ const SubCellData &/*subcelldata*/,
+ Triangulation<1,spacedim> &triangulation)
+ {
+ AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
+ AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
+
+ // note: since no boundary
+ // information can be given in one
+ // dimension, the @p{subcelldata}
+ // field is ignored. (only used for
+ // error checking, which is a good
+ // idea in any case)
+ const unsigned int dim=1;
+
+ // copy vertices
+ triangulation.vertices = v;
+ triangulation.vertices_used = std::vector<bool> (v.size(), true);
+
+ // store the indices of the lines
+ // which are adjacent to a given
+ // vertex
+ std::vector<std::vector<int> > lines_at_vertex (v.size());
+
+ // reserve enough space
+ triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
+ triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
+ triangulation.levels[0]->cells.reserve_space (0,cells.size());
+
+ // make up cells
+ typename Triangulation<dim,spacedim>::raw_line_iterator
+ next_free_line = triangulation.begin_raw_line ();
+ for (unsigned int cell=0; cell<cells.size(); ++cell)
+ {
+ while (next_free_line->used())
+ ++next_free_line;
+
+ next_free_line->set (internal::Triangulation
+ ::TriaObject<1> (cells[cell].vertices[0],
+ cells[cell].vertices[1]));
+ next_free_line->set_used_flag ();
+ next_free_line->set_material_id (cells[cell].material_id);
+ next_free_line->clear_user_data ();
+ next_free_line->set_subdomain_id (0);
+
+ // note that this cell is
+ // adjacent to these vertices
+ lines_at_vertex[cells[cell].vertices[0]].push_back (cell);
+ lines_at_vertex[cells[cell].vertices[1]].push_back (cell);
+ }
+
+
+ // some security tests
+ {
+ unsigned int boundary_nodes = 0;
+ for (unsigned int i=0; i<lines_at_vertex.size(); ++i)
+ switch (lines_at_vertex[i].size())
+ {
+ case 1:
+ // this vertex has only
+ // one adjacent line
+ ++boundary_nodes;
+ break;
+ case 2:
+ break;
+ default:
+ // a node must have one
+ // or two adjacent
+ // lines
+ AssertThrow (false, ExcInternalError());
+ }
+
+ // assert there are no more
+ // than two boundary
+ // nodes. note that if the
+ // space dimension is
+ // bigger than 1, then we
+ // can have fewer than 2
+ // nodes (for example a
+ // ring of cells -- no end
+ // points at all)
+ AssertThrow (((spacedim == 1) && (boundary_nodes == 2))
+ ||
+ (spacedim > 1),
+ ExcMessage("The Triangulation has too many end points"));
+ }
+
+
+
+ // update neighborship info
+ typename Triangulation<dim,spacedim>::active_line_iterator
+ line = triangulation.begin_active_line ();
+ // for all lines
+ for (; line!=triangulation.end(); ++line)
+ // for each of the two vertices
+ for (unsigned int vertex=0; vertex<GeometryInfo<dim>::vertices_per_cell; ++vertex)
+ // if first cell adjacent to
+ // this vertex is the present
+ // one, then the neighbor is
+ // the second adjacent cell and
+ // vice versa
+ if (lines_at_vertex[line->vertex_index(vertex)][0] == line->index())
+ if (lines_at_vertex[line->vertex_index(vertex)].size() == 2)
{
- line->set (internal::Triangulation::TriaObject<1>(i->first.first,
- i->first.second));
- line->set_used_flag ();
- line->clear_user_flag ();
- line->clear_user_data ();
- i->second = line;
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ neighbor (&triangulation,
+ 0, // level
+ lines_at_vertex[line->vertex_index(vertex)][1]);
+ line->set_neighbor (vertex, neighbor);
}
+ else
+ // no second adjacent cell
+ // entered -> cell at
+ // boundary
+ line->set_neighbor (vertex, triangulation.end());
+ else
+ // present line is not first
+ // adjacent one -> first
+ // adjacent one is neighbor
+ {
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ neighbor (&triangulation,
+ 0, // level
+ lines_at_vertex[line->vertex_index(vertex)][0]);
+ line->set_neighbor (vertex, neighbor);
+ }
+
+ // finally set the
+ // vertex_to_boundary_id_map_1d
+ // map
+ triangulation.vertex_to_boundary_id_map_1d->clear();
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+ if (cell->at_boundary(f))
+ (*triangulation
+ .vertex_to_boundary_id_map_1d)[cell->face(f)->vertex_index()]
+ = f;
+ }
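+
+ // A minimal sketch of the public entry point this helper implements,
+ // assuming the usual Point/CellData/SubCellData types: a single line
+ // cell connecting two vertices.
+ //
+ //   std::vector<Point<1> > vertices;
+ //   vertices.push_back (Point<1>(0.));
+ //   vertices.push_back (Point<1>(1.));
+ //   std::vector<CellData<1> > cells (1);
+ //   cells[0].vertices[0] = 0;
+ //   cells[0].vertices[1] = 1;
+ //   Triangulation<1> tria;
+ //   tria.create_triangulation (vertices, cells, SubCellData());
+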
+
+
+ /**
+ * Create a triangulation from
+ * given data. This function does
+ * this work for 2-dimensional
+ * triangulations independently
+ * of the actual space dimension.
+ */
+ template <int spacedim>
+ static
+ void
+ create_triangulation (const std::vector<Point<spacedim> > &v,
+ const std::vector<CellData<2> > &cells,
+ const SubCellData &subcelldata,
+ Triangulation<2,spacedim> &triangulation)
+ {
+ AssertThrow (v.size() > 0, ExcMessage ("No vertices given"));
+ AssertThrow (cells.size() > 0, ExcMessage ("No cells given"));
+
+ const unsigned int dim=2;
+
+ // copy vertices
+ triangulation.vertices = v;
+ triangulation.vertices_used = std::vector<bool> (v.size(), true);
+
+ // make up a list of the needed
+ // lines; each line is a pair of
+ // vertices. The list is kept
+ // sorted and it is guaranteed that
+ // each line is inserted only once.
+ // While the key of such an entry
+ // is the pair of vertices, the
+ // thing it points to is an
+ // iterator pointing to the line
+ // object itself. In the first run,
+ // these iterators are all invalid
+ // ones, but they are filled
+ // afterwards
+ std::map<std::pair<int,int>,
+ typename Triangulation<dim,spacedim>::line_iterator> needed_lines;
+ for (unsigned int cell=0; cell<cells.size(); ++cell)
+ {
+ for (unsigned int vertex=0; vertex<4; ++vertex)
+ AssertThrow (cells[cell].vertices[vertex] < triangulation.vertices.size(),
+ ExcInvalidVertexIndex (cell, cells[cell].vertices[vertex],
+ triangulation.vertices.size()));
+
+ for (unsigned int line=0; line<GeometryInfo<dim>::faces_per_cell; ++line)
+ {
+ // given a line vertex number
+ // (0,1) on a specific line we
+ // get the cell vertex number
+ // (0-3) through the
+ // line_to_cell_vertices
+ // function
+ std::pair<int,int> line_vertices(
+ cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
+ cells[cell].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)]);
+
+ // assert that the line was
+ // not already inserted in
+ // reverse order. This
+ // happens in spite of the
+ // vertex rotation above,
+ // if the sense of the cell
+ // was incorrect.
+ //
+ // Here is what usually
+ // happened when this
+ // exception is thrown:
+ // consider these two cells
+ // and the vertices
+ // 3---4---5
+ // | | |
+ // 0---1---2
+ // If in the input vector
+ // the two cells are given
+ // with vertices <0 1 4 3>
+ // and <4 1 2 5>, in the
+ // first cell the middle
+ // line would have
+ // direction 1->4, while in
+ // the second it would be
+ // 4->1. This will cause
+ // the exception.
+ AssertThrow (needed_lines.find(std::make_pair(line_vertices.second,
+ line_vertices.first))
+ ==
+ needed_lines.end(),
+ ExcGridHasInvalidCell(cell));
+
+ // insert line, with
+ // invalid iterator if line
+ // already exists, then
+ // nothing bad happens here
+ needed_lines[line_vertices] = triangulation.end_line();
+ }
+ }
+
+
+ // check that every vertex has at
+ // least two adjacent lines
+ {
+ std::vector<unsigned short int> vertex_touch_count (v.size(), 0);
+ typename std::map<std::pair<int,int>,
+ typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
+ for (i=needed_lines.begin(); i!=needed_lines.end(); i++)
+ {
+ // touch the vertices of
+ // this line
+ ++vertex_touch_count[i->first.first];
+ ++vertex_touch_count[i->first.second];
}
+ // assert minimum touch count
+ // is at least two. if not so,
+ // then clean triangulation and
+ // exit with an exception
+ AssertThrow (* (std::min_element(vertex_touch_count.begin(),
+ vertex_touch_count.end())) >= 2,
+ ExcGridHasInvalidVertices());
+ }
- // store for each line index
- // the adjacent cells
- std::map<int,std::vector<typename Triangulation<dim,spacedim>::cell_iterator> >
- adjacent_cells;
+ // reserve enough space
+ triangulation.levels.push_back (new internal::Triangulation::TriaLevel<dim>);
+ triangulation.faces = new internal::Triangulation::TriaFaces<dim>;
+ triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim);
+ triangulation.faces->lines.reserve_space (0,needed_lines.size());
+ triangulation.levels[0]->cells.reserve_space (0,cells.size());
- // finally make up cells
+ // make up lines
+ {
+ typename Triangulation<dim,spacedim>::raw_line_iterator
+ line = triangulation.begin_raw_line();
+ typename std::map<std::pair<int,int>,
+ typename Triangulation<dim,spacedim>::line_iterator>::iterator i;
+ for (i = needed_lines.begin();
+ line!=triangulation.end_line(); ++line, ++i)
{
- typename Triangulation<dim,spacedim>::raw_cell_iterator
- cell = triangulation.begin_raw_quad();
- for (unsigned int c=0; c<cells.size(); ++c, ++cell)
- {
- typename Triangulation<dim,spacedim>::line_iterator
- lines[GeometryInfo<dim>::lines_per_cell];
- for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
- lines[line]=needed_lines[std::make_pair(
- cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
- cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)])];
-
- cell->set (internal::Triangulation::TriaObject<2> (lines[0]->index(),
- lines[1]->index(),
- lines[2]->index(),
- lines[3]->index()));
-
- cell->set_used_flag ();
- cell->set_material_id (cells[c].material_id);
- cell->clear_user_data ();
- cell->set_subdomain_id (0);
-
- // note that this cell is
- // adjacent to the four
- // lines
- for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
- adjacent_cells[lines[line]->index()].push_back (cell);
- }
+ line->set (internal::Triangulation::TriaObject<1>(i->first.first,
+ i->first.second));
+ line->set_used_flag ();
+ line->clear_user_flag ();
+ line->clear_user_data ();
+ i->second = line;
}
+ }
- for (typename Triangulation<dim,spacedim>::line_iterator
- line=triangulation.begin_line();
- line!=triangulation.end_line(); ++line)
- {
- const unsigned int n_adj_cells = adjacent_cells[line->index()].size();
- // assert that every line has
- // one or two adjacent cells
- AssertThrow ((n_adj_cells >= 1) &&
- (n_adj_cells <= 2),
- ExcInternalError());
+ // store for each line index
+ // the adjacent cells
+ std::map<int,std::vector<typename Triangulation<dim,spacedim>::cell_iterator> >
+ adjacent_cells;
- // if only one cell: line is at
- // boundary -> give it the
- // boundary indicator zero by
- // default
- if (n_adj_cells == 1)
- line->set_boundary_indicator (0);
- else
- // interior line -> numbers::internal_face_boundary_id
- line->set_boundary_indicator (numbers::internal_face_boundary_id);
- }
+ // finally make up cells
+ {
+ typename Triangulation<dim,spacedim>::raw_cell_iterator
+ cell = triangulation.begin_raw_quad();
+ for (unsigned int c=0; c<cells.size(); ++c, ++cell)
+ {
+ typename Triangulation<dim,spacedim>::line_iterator
+ lines[GeometryInfo<dim>::lines_per_cell];
+ for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
+ lines[line]=needed_lines[std::make_pair(
+ cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 0)],
+ cells[c].vertices[GeometryInfo<dim>::line_to_cell_vertices(line, 1)])];
+
+ cell->set (internal::Triangulation::TriaObject<2> (lines[0]->index(),
+ lines[1]->index(),
+ lines[2]->index(),
+ lines[3]->index()));
+
+ cell->set_used_flag ();
+ cell->set_material_id (cells[c].material_id);
+ cell->clear_user_data ();
+ cell->set_subdomain_id (0);
+
+ // note that this cell is
+ // adjacent to the four
+ // lines
+ for (unsigned int line=0; line<GeometryInfo<dim>::lines_per_cell; ++line)
+ adjacent_cells[lines[line]->index()].push_back (cell);
+ }
+ }
+
+
+ for (typename Triangulation<dim,spacedim>::line_iterator
+ line=triangulation.begin_line();
+ line!=triangulation.end_line(); ++line)
+ {
+ const unsigned int n_adj_cells = adjacent_cells[line->index()].size();
+ // assert that every line has
+ // one or two adjacent cells
+ AssertThrow ((n_adj_cells >= 1) &&
+ (n_adj_cells <= 2),
+ ExcInternalError());
+
+ // if only one cell: line is at
+ // boundary -> give it the
+ // boundary indicator zero by
+ // default
+ if (n_adj_cells == 1)
+ line->set_boundary_indicator (0);
+ else
+ // interior line -> numbers::internal_face_boundary_id
+ line->set_boundary_indicator (numbers::internal_face_boundary_id);
+ }
- // set boundary indicators where
- // given
- std::vector<CellData<1> >::const_iterator boundary_line
- = subcelldata.boundary_lines.begin();
- std::vector<CellData<1> >::const_iterator end_boundary_line
- = subcelldata.boundary_lines.end();
- for (; boundary_line!=end_boundary_line; ++boundary_line)
+ // set boundary indicators where
+ // given
+ std::vector<CellData<1> >::const_iterator boundary_line
+ = subcelldata.boundary_lines.begin();
+ std::vector<CellData<1> >::const_iterator end_boundary_line
+ = subcelldata.boundary_lines.end();
+ for (; boundary_line!=end_boundary_line; ++boundary_line)
+ {
+ typename Triangulation<dim,spacedim>::line_iterator line;
+ std::pair<int,int> line_vertices(std::make_pair(boundary_line->vertices[0],
+ boundary_line->vertices[1]));
+ if (needed_lines.find(line_vertices) != needed_lines.end())
+ // line found in this
+ // direction
+ line = needed_lines[line_vertices];
+ else
{
- typename Triangulation<dim,spacedim>::line_iterator line;
- std::pair<int,int> line_vertices(std::make_pair(boundary_line->vertices[0],
- boundary_line->vertices[1]));
+ // look whether it exists
+ // in reverse direction
+ std::swap (line_vertices.first, line_vertices.second);
if (needed_lines.find(line_vertices) != needed_lines.end())
- // line found in this
- // direction
line = needed_lines[line_vertices];
else
- {
- // look whether it exists
- // in reverse direction
- std::swap (line_vertices.first, line_vertices.second);
- if (needed_lines.find(line_vertices) != needed_lines.end())
- line = needed_lines[line_vertices];
- else
- // line does not exist
- AssertThrow (false, ExcLineInexistant(line_vertices.first,
- line_vertices.second));
- }
+ // line does not exist
+ AssertThrow (false, ExcLineInexistant(line_vertices.first,
+ line_vertices.second));
+ }
- // assert that we only set
- // boundary info once
- AssertThrow (! (line->boundary_indicator() != 0 &&
- line->boundary_indicator() != numbers::internal_face_boundary_id),
- ExcMultiplySetLineInfoOfLine(line_vertices.first,
- line_vertices.second));
+ // assert that we only set
+ // boundary info once
+ AssertThrow (! (line->boundary_indicator() != 0 &&
+ line->boundary_indicator() != numbers::internal_face_boundary_id),
+ ExcMultiplySetLineInfoOfLine(line_vertices.first,
+ line_vertices.second));
- // Assert that only exterior lines
- // are given a boundary indicator
- AssertThrow (! (line->boundary_indicator() == numbers::internal_face_boundary_id),
- ExcInteriorLineCantBeBoundary());
+ // Assert that only exterior lines
+ // are given a boundary indicator
+ AssertThrow (! (line->boundary_indicator() == numbers::internal_face_boundary_id),
+ ExcInteriorLineCantBeBoundary());
- line->set_boundary_indicator (boundary_line->boundary_id);
- }
+ line->set_boundary_indicator (boundary_line->boundary_id);
+ }
- // finally update neighborship info
- for (typename Triangulation<dim,spacedim>::cell_iterator
- cell=triangulation.begin(); cell!=triangulation.end(); ++cell)
- for (unsigned int side=0; side<4; ++side)
- if (adjacent_cells[cell->line(side)->index()][0] == cell)
- // first adjacent cell is
- // this one
- {
- if (adjacent_cells[cell->line(side)->index()].size() == 2)
- // there is another
- // adjacent cell
- cell->set_neighbor (side,
- adjacent_cells[cell->line(side)->index()][1]);
- }
- // first adjacent cell is not this
- // one, -> it must be the neighbor
- // we are looking for
- else
+ // finally update neighborship info
+ for (typename Triangulation<dim,spacedim>::cell_iterator
+ cell=triangulation.begin(); cell!=triangulation.end(); ++cell)
+ for (unsigned int side=0; side<4; ++side)
+ if (adjacent_cells[cell->line(side)->index()][0] == cell)
+ // first adjacent cell is
+ // this one
+ {
+ if (adjacent_cells[cell->line(side)->index()].size() == 2)
+ // there is another
+ // adjacent cell
cell->set_neighbor (side,
- adjacent_cells[cell->line(side)->index()][0]);
- }
+ adjacent_cells[cell->line(side)->index()][1]);
+ }
+ // first adjacent cell is not this
+ // one, -> it must be the neighbor
+ // we are looking for
+ else
+ cell->set_neighbor (side,
+ adjacent_cells[cell->line(side)->index()][0]);
+ }
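+
+ // Sketch of the corresponding public call, assuming the lexicographic
+ // vertex numbering of GeometryInfo<2> (0=(0,0), 1=(1,0), 2=(0,1),
+ // 3=(1,1)); cells whose vertex ordering is inconsistent with their
+ // neighbors trigger the ExcGridHasInvalidCell check above.
+ //
+ //   std::vector<Point<2> > vertices;
+ //   vertices.push_back (Point<2>(0,0));
+ //   vertices.push_back (Point<2>(1,0));
+ //   vertices.push_back (Point<2>(0,1));
+ //   vertices.push_back (Point<2>(1,1));
+ //   std::vector<CellData<2> > cells (1);
+ //   for (unsigned int v=0; v<4; ++v)
+ //     cells[0].vertices[v] = v;
+ //   Triangulation<2> tria;
+ //   tria.create_triangulation (vertices, cells, SubCellData());
+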
- /**
- * Invent an object which compares two internal::Triangulation::TriaObject<2>
- * against each other. This comparison is needed in order to establish a map
- * of TriaObject<2> to iterators in the Triangulation<3,3>::create_triangulation
- * function.
- *
- * Since this comparison is not canonical, we do not include it into the
- * general internal::Triangulation::TriaObject<2> class.
- */
- struct QuadComparator
+ /**
+ * Invent an object which compares two internal::Triangulation::TriaObject<2>
+ * against each other. This comparison is needed in order to establish a map
+ * of TriaObject<2> to iterators in the Triangulation<3,3>::create_triangulation
+ * function.
+ *
+ * Since this comparison is not canonical, we do not include it into the
+ * general internal::Triangulation::TriaObject<2> class.
+ */
+ struct QuadComparator
+ {
+ inline bool operator () (const internal::Triangulation::TriaObject<2> &q1,
+ const internal::Triangulation::TriaObject<2> &q2) const
{
- inline bool operator () (const internal::Triangulation::TriaObject<2> &q1,
- const internal::Triangulation::TriaObject<2> &q2) const
- {
- // here is room to
- // optimize the repeated
- // equality test of the
- // previous lines; the
- // compiler will probably
- // take care of most of
- // it anyway
- if ((q1.face(0) < q2.face(0)) ||
- ((q1.face(0) == q2.face(0)) &&
- (q1.face(1) < q2.face(1))) ||
- ((q1.face(0) == q2.face(0)) &&
- (q1.face(1) == q2.face(1)) &&
- (q1.face(2) < q2.face(2))) ||
- ((q1.face(0) == q2.face(0)) &&
- (q1.face(1) == q2.face(1)) &&
- (q1.face(2) == q2.face(2)) &&
- (q1.face(3) < q2.face(3))))
- return true;
- else
- return false;
- }
- };
+ // here is room to
+ // optimize the repeated
+ // equality test of the
+ // previous lines; the
+ // compiler will probably
+ // take care of most of
+ // it anyway
+ if ((q1.face(0) < q2.face(0)) ||
+ ((q1.face(0) == q2.face(0)) &&
+ (q1.face(1) < q2.face(1))) ||
+ ((q1.face(0) == q2.face(0)) &&
+ (q1.face(1) == q2.face(1)) &&
+ (q1.face(2) < q2.face(2))) ||
+ ((q1.face(0) == q2.face(0)) &&
+ (q1.face(1) == q2.face(1)) &&
+ (q1.face(2) == q2.face(2)) &&
+ (q1.face(3) < q2.face(3))))
+ return true;
+ else
+ return false;
+ }
+ };
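+
+ // The comparator above only exists so that a TriaObject<2> can serve
+ // as a map key; a hedged sketch of that use (the mapped value chosen
+ // here is purely illustrative):
+ //
+ //   std::map<internal::Triangulation::TriaObject<2>,
+ //            unsigned int,
+ //            QuadComparator> needed_quads;
+ //   internal::Triangulation::TriaObject<2> quad (0, 1, 2, 3); // line indices
+ //   needed_quads[quad] = 0;
+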
{
namespace DoFHandler
{
- // access class
- // dealii::hp::DoFHandler instead of
- // namespace internal::hp::DoFHandler, etc
+ // access class
+ // dealii::hp::DoFHandler instead of
+ // namespace internal::hp::DoFHandler, etc
using dealii::hp::DoFHandler;
- /**
- * A class with the same purpose as the similarly named class of the
- * Triangulation class. See there for more information.
- */
+ /**
+ * A class with the same purpose as the similarly named class of the
+ * Triangulation class. See there for more information.
+ */
struct Implementation
{
- /**
- * Do that part of reserving
- * space that pertains to
- * vertices, since this is the
- * same in all space
- * dimensions.
- */
- template<int dim, int spacedim>
- static
- void
- reserve_space_vertices (DoFHandler<dim,spacedim> &dof_handler)
- {
- // The final step is allocating
- // memory is to set up vertex dof
- // information. since vertices
- // are sequentially numbered,
- // what we do first is to set up
- // an array in which we record
- // whether a vertex is associated
- // with any of the given fe's, by
- // setting a bit. in a later
- // step, we then actually
- // allocate memory for the
- // required dofs
- std::vector<std::vector<bool> >
- vertex_fe_association (dof_handler.finite_elements->size(),
- std::vector<bool> (dof_handler.tria->n_vertices(), false));
-
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
- for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
- vertex_fe_association[cell->active_fe_index()][cell->vertex_index(v)]
- = true;
-
- // in debug mode, make sure
- // that each vertex is
- // associated with at least one
- // fe (note that except for
- // unused vertices, all
- // vertices are actually
- // active)
+ /**
+ * Do that part of reserving
+ * space that pertains to
+ * vertices, since this is the
+ * same in all space
+ * dimensions.
+ */
+ template<int dim, int spacedim>
+ static
+ void
+ reserve_space_vertices (DoFHandler<dim,spacedim> &dof_handler)
+ {
+ // The final step in allocating
+ // memory is to set up vertex dof
+ // information. since vertices
+ // are sequentially numbered,
+ // what we do first is to set up
+ // an array in which we record
+ // whether a vertex is associated
+ // with any of the given fe's, by
+ // setting a bit. in a later
+ // step, we then actually
+ // allocate memory for the
+ // required dofs
+ std::vector<std::vector<bool> >
+ vertex_fe_association (dof_handler.finite_elements->size(),
+ std::vector<bool> (dof_handler.tria->n_vertices(), false));
+
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ vertex_fe_association[cell->active_fe_index()][cell->vertex_index(v)]
+ = true;
+
+ // in debug mode, make sure
+ // that each vertex is
+ // associated with at least one
+ // fe (note that except for
+ // unused vertices, all
+ // vertices are actually
+ // active)
#ifdef DEBUG
- for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
- if (dof_handler.tria->vertex_used(v) == true)
- {
- unsigned int fe=0;
- for (; fe<dof_handler.finite_elements->size(); ++fe)
- if (vertex_fe_association[fe][v] == true)
- break;
- Assert (fe != dof_handler.finite_elements->size(), ExcInternalError());
- }
+ for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
+ if (dof_handler.tria->vertex_used(v) == true)
+ {
+ unsigned int fe=0;
+ for (; fe<dof_handler.finite_elements->size(); ++fe)
+ if (vertex_fe_association[fe][v] == true)
+ break;
+ Assert (fe != dof_handler.finite_elements->size(), ExcInternalError());
+ }
#endif
- // next count how much memory
- // we actually need. for each
- // vertex, we need one slot per
- // fe to store the fe_index,
- // plus dofs_per_vertex for
- // this fe. in addition, we
- // need one slot as the end
- // marker for the
- // fe_indices. at the same time
- // already fill the
- // vertex_dofs_offsets field
- dof_handler.vertex_dofs_offsets.resize (dof_handler.tria->n_vertices(),
- numbers::invalid_unsigned_int);
-
- unsigned int vertex_slots_needed = 0;
- for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
- if (dof_handler.tria->vertex_used(v) == true)
- {
- dof_handler.vertex_dofs_offsets[v] = vertex_slots_needed;
-
- for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
- if (vertex_fe_association[fe][v] == true)
- vertex_slots_needed += (*dof_handler.finite_elements)[fe].dofs_per_vertex + 1;
- ++vertex_slots_needed;
- }
-
- // now allocate the space we
- // have determined we need, and
- // set up the linked lists for
- // each of the vertices
- dof_handler.vertex_dofs.resize (vertex_slots_needed,
- DoFHandler<dim,spacedim>::invalid_dof_index);
- for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
- if (dof_handler.tria->vertex_used(v) == true)
- {
- unsigned int pointer = dof_handler.vertex_dofs_offsets[v];
- for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
- if (vertex_fe_association[fe][v] == true)
- {
- // if this vertex
- // uses this fe,
- // then set the
- // fe_index and
- // move the pointer
- // ahead
- dof_handler.vertex_dofs[pointer] = fe;
- pointer += (*dof_handler.finite_elements)[fe].dofs_per_vertex + 1;
- }
- // finally place the end
- // marker
- dof_handler.vertex_dofs[pointer] = numbers::invalid_unsigned_int;
- }
- }
-
-
-
- /**
- * Distribute dofs on the given cell,
- * with new dofs starting with index
- * @p next_free_dof. Return the next
- * unused index number. The finite
- * element used is the one given to
- * @p distribute_dofs, which is copied
- * to @p selected_fe.
- *
- * This function is excluded from the
- * @p distribute_dofs function since
- * it can not be implemented dimension
- * independent.
- */
- template <int spacedim>
- static
- types::global_dof_index
- distribute_dofs_on_cell (const typename dealii::hp::DoFHandler<1,spacedim>::active_cell_iterator &cell,
- types::global_dof_index next_free_dof)
- {
- const unsigned int dim = 1;
-
- const FiniteElement<dim,spacedim> &fe = cell->get_fe();
- const unsigned int fe_index = cell->active_fe_index ();
-
- // number dofs on vertices. to do
- // so, check whether dofs for
- // this vertex have been
- // distributed and for the
- // present fe (only check the
- // first dof), and if this isn't
- // the case distribute new ones
- // there
- if (fe.dofs_per_vertex > 0)
- for (unsigned int vertex=0; vertex<GeometryInfo<1>::vertices_per_cell; ++vertex)
- if (cell->vertex_dof_index(vertex, 0, fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
- cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
-
- // finally for the line. this one
- // shouldn't be numbered yet
- if (fe.dofs_per_line > 0)
- {
- Assert ((cell->dof_index(0, fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index),
- ExcInternalError());
-
- for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
- cell->set_dof_index (d, next_free_dof, fe_index);
- }
-
- // note that this cell has been processed
- cell->set_user_flag ();
-
- return next_free_dof;
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- distribute_dofs_on_cell (const typename dealii::hp::DoFHandler<2,spacedim>::active_cell_iterator &cell,
- types::global_dof_index next_free_dof)
- {
- const unsigned int dim = 2;
-
- const FiniteElement<dim,spacedim> &fe = cell->get_fe();
- const unsigned int fe_index = cell->active_fe_index ();
-
- // number dofs on vertices. to do
- // so, check whether dofs for
- // this vertex have been
- // distributed and for the
- // present fe (only check the
- // first dof), and if this isn't
- // the case distribute new ones
- // there
- if (fe.dofs_per_vertex > 0)
- for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
- if (cell->vertex_dof_index(vertex, 0, fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
- cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
-
- // next the sides. do the
- // same as above: check whether
- // the line is already numbered
- // for the present fe_index, and
- // if not do it
- if (fe.dofs_per_line > 0)
- for (unsigned int l=0; l<GeometryInfo<2>::lines_per_cell; ++l)
- {
- typename DoFHandler<dim,spacedim>::line_iterator
- line = cell->line(l);
-
- if (line->dof_index(0,fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
- line->set_dof_index (d, next_free_dof, fe_index);
- }
-
-
- // finally for the quad. this one
- // shouldn't be numbered yet
- if (fe.dofs_per_quad > 0)
- {
- Assert ((cell->dof_index(0, fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index),
- ExcInternalError());
-
- for (unsigned int d=0; d<fe.dofs_per_quad; ++d, ++next_free_dof)
- cell->set_dof_index (d, next_free_dof, fe_index);
- }
-
- // note that this cell has been processed
- cell->set_user_flag ();
-
- return next_free_dof;
- }
-
-
- template <int spacedim>
- static
- types::global_dof_index
- distribute_dofs_on_cell (const typename dealii::hp::DoFHandler<3,spacedim>::active_cell_iterator &cell,
- types::global_dof_index next_free_dof)
- {
- const unsigned int dim = 3;
-
- const FiniteElement<dim,spacedim> &fe = cell->get_fe();
- const unsigned int fe_index = cell->active_fe_index ();
-
- // number dofs on vertices. to do
- // so, check whether dofs for
- // this vertex have been
- // distributed and for the
- // present fe (only check the
- // first dof), and if this isn't
- // the case distribute new ones
- // there
- if (fe.dofs_per_vertex > 0)
- for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
- if (cell->vertex_dof_index(vertex, 0, fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
- cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
-
- // next the four lines. do the
- // same as above: check whether
- // the line is already numbered
- // for the present fe_index, and
- // if not do it
- if (fe.dofs_per_line > 0)
- for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
- {
- typename DoFHandler<dim,spacedim>::line_iterator
- line = cell->line(l);
-
- if (line->dof_index(0,fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
- line->set_dof_index (d, next_free_dof, fe_index);
- }
-
- // same for quads
- if (fe.dofs_per_quad > 0)
- for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
- {
- typename DoFHandler<dim,spacedim>::quad_iterator
- quad = cell->quad(q);
-
- if (quad->dof_index(0,fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<fe.dofs_per_quad; ++d, ++next_free_dof)
- quad->set_dof_index (d, next_free_dof, fe_index);
- }
-
-
- // finally for the hex. this one
- // shouldn't be numbered yet
- if (fe.dofs_per_hex > 0)
- {
- Assert ((cell->dof_index(0, fe_index) ==
- DoFHandler<dim,spacedim>::invalid_dof_index),
- ExcInternalError());
-
- for (unsigned int d=0; d<fe.dofs_per_hex; ++d, ++next_free_dof)
- cell->set_dof_index (d, next_free_dof, fe_index);
- }
-
- // note that this cell has been processed
- cell->set_user_flag ();
-
- return next_free_dof;
- }
-
-
- /**
- * Reserve enough space in the
- * <tt>levels[]</tt> objects to store the
- * numbers of the degrees of freedom
- * needed for the given element. The
- * given element is that one which
- * was selected when calling
- * @p distribute_dofs the last time.
- */
- template <int spacedim>
- static
- void
- reserve_space (DoFHandler<1,spacedim> &dof_handler)
- {
- const unsigned int dim = 1;
-
- typedef DoFHandler<dim,spacedim> BaseClass;
-
- Assert (dof_handler.finite_elements != 0,
- typename BaseClass::ExcNoFESelected());
- Assert (dof_handler.finite_elements->size() > 0,
- typename BaseClass::ExcNoFESelected());
- Assert (dof_handler.tria->n_levels() > 0,
- typename
- BaseClass::ExcInvalidTriangulation());
- Assert (dof_handler.tria->n_levels() == dof_handler.levels.size (),
- ExcInternalError ());
-
- // Release all space except the
- // active_fe_indices field which
- // we have to backup before
- {
- std::vector<std::vector<unsigned int> >
- active_fe_backup(dof_handler.levels.size ());
- for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
- std::swap (dof_handler.levels[level]->active_fe_indices, active_fe_backup[level]);
-
- // delete all levels and set them up
- // newly, since vectors are
- // troublesome if you want to change
- // their size
- dof_handler.clear_space ();
-
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- dof_handler.levels.push_back (new internal::hp::DoFLevel<dim>);
- std::swap (active_fe_backup[level],
- dof_handler.levels[level]->active_fe_indices);
- }
- }
-
- // LINE (CELL) DOFs
-
- // count how much space we need
- // on each level for the cell
- // dofs and set the
- // dof_*_offsets
- // data. initially set the latter
- // to an invalid index, and only
- // later set it to something
- // reasonable for active dof_handler.cells
- //
- // note that for dof_handler.cells, the
- // situation is simpler than for
- // other (lower dimensional)
- // objects since exactly one
- // finite element is used for it
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- dof_handler.levels[level]->dof_object.dof_offsets
- = std::vector<unsigned int> (dof_handler.tria->n_raw_lines(level),
- DoFHandler<dim,spacedim>::invalid_dof_index);
-
- types::global_dof_index next_free_dof = 0;
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(level);
- cell!=dof_handler.end_active(level); ++cell)
- if (!cell->has_children())
- {
- dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
- next_free_dof += cell->get_fe().dofs_per_line;
- }
-
- dof_handler.levels[level]->dof_object.dofs
- = std::vector<types::global_dof_index> (next_free_dof,
- DoFHandler<dim,spacedim>::invalid_dof_index);
- }
-
- // safety check: make sure that
- // the number of DoFs we
- // allocated is actually correct
- // (above we have also set the
- // dof_*_offsets field, so
- // we couldn't use this simpler
- // algorithm)
+ // next count how much memory
+ // we actually need. for each
+ // vertex, we need one slot per
+ // fe to store the fe_index,
+ // plus dofs_per_vertex for
+ // this fe. in addition, we
+ // need one slot as the end
+ // marker for the
+ // fe_indices. at the same time
+ // already fill the
+ // vertex_dofs_offsets field
+ dof_handler.vertex_dofs_offsets.resize (dof_handler.tria->n_vertices(),
+ numbers::invalid_unsigned_int);
+
+ unsigned int vertex_slots_needed = 0;
+ for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
+ if (dof_handler.tria->vertex_used(v) == true)
+ {
+ dof_handler.vertex_dofs_offsets[v] = vertex_slots_needed;
+
+ for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
+ if (vertex_fe_association[fe][v] == true)
+ vertex_slots_needed += (*dof_handler.finite_elements)[fe].dofs_per_vertex + 1;
+ ++vertex_slots_needed;
+ }
+
+ // now allocate the space we
+ // have determined we need, and
+ // set up the linked lists for
+ // each of the vertices
+ dof_handler.vertex_dofs.resize (vertex_slots_needed,
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+ for (unsigned int v=0; v<dof_handler.tria->n_vertices(); ++v)
+ if (dof_handler.tria->vertex_used(v) == true)
+ {
+ unsigned int pointer = dof_handler.vertex_dofs_offsets[v];
+ for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
+ if (vertex_fe_association[fe][v] == true)
+ {
+ // if this vertex
+ // uses this fe,
+ // then set the
+ // fe_index and
+ // move the pointer
+ // ahead
+ dof_handler.vertex_dofs[pointer] = fe;
+ pointer += (*dof_handler.finite_elements)[fe].dofs_per_vertex + 1;
+ }
+ // finally place the end
+ // marker
+ dof_handler.vertex_dofs[pointer] = numbers::invalid_unsigned_int;
+ }
+ }
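+
+ // A worked sketch of the layout just built (the fe indices are
+ // illustrative): for a vertex used by fe indices 0 and 2, each with
+ // one dof per vertex, the slots starting at vertex_dofs_offsets[v] read
+ //
+ //   [ 0, <slot for fe 0's vertex dof>,
+ //     2, <slot for fe 2's vertex dof>,
+ //     numbers::invalid_unsigned_int ]
+ //
+ // i.e. each block is the fe_index followed by room for that element's
+ // vertex dofs, terminated by the invalid marker.
+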
+
+
+
+ /**
+ * Distribute dofs on the given cell,
+ * with new dofs starting with index
+ * @p next_free_dof. Return the next
+ * unused index number. The finite
+ * element used is the one given to
+ * @p distribute_dofs, which is copied
+ * to @p selected_fe.
+ *
+ * This function is excluded from the
+ * @p distribute_dofs function since
+ * it cannot be implemented in a
+ * dimension-independent way.
+ */
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ distribute_dofs_on_cell (const typename dealii::hp::DoFHandler<1,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
++ types::global_dof_index next_free_dof)
+ {
+ const unsigned int dim = 1;
+
+ const FiniteElement<dim,spacedim> &fe = cell->get_fe();
+ const unsigned int fe_index = cell->active_fe_index ();
+
+ // number dofs on vertices. to do
+ // so, check whether dofs for
+ // this vertex have been
+ // distributed and for the
+ // present fe (only check the
+ // first dof), and if this isn't
+ // the case distribute new ones
+ // there
+ if (fe.dofs_per_vertex > 0)
+ for (unsigned int vertex=0; vertex<GeometryInfo<1>::vertices_per_cell; ++vertex)
+ if (cell->vertex_dof_index(vertex, 0, fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
+
+ // finally for the line. this one
+ // shouldn't be numbered yet
+ if (fe.dofs_per_line > 0)
+ {
+ Assert ((cell->dof_index(0, fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index),
+ ExcInternalError());
+
+ for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
+ cell->set_dof_index (d, next_free_dof, fe_index);
+ }
+
+ // note that this cell has been processed
+ cell->set_user_flag ();
+
+ return next_free_dof;
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ distribute_dofs_on_cell (const typename dealii::hp::DoFHandler<2,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
++ types::global_dof_index next_free_dof)
+ {
+ const unsigned int dim = 2;
+
+ const FiniteElement<dim,spacedim> &fe = cell->get_fe();
+ const unsigned int fe_index = cell->active_fe_index ();
+
+ // number dofs on vertices. to do
+ // so, check whether dofs for
+ // this vertex have been
+ // distributed and for the
+ // present fe (only check the
+ // first dof), and if this isn't
+ // the case distribute new ones
+ // there
+ if (fe.dofs_per_vertex > 0)
+ for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
+ if (cell->vertex_dof_index(vertex, 0, fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
+
+ // next the sides. do the
+ // same as above: check whether
+ // the line is already numbered
+ // for the present fe_index, and
+ // if not do it
+ if (fe.dofs_per_line > 0)
+ for (unsigned int l=0; l<GeometryInfo<2>::lines_per_cell; ++l)
+ {
+ typename DoFHandler<dim,spacedim>::line_iterator
+ line = cell->line(l);
+
+ if (line->dof_index(0,fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
+ line->set_dof_index (d, next_free_dof, fe_index);
+ }
+
+
+ // finally for the quad. this one
+ // shouldn't be numbered yet
+ if (fe.dofs_per_quad > 0)
+ {
+ Assert ((cell->dof_index(0, fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index),
+ ExcInternalError());
+
+ for (unsigned int d=0; d<fe.dofs_per_quad; ++d, ++next_free_dof)
+ cell->set_dof_index (d, next_free_dof, fe_index);
+ }
+
+ // note that this cell has been processed
+ cell->set_user_flag ();
+
+ return next_free_dof;
+ }
+
+
+ template <int spacedim>
+ static
- unsigned int
++ types::global_dof_index
+ distribute_dofs_on_cell (const typename dealii::hp::DoFHandler<3,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
++ types::global_dof_index next_free_dof)
+ {
+ const unsigned int dim = 3;
+
+ const FiniteElement<dim,spacedim> &fe = cell->get_fe();
+ const unsigned int fe_index = cell->active_fe_index ();
+
+ // number dofs on vertices. to do
+ // so, check whether dofs for
+ // this vertex have been
+ // distributed and for the
+ // present fe (only check the
+ // first dof), and if this isn't
+ // the case distribute new ones
+ // there
+ if (fe.dofs_per_vertex > 0)
+ for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
+ if (cell->vertex_dof_index(vertex, 0, fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<fe.dofs_per_vertex; ++d, ++next_free_dof)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof, fe_index);
+
+ // next the twelve lines. do the
+ // same as above: check whether
+ // the line is already numbered
+ // for the present fe_index, and
+ // if not do it
+ if (fe.dofs_per_line > 0)
+ for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
+ {
+ typename DoFHandler<dim,spacedim>::line_iterator
+ line = cell->line(l);
+
+ if (line->dof_index(0,fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<fe.dofs_per_line; ++d, ++next_free_dof)
+ line->set_dof_index (d, next_free_dof, fe_index);
+ }
+
+ // same for quads
+ if (fe.dofs_per_quad > 0)
+ for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
+ {
+ typename DoFHandler<dim,spacedim>::quad_iterator
+ quad = cell->quad(q);
+
+ if (quad->dof_index(0,fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<fe.dofs_per_quad; ++d, ++next_free_dof)
+ quad->set_dof_index (d, next_free_dof, fe_index);
+ }
+
+
+ // finally for the hex. this one
+ // shouldn't be numbered yet
+ if (fe.dofs_per_hex > 0)
+ {
+ Assert ((cell->dof_index(0, fe_index) ==
+ DoFHandler<dim,spacedim>::invalid_dof_index),
+ ExcInternalError());
+
+ for (unsigned int d=0; d<fe.dofs_per_hex; ++d, ++next_free_dof)
+ cell->set_dof_index (d, next_free_dof, fe_index);
+ }
+
+ // note that this cell has been processed
+ cell->set_user_flag ();
+
+ return next_free_dof;
+ }
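+
+ // The three per-cell helpers above are driven by
+ // hp::DoFHandler::distribute_dofs; a minimal usage sketch, assuming an
+ // existing Triangulation<2> tria and FE_Q elements in the collection:
+ //
+ //   hp::FECollection<2> fe_collection;
+ //   fe_collection.push_back (FE_Q<2>(1));
+ //   fe_collection.push_back (FE_Q<2>(2));
+ //   hp::DoFHandler<2> dof_handler (tria);
+ //   dof_handler.begin_active()->set_active_fe_index (1);
+ //   dof_handler.distribute_dofs (fe_collection);
+ //   const types::global_dof_index n = dof_handler.n_dofs();
+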
+
+
+ /**
+ * Reserve enough space in the
+ * <tt>levels[]</tt> objects to store the
+ * numbers of the degrees of freedom
+ * needed for the given element. The
+ * given element is that one which
+ * was selected when calling
+ * @p distribute_dofs the last time.
+ */
+ template <int spacedim>
+ static
+ void
+ reserve_space (DoFHandler<1,spacedim> &dof_handler)
+ {
+ const unsigned int dim = 1;
+
+ typedef DoFHandler<dim,spacedim> BaseClass;
+
+ Assert (dof_handler.finite_elements != 0,
+ typename BaseClass::ExcNoFESelected());
+ Assert (dof_handler.finite_elements->size() > 0,
+ typename BaseClass::ExcNoFESelected());
+ Assert (dof_handler.tria->n_levels() > 0,
+ typename
+ BaseClass::ExcInvalidTriangulation());
+ Assert (dof_handler.tria->n_levels() == dof_handler.levels.size (),
+ ExcInternalError ());
+
+ // Release all space except the
+ // active_fe_indices field which
+ // we have to back up before
+ {
+ std::vector<std::vector<unsigned int> >
+ active_fe_backup(dof_handler.levels.size ());
+ for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
+ std::swap (dof_handler.levels[level]->active_fe_indices, active_fe_backup[level]);
+
+ // delete all levels and set them up
+ // newly, since vectors are
+ // troublesome if you want to change
+ // their size
+ dof_handler.clear_space ();
+
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
+ dof_handler.levels.push_back (new internal::hp::DoFLevel<dim>);
+ std::swap (active_fe_backup[level],
+ dof_handler.levels[level]->active_fe_indices);
+ }
+ }
+
+ // LINE (CELL) DOFs
+
+ // count how much space we need
+ // on each level for the cell
+ // dofs and set the
+ // dof_*_offsets
+ // data. initially set the latter
+ // to an invalid index, and only
+ // later set it to something
+ // reasonable for active dof_handler.cells
+ //
+ // note that for dof_handler.cells, the
+ // situation is simpler than for
+ // other (lower dimensional)
+ // objects since exactly one
+ // finite element is used for it
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
+ dof_handler.levels[level]->dof_object.dof_offsets
+ = std::vector<unsigned int> (dof_handler.tria->n_raw_lines(level),
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+
- unsigned int next_free_dof = 0;
++ types::global_dof_index next_free_dof = 0;
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(level);
+ cell!=dof_handler.end_active(level); ++cell)
+ if (!cell->has_children())
+ {
+ dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
+ next_free_dof += cell->get_fe().dofs_per_line;
+ }
+
+ dof_handler.levels[level]->dof_object.dofs
- = std::vector<unsigned int> (next_free_dof,
- DoFHandler<dim,spacedim>::invalid_dof_index);
++ = std::vector<types::global_dof_index> (next_free_dof,
++ DoFHandler<dim,spacedim>::invalid_dof_index);
+ }
+
+ // safety check: make sure that
+ // the number of DoFs we
+ // allocated is actually correct
+ // (above we have also set the
+ // dof_*_offsets field, so
+ // we couldn't use this simpler
+ // algorithm)
#ifdef DEBUG
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- types::global_dof_index counter = 0;
- for (typename DoFHandler<dim,spacedim>::cell_iterator
- cell=dof_handler.begin_active(level);
- cell!=dof_handler.end_active(level); ++cell)
- if (!cell->has_children())
- counter += cell->get_fe().dofs_per_line;
-
- Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
- ExcInternalError());
- Assert (static_cast<unsigned int>
- (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
- dof_handler.levels[level]->dof_object.dof_offsets.end(),
- DoFHandler<dim,spacedim>::invalid_dof_index))
- ==
- dof_handler.tria->n_raw_lines(level) - dof_handler.tria->n_active_lines(level),
- ExcInternalError());
- }
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
- unsigned int counter = 0;
++ types::global_dof_index counter = 0;
+ for (typename DoFHandler<dim,spacedim>::cell_iterator
+ cell=dof_handler.begin_active(level);
+ cell!=dof_handler.end_active(level); ++cell)
+ if (!cell->has_children())
+ counter += cell->get_fe().dofs_per_line;
+
+ Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
+ ExcInternalError());
+ Assert (static_cast<unsigned int>
+ (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
+ dof_handler.levels[level]->dof_object.dof_offsets.end(),
+ DoFHandler<dim,spacedim>::invalid_dof_index))
+ ==
+ dof_handler.tria->n_raw_lines(level) - dof_handler.tria->n_active_lines(level),
+ ExcInternalError());
+ }
#endif
- // VERTEX DOFS
- reserve_space_vertices (dof_handler);
- }
-
-
- template <int spacedim>
- static
- void
- reserve_space (DoFHandler<2,spacedim> &dof_handler)
- {
- const unsigned int dim = 2;
-
- typedef DoFHandler<dim,spacedim> BaseClass;
-
- Assert (dof_handler.finite_elements != 0,
- typename BaseClass::ExcNoFESelected());
- Assert (dof_handler.finite_elements->size() > 0,
- typename BaseClass::ExcNoFESelected());
- Assert (dof_handler.tria->n_levels() > 0,
- typename BaseClass::ExcInvalidTriangulation());
- Assert (dof_handler.tria->n_levels() == dof_handler.levels.size (),
- ExcInternalError ());
-
- // Release all space except the
- // active_fe_indices field which
- // we have to backup before
- {
- std::vector<std::vector<unsigned int> >
- active_fe_backup(dof_handler.levels.size ());
- for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
- std::swap (dof_handler.levels[level]->active_fe_indices,
- active_fe_backup[level]);
-
- // delete all levels and set them up
- // newly, since vectors are
- // troublesome if you want to change
- // their size
- dof_handler.clear_space ();
-
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- dof_handler.levels.push_back (new internal::hp::DoFLevel<dim>);
- std::swap (active_fe_backup[level],
- dof_handler.levels[level]->active_fe_indices);
- }
- dof_handler.faces = new internal::hp::DoFFaces<2>;
- }
-
-
- // QUAD (CELL) DOFs
-
- // count how much space we need
- // on each level for the cell
- // dofs and set the
- // dof_*_offsets
- // data. initially set the latter
- // to an invalid index, and only
- // later set it to something
- // reasonable for active dof_handler.cells
- //
- // note that for dof_handler.cells, the
- // situation is simpler than for
- // other (lower dimensional)
- // objects since exactly one
- // finite element is used for it
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- dof_handler.levels[level]->dof_object.dof_offsets
- = std::vector<unsigned int> (dof_handler.tria->n_raw_quads(level),
- DoFHandler<dim,spacedim>::invalid_dof_index);
-
- types::global_dof_index next_free_dof = 0;
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(level);
- cell!=dof_handler.end_active(level); ++cell)
- if (!cell->has_children())
- {
- dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
- next_free_dof += cell->get_fe().dofs_per_quad;
- }
-
- dof_handler.levels[level]->dof_object.dofs
- = std::vector<types::global_dof_index> (next_free_dof,
- DoFHandler<dim,spacedim>::invalid_dof_index);
- }
-
- // safety check: make sure that
- // the number of DoFs we
- // allocated is actually correct
- // (above we have also set the
- // dof_*_offsets field, so
- // we couldn't use this simpler
- // algorithm)
+ // VERTEX DOFS
+ reserve_space_vertices (dof_handler);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ reserve_space (DoFHandler<2,spacedim> &dof_handler)
+ {
+ const unsigned int dim = 2;
+
+ typedef DoFHandler<dim,spacedim> BaseClass;
+
+ Assert (dof_handler.finite_elements != 0,
+ typename BaseClass::ExcNoFESelected());
+ Assert (dof_handler.finite_elements->size() > 0,
+ typename BaseClass::ExcNoFESelected());
+ Assert (dof_handler.tria->n_levels() > 0,
+ typename BaseClass::ExcInvalidTriangulation());
+ Assert (dof_handler.tria->n_levels() == dof_handler.levels.size (),
+ ExcInternalError ());
+
+ // Release all space except the
+ // active_fe_indices field which
+ // we have to backup before
+ {
+ std::vector<std::vector<unsigned int> >
+ active_fe_backup(dof_handler.levels.size ());
+ for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
+ std::swap (dof_handler.levels[level]->active_fe_indices,
+ active_fe_backup[level]);
+
+ // delete all levels and set them up
+ // newly, since vectors are
+ // troublesome if you want to change
+ // their size
+ dof_handler.clear_space ();
+
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
+ dof_handler.levels.push_back (new internal::hp::DoFLevel<dim>);
+ std::swap (active_fe_backup[level],
+ dof_handler.levels[level]->active_fe_indices);
+ }
+ dof_handler.faces = new internal::hp::DoFFaces<2>;
+ }
+
+
+ // QUAD (CELL) DOFs
+
+ // count how much space we need
+ // on each level for the cell
+ // dofs and set the
+ // dof_*_offsets
+ // data. initially set the latter
+ // to an invalid index, and only
+ // later set it to something
+ // reasonable for active cells
+ //
+ // note that for cells, the
+ // situation is simpler than for
+ // other (lower dimensional)
+ // objects since exactly one
+ // finite element is used for it
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
+ dof_handler.levels[level]->dof_object.dof_offsets
+ = std::vector<unsigned int> (dof_handler.tria->n_raw_quads(level),
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+
- unsigned int next_free_dof = 0;
++ types::global_dof_index next_free_dof = 0;
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(level);
+ cell!=dof_handler.end_active(level); ++cell)
+ if (!cell->has_children())
+ {
+ dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
+ next_free_dof += cell->get_fe().dofs_per_quad;
+ }
+
+ dof_handler.levels[level]->dof_object.dofs
- = std::vector<unsigned int> (next_free_dof,
- DoFHandler<dim,spacedim>::invalid_dof_index);
++ = std::vector<types::global_dof_index> (next_free_dof,
++ DoFHandler<dim,spacedim>::invalid_dof_index);
+ }
+
+ // safety check: make sure that
+ // the number of DoFs we
+ // allocated is actually correct
+ // (above we have also set the
+ // dof_*_offsets field, so
+ // we couldn't use this simpler
+ // algorithm)
#ifdef DEBUG
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- types::global_dof_index counter = 0;
- for (typename DoFHandler<dim,spacedim>::cell_iterator
- cell=dof_handler.begin_active(level);
- cell!=dof_handler.end_active(level); ++cell)
- if (!cell->has_children())
- counter += cell->get_fe().dofs_per_quad;
-
- Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
- ExcInternalError());
- Assert (static_cast<unsigned int>
- (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
- dof_handler.levels[level]->dof_object.dof_offsets.end(),
- DoFHandler<dim,spacedim>::invalid_dof_index))
- ==
- dof_handler.tria->n_raw_quads(level) - dof_handler.tria->n_active_quads(level),
- ExcInternalError());
- }
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
- unsigned int counter = 0;
++ types::global_dof_index counter = 0;
+ for (typename DoFHandler<dim,spacedim>::cell_iterator
+ cell=dof_handler.begin_active(level);
+ cell!=dof_handler.end_active(level); ++cell)
+ if (!cell->has_children())
+ counter += cell->get_fe().dofs_per_quad;
+
+ Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
+ ExcInternalError());
+ Assert (static_cast<unsigned int>
+ (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
+ dof_handler.levels[level]->dof_object.dof_offsets.end(),
+ DoFHandler<dim,spacedim>::invalid_dof_index))
+ ==
+ dof_handler.tria->n_raw_quads(level) - dof_handler.tria->n_active_quads(level),
+ ExcInternalError());
+ }
#endif
- // LINE DOFS
- //
- // same here: count line dofs,
- // then allocate as much space as
- // we need and prime the linked
- // list for lines (see the
- // description in hp::DoFLevels)
- // with the indices we will
- // need. note that our task is
- // more complicated since two
- // adjacent dof_handler.cells may have
- // different active_fe_indices,
- // in which case we need to
- // allocate *two* sets of line
- // dofs for the same line
- //
- // the way we do things is that
- // we loop over all active dof_handler.cells
- // (these are the ones that have
- // DoFs only anyway) and all
- // their dof_handler.faces. We note in the
- // user flags whether we have
- // previously visited a face and
- // if so skip it (consequently,
- // we have to save and later
- // restore the line flags)
- {
- std::vector<bool> saved_line_user_flags;
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .save_user_flags_line (saved_line_user_flags);
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .clear_user_flags_line ();
-
- // an array to hold how many
- // slots (see the hp::DoFLevel
- // class) we will have to store
- // on each level
- unsigned int n_line_slots = 0;
-
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (! cell->face(face)->user_flag_set())
- {
- // ok, face has not been
- // visited. so we need to
- // allocate space for it. let's
- // see how much we need: we need
- // one set if a) there is no
- // neighbor behind this face, or
- // b) the neighbor is either
- // coarser or finer than we are,
- // or c) the neighbor is neither
- // coarser nor finer, but has
- // happens to have the same
- // active_fe_index:
- if (cell->at_boundary(face)
- ||
- cell->face(face)->has_children()
- ||
- cell->neighbor_is_coarser(face)
- ||
- (!cell->at_boundary(face)
- &&
- (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
- // ok, one set of
- // dofs. that makes
- // one index, 1 times
- // dofs_per_line
- // dofs, and one stop
- // index
- n_line_slots
- += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line + 2;
-
- // otherwise we do
- // indeed need two
- // sets, i.e. two
- // indices, two sets of
- // dofs, and one stop
- // index:
- else
- n_line_slots
- += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line
- +
- (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
- .dofs_per_line
- +
- 3);
-
- // mark this face as
- // visited
- cell->face(face)->set_user_flag ();
- }
-
- // now that we know how many
- // line dofs we will have to
- // have on each level, allocate
- // the memory. note that we
- // allocate offsets for all
- // lines, though only the
- // active ones will have a
- // non-invalid value later on
- dof_handler.faces->lines.dof_offsets
- = std::vector<unsigned int> (dof_handler.tria->n_raw_lines(),
- DoFHandler<dim,spacedim>::invalid_dof_index);
- dof_handler.faces->lines.dofs
- = std::vector<types::global_dof_index> (n_line_slots,
- DoFHandler<dim,spacedim>::invalid_dof_index);
-
- // with the memory now
- // allocated, loop over the
- // dof_handler.cells again and prime the
- // _offset values as well as
- // the fe_index fields
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .clear_user_flags_line ();
-
- unsigned int next_free_line_slot = 0;
-
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (! cell->face(face)->user_flag_set())
- {
- // same decision tree
- // as before
- if (cell->at_boundary(face)
- ||
- cell->face(face)->has_children()
- ||
- cell->neighbor_is_coarser(face)
- ||
- (!cell->at_boundary(face)
- &&
- (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
- {
- dof_handler.faces
- ->lines.dof_offsets[cell->face(face)->index()]
- = next_free_line_slot;
-
- // set first slot
- // for this line to
- // active_fe_index
- // of this face
- dof_handler.faces
- ->lines.dofs[next_free_line_slot]
- = cell->active_fe_index();
-
- // the next
- // dofs_per_line
- // indices remain
- // unset for the
- // moment (i.e. at
- // invalid_dof_index).
- // following this
- // comes the stop
- // index, which
- // also is
- // invalid_dof_index
- // and therefore
- // does not have to
- // be explicitly
- // set
-
- // finally, mark
- // those slots as
- // used
- next_free_line_slot
- += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line + 2;
- }
- else
- {
- dof_handler.faces
- ->lines.dof_offsets[cell->face(face)->index()]
- = next_free_line_slot;
-
- // set first slot
- // for this line to
- // active_fe_index
- // of this face
- dof_handler.faces
- ->lines.dofs[next_free_line_slot]
- = cell->active_fe_index();
-
- // the next
- // dofs_per_line
- // indices remain
- // unset for the
- // moment (i.e. at
- // invalid_dof_index).
- //
- // then comes the
- // fe_index for the
- // neighboring
- // cell:
- dof_handler.faces
- ->lines.dofs[next_free_line_slot
- +
- (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line
- +
- 1]
- = cell->neighbor(face)->active_fe_index();
- // then again a set
- // of dofs that we
- // need not set
- // right now
- //
- // following this
- // comes the stop
- // index, which
- // also is
- // invalid_dof_index
- // and therefore
- // does not have to
- // be explicitly
- // set
-
- // finally, mark
- // those slots as
- // used
- next_free_line_slot
- += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line
- +
- (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
- .dofs_per_line
- +
- 3);
- }
-
- // mark this face as
- // visited
- cell->face(face)->set_user_flag ();
- }
-
- // we should have moved the
- // cursor for each level to the
- // total number of dofs on that
- // level. check that
- Assert (next_free_line_slot == n_line_slots,
- ExcInternalError());
-
- // at the end, restore the user
- // flags for the lines
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .load_user_flags_line (saved_line_user_flags);
- }
-
-
- // VERTEX DOFS
- reserve_space_vertices (dof_handler);
- }
-
-
- template <int spacedim>
- static
- void
- reserve_space (DoFHandler<3,spacedim> &dof_handler)
- {
- const unsigned int dim = 3;
-
- typedef DoFHandler<dim,spacedim> BaseClass;
-
- Assert (dof_handler.finite_elements != 0,
- typename BaseClass::ExcNoFESelected());
- Assert (dof_handler.finite_elements->size() > 0,
- typename BaseClass::ExcNoFESelected());
- Assert (dof_handler.tria->n_levels() > 0,
- typename BaseClass::ExcInvalidTriangulation());
- Assert (dof_handler.tria->n_levels() == dof_handler.levels.size (),
- ExcInternalError ());
-
- // Release all space except the
- // active_fe_indices field which
- // we have to backup before
- {
- std::vector<std::vector<unsigned int> >
- active_fe_backup(dof_handler.levels.size ());
- for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
- std::swap (dof_handler.levels[level]->active_fe_indices,
- active_fe_backup[level]);
-
- // delete all levels and set them up
- // newly, since vectors are
- // troublesome if you want to change
- // their size
- dof_handler.clear_space ();
-
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- dof_handler.levels.push_back (new internal::hp::DoFLevel<dim>);
- std::swap (active_fe_backup[level],
- dof_handler.levels[level]->active_fe_indices);
- }
- dof_handler.faces = new internal::hp::DoFFaces<3>;
- }
-
-
- // HEX (CELL) DOFs
-
- // count how much space we need
- // on each level for the cell
- // dofs and set the
- // dof_*_offsets
- // data. initially set the latter
- // to an invalid index, and only
- // later set it to something
- // reasonable for active dof_handler.cells
- //
- // note that for dof_handler.cells, the
- // situation is simpler than for
- // other (lower dimensional)
- // objects since exactly one
- // finite element is used for it
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- dof_handler.levels[level]->dof_object.dof_offsets
- = std::vector<unsigned int> (dof_handler.tria->n_raw_hexs(level),
- DoFHandler<dim,spacedim>::invalid_dof_index);
-
- types::global_dof_index next_free_dof = 0;
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(level);
- cell!=dof_handler.end_active(level); ++cell)
- if (!cell->has_children())
- {
- dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
- next_free_dof += cell->get_fe().dofs_per_hex;
- }
-
- dof_handler.levels[level]->dof_object.dofs
- = std::vector<types::global_dof_index> (next_free_dof,
- DoFHandler<dim,spacedim>::invalid_dof_index);
- }
-
- // safety check: make sure that
- // the number of DoFs we
- // allocated is actually correct
- // (above we have also set the
- // dof_*_offsets field, so
- // we couldn't use this simpler
- // algorithm)
+ // LINE DOFS
+ //
+ // same here: count line dofs,
+ // then allocate as much space as
+ // we need and prime the linked
+ // list for lines (see the
+ // description in hp::DoFLevels)
+ // with the indices we will
+ // need. note that our task is
+ // more complicated since two
+ // adjacent cells may have
+ // different active_fe_indices,
+ // in which case we need to
+ // allocate *two* sets of line
+ // dofs for the same line
+ //
+ // the way we do things is that
+ // we loop over all active cells
+ // (these are the only ones that
+ // have DoFs anyway) and all
+ // their faces. We note in the
+ // user flags whether we have
+ // previously visited a face and
+ // if so skip it (consequently,
+ // we have to save and later
+ // restore the line flags)
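+ //
+ // (slot layout, sketched here
+ // only for illustration: a line
+ // seen from a single fe is
+ // stored as
+ //   [fe_index, dofs_per_line dofs, stop]
+ // i.e. dofs_per_line+2 slots,
+ // whereas a line between two
+ // cells with different
+ // active_fe_indices is stored as
+ //   [fe_index_1, its dofs, fe_index_2, its dofs, stop]
+ // i.e. three extra slots on top
+ // of the two dof sets; the stop
+ // marker is simply
+ // invalid_dof_index)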
+ {
+ std::vector<bool> saved_line_user_flags;
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .save_user_flags_line (saved_line_user_flags);
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .clear_user_flags_line ();
+
+ // a counter for how many
+ // slots (see the hp::DoFLevel
+ // class) we will have to store
+ unsigned int n_line_slots = 0;
+
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (! cell->face(face)->user_flag_set())
+ {
+ // ok, face has not been
+ // visited. so we need to
+ // allocate space for it. let's
+ // see how much we need: we need
+ // one set if a) there is no
+ // neighbor behind this face, or
+ // b) the neighbor is either
+ // coarser or finer than we are,
+ // or c) the neighbor is neither
+ // coarser nor finer, but
+ // happens to have the same
+ // active_fe_index:
+ if (cell->at_boundary(face)
+ ||
+ cell->face(face)->has_children()
+ ||
+ cell->neighbor_is_coarser(face)
+ ||
+ (!cell->at_boundary(face)
+ &&
+ (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
+ // ok, one set of
+ // dofs. that makes
+ // one index, 1 times
+ // dofs_per_line
+ // dofs, and one stop
+ // index
+ n_line_slots
+ += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line + 2;
+
+ // otherwise we do
+ // indeed need two
+ // sets, i.e. two
+ // indices, two sets of
+ // dofs, and one stop
+ // index:
+ else
+ n_line_slots
+ += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line
+ +
+ (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
+ .dofs_per_line
+ +
+ 3);
+
+ // mark this face as
+ // visited
+ cell->face(face)->set_user_flag ();
+ }
+
+ // now that we know how many
+ // line dofs we will have to
+ // have, allocate the
+ // memory. note that we
+ // allocate offsets for all
+ // lines, though only the
+ // active ones will have a
+ // non-invalid value later on
+ dof_handler.faces->lines.dof_offsets
+ = std::vector<unsigned int> (dof_handler.tria->n_raw_lines(),
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+ dof_handler.faces->lines.dofs
- = std::vector<unsigned int> (n_line_slots,
- DoFHandler<dim,spacedim>::invalid_dof_index);
++ = std::vector<types::global_dof_index> (n_line_slots,
++ DoFHandler<dim,spacedim>::invalid_dof_index);
+
+ // with the memory now
+ // allocated, loop over the
+ // cells again and prime the
+ // _offset values as well as
+ // the fe_index fields
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .clear_user_flags_line ();
+
+ unsigned int next_free_line_slot = 0;
+
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (! cell->face(face)->user_flag_set())
+ {
+ // same decision tree
+ // as before
+ if (cell->at_boundary(face)
+ ||
+ cell->face(face)->has_children()
+ ||
+ cell->neighbor_is_coarser(face)
+ ||
+ (!cell->at_boundary(face)
+ &&
+ (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
+ {
+ dof_handler.faces
+ ->lines.dof_offsets[cell->face(face)->index()]
+ = next_free_line_slot;
+
+ // set first slot
+ // for this line to
+ // active_fe_index
+ // of this face
+ dof_handler.faces
+ ->lines.dofs[next_free_line_slot]
+ = cell->active_fe_index();
+
+ // the next
+ // dofs_per_line
+ // indices remain
+ // unset for the
+ // moment (i.e. at
+ // invalid_dof_index).
+ // following this
+ // comes the stop
+ // index, which
+ // also is
+ // invalid_dof_index
+ // and therefore
+ // does not have to
+ // be explicitly
+ // set
+
+ // finally, mark
+ // those slots as
+ // used
+ next_free_line_slot
+ += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line + 2;
+ }
+ else
+ {
+ dof_handler.faces
+ ->lines.dof_offsets[cell->face(face)->index()]
+ = next_free_line_slot;
+
+ // set first slot
+ // for this line to
+ // active_fe_index
+ // of this face
+ dof_handler.faces
+ ->lines.dofs[next_free_line_slot]
+ = cell->active_fe_index();
+
+ // the next
+ // dofs_per_line
+ // indices remain
+ // unset for the
+ // moment (i.e. at
+ // invalid_dof_index).
+ //
+ // then comes the
+ // fe_index for the
+ // neighboring
+ // cell:
+ dof_handler.faces
+ ->lines.dofs[next_free_line_slot
+ +
+ (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line
+ +
+ 1]
+ = cell->neighbor(face)->active_fe_index();
+ // then again a set
+ // of dofs that we
+ // need not set
+ // right now
+ //
+ // following this
+ // comes the stop
+ // index, which
+ // also is
+ // invalid_dof_index
+ // and therefore
+ // does not have to
+ // be explicitly
+ // set
+
+ // finally, mark
+ // those slots as
+ // used
+ next_free_line_slot
+ += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_line
+ +
+ (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
+ .dofs_per_line
+ +
+ 3);
+ }
+
+ // mark this face as
+ // visited
+ cell->face(face)->set_user_flag ();
+ }
+
+ // we should have moved the
+ // cursor to the total number
+ // of line slots. check that
+ Assert (next_free_line_slot == n_line_slots,
+ ExcInternalError());
+
+ // at the end, restore the user
+ // flags for the lines
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .load_user_flags_line (saved_line_user_flags);
+ }
+
+
+ // VERTEX DOFS
+ reserve_space_vertices (dof_handler);
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ reserve_space (DoFHandler<3,spacedim> &dof_handler)
+ {
+ const unsigned int dim = 3;
+
+ typedef DoFHandler<dim,spacedim> BaseClass;
+
+ Assert (dof_handler.finite_elements != 0,
+ typename BaseClass::ExcNoFESelected());
+ Assert (dof_handler.finite_elements->size() > 0,
+ typename BaseClass::ExcNoFESelected());
+ Assert (dof_handler.tria->n_levels() > 0,
+ typename BaseClass::ExcInvalidTriangulation());
+ Assert (dof_handler.tria->n_levels() == dof_handler.levels.size (),
+ ExcInternalError ());
+
+ // Release all space except the
+ // active_fe_indices field which
+ // we have to backup before
+ {
+ std::vector<std::vector<unsigned int> >
+ active_fe_backup(dof_handler.levels.size ());
+ for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
+ std::swap (dof_handler.levels[level]->active_fe_indices,
+ active_fe_backup[level]);
+
+ // delete all levels and set them up
+ // newly, since vectors are
+ // troublesome if you want to change
+ // their size
+ dof_handler.clear_space ();
+
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
+ dof_handler.levels.push_back (new internal::hp::DoFLevel<dim>);
+ std::swap (active_fe_backup[level],
+ dof_handler.levels[level]->active_fe_indices);
+ }
+ dof_handler.faces = new internal::hp::DoFFaces<3>;
+ }
+
+
+ // HEX (CELL) DOFs
+
+ // count how much space we need
+ // on each level for the cell
+ // dofs and set the
+ // dof_*_offsets
+ // data. initially set the latter
+ // to an invalid index, and only
+ // later set it to something
+ // reasonable for active cells
+ //
+ // note that for cells, the
+ // situation is simpler than for
+ // other (lower dimensional)
+ // objects since exactly one
+ // finite element is used for it
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
+ dof_handler.levels[level]->dof_object.dof_offsets
+ = std::vector<unsigned int> (dof_handler.tria->n_raw_hexs(level),
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+
- unsigned int next_free_dof = 0;
++ types::global_dof_index next_free_dof = 0;
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(level);
+ cell!=dof_handler.end_active(level); ++cell)
+ if (!cell->has_children())
+ {
+ dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
+ next_free_dof += cell->get_fe().dofs_per_hex;
+ }
+
+ dof_handler.levels[level]->dof_object.dofs
- = std::vector<unsigned int> (next_free_dof,
- DoFHandler<dim,spacedim>::invalid_dof_index);
++ = std::vector<types::global_dof_index> (next_free_dof,
++ DoFHandler<dim,spacedim>::invalid_dof_index);
+ }
+
+ // safety check: make sure that
+ // the number of DoFs we
+ // allocated is actually correct
+ // (above we have also set the
+ // dof_*_offsets field, so
+ // we couldn't use this simpler
+ // algorithm)
#ifdef DEBUG
- for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
- {
- types::global_dof_index counter = 0;
- for (typename DoFHandler<dim,spacedim>::cell_iterator
- cell=dof_handler.begin_active(level);
- cell!=dof_handler.end_active(level); ++cell)
- if (!cell->has_children())
- counter += cell->get_fe().dofs_per_hex;
-
- Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
- ExcInternalError());
- Assert (static_cast<unsigned int>
- (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
- dof_handler.levels[level]->dof_object.dof_offsets.end(),
- DoFHandler<dim,spacedim>::invalid_dof_index))
- ==
- dof_handler.tria->n_raw_hexs(level) - dof_handler.tria->n_active_hexs(level),
- ExcInternalError());
- }
+ for (unsigned int level=0; level<dof_handler.tria->n_levels(); ++level)
+ {
- unsigned int counter = 0;
++ types::global_dof_index counter = 0;
+ for (typename DoFHandler<dim,spacedim>::cell_iterator
+ cell=dof_handler.begin_active(level);
+ cell!=dof_handler.end_active(level); ++cell)
+ if (!cell->has_children())
+ counter += cell->get_fe().dofs_per_hex;
+
+ Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
+ ExcInternalError());
+ Assert (static_cast<unsigned int>
+ (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
+ dof_handler.levels[level]->dof_object.dof_offsets.end(),
+ DoFHandler<dim,spacedim>::invalid_dof_index))
+ ==
+ dof_handler.tria->n_raw_hexs(level) - dof_handler.tria->n_active_hexs(level),
+ ExcInternalError());
+ }
#endif
- // QUAD DOFS
- //
- // same here: count quad dofs,
- // then allocate as much space as
- // we need and prime the linked
- // list for quad (see the
- // description in hp::DoFLevels)
- // with the indices we will
- // need. note that our task is
- // more complicated since two
- // adjacent dof_handler.cells may have
- // different active_fe_indices,
- // in which case we need to
- // allocate *two* sets of line
- // dofs for the same line
- //
- // the way we do things is that
- // we loop over all active dof_handler.cells
- // (these are the ones that have
- // DoFs only anyway) and all
- // their dof_handler.faces. We note in the
- // user flags whether we have
- // previously visited a face and
- // if so skip it (consequently,
- // we have to save and later
- // restore the line flags)
- {
- std::vector<bool> saved_quad_user_flags;
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .save_user_flags_quad (saved_quad_user_flags);
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .clear_user_flags_quad ();
-
- // examine, how how many
- // slots (see the hp::DoFLevel
- // class) we will have to store
- unsigned int n_quad_slots = 0;
-
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (! cell->face(face)->user_flag_set())
- {
- // ok, face has not been
- // visited. so we need to
- // allocate space for
- // it. let's see how much
- // we need: we need one
- // set if a) there is no
- // neighbor behind this
- // face, or b) the
- // neighbor is not on the
- // same level or further
- // refined, or c) the
- // neighbor is on the
- // same level, but
- // happens to have the
- // same active_fe_index:
- if (cell->at_boundary(face)
- ||
- cell->face(face)->has_children()
- ||
- cell->neighbor_is_coarser(face)
- ||
- (!cell->at_boundary(face)
- &&
- (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
- // ok, one set of
- // dofs. that makes
- // one index, 1 times
- // dofs_per_quad
- // dofs, and one stop
- // index
- n_quad_slots
- += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad + 2;
-
- // otherwise we do
- // indeed need two
- // sets, i.e. two
- // indices, two sets of
- // dofs, and one stop
- // index:
- else
- n_quad_slots
- += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad
- +
- (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
- .dofs_per_quad
- +
- 3);
-
- // mark this face as
- // visited
- cell->face(face)->set_user_flag ();
- }
-
- // now that we know how many
- // quad dofs we will have to
- // have, allocate
- // the memory. note that we
- // allocate offsets for all
- // quads, though only the
- // active ones will have a
- // non-invalid value later on
- if (true)
- {
- dof_handler.faces->quads.dof_offsets
- = std::vector<unsigned int> (dof_handler.tria->n_raw_quads(),
- DoFHandler<dim,spacedim>::invalid_dof_index);
- dof_handler.faces->quads.dofs
- = std::vector<types::global_dof_index> (n_quad_slots,
- DoFHandler<dim,spacedim>::invalid_dof_index);
- }
-
- // with the memory now
- // allocated, loop over the
- // dof_handler.cells again and prime the
- // _offset values as well as
- // the fe_index fields
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .clear_user_flags_quad ();
-
- unsigned int next_free_quad_slot = 0;
-
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (! cell->face(face)->user_flag_set())
- {
- // same decision tree
- // as before
- if (cell->at_boundary(face)
- ||
- cell->face(face)->has_children()
- ||
- cell->neighbor_is_coarser(face)
- ||
- (!cell->at_boundary(face)
- &&
- (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
- {
- dof_handler.faces
- ->quads.dof_offsets[cell->face(face)->index()]
- = next_free_quad_slot;
-
- // set first slot
- // for this quad to
- // active_fe_index
- // of this face
- dof_handler.faces
- ->quads.dofs[next_free_quad_slot]
- = cell->active_fe_index();
-
- // the next
- // dofs_per_quad
- // indices remain
- // unset for the
- // moment (i.e. at
- // invalid_dof_index).
- // following this
- // comes the stop
- // index, which
- // also is
- // invalid_dof_index
- // and therefore
- // does not have to
- // be explicitly
- // set
-
- // finally, mark
- // those slots as
- // used
- next_free_quad_slot
- += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad + 2;
- }
- else
- {
- dof_handler.faces
- ->quads.dof_offsets[cell->face(face)->index()]
- = next_free_quad_slot;
-
- // set first slot
- // for this quad to
- // active_fe_index
- // of this face
- dof_handler.faces
- ->quads.dofs[next_free_quad_slot]
- = cell->active_fe_index();
-
- // the next
- // dofs_per_quad
- // indices remain
- // unset for the
- // moment (i.e. at
- // invalid_dof_index).
- //
- // then comes the
- // fe_index for the
- // neighboring
- // cell:
- dof_handler.faces
- ->quads.dofs[next_free_quad_slot
- +
- (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad
- +
- 1]
- = cell->neighbor(face)->active_fe_index();
- // then again a set
- // of dofs that we
- // need not set
- // right now
- //
- // following this
- // comes the stop
- // index, which
- // also is
- // invalid_dof_index
- // and therefore
- // does not have to
- // be explicitly
- // set
-
- // finally, mark
- // those slots as
- // used
- next_free_quad_slot
- += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad
- +
- (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
- .dofs_per_quad
- +
- 3);
- }
-
- // mark this face as
- // visited
- cell->face(face)->set_user_flag ();
- }
-
- // we should have moved the
- // cursor to the total number
- // of dofs. check that
- Assert (next_free_quad_slot == n_quad_slots,
- ExcInternalError());
-
- // at the end, restore the user
- // flags for the quads
- const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
- .load_user_flags_quad (saved_quad_user_flags);
- }
-
-
- // LINE DOFS
-
- // the situation here is pretty
- // much like with vertices: there
- // can be an arbitrary number of
- // finite elements associated
- // with each line.
- //
- // the algorithm we use is
- // somewhat similar to what we do
- // in reserve_space_vertices()
- if (true)
- {
- // what we do first is to set up
- // an array in which we record
- // whether a line is associated
- // with any of the given fe's, by
- // setting a bit. in a later
- // step, we then actually
- // allocate memory for the
- // required dofs
- std::vector<std::vector<bool> >
- line_fe_association (dof_handler.finite_elements->size(),
- std::vector<bool> (dof_handler.tria->n_raw_lines(),
- false));
-
- for (typename DoFHandler<dim,spacedim>::active_cell_iterator
- cell=dof_handler.begin_active();
- cell!=dof_handler.end(); ++cell)
- for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
- line_fe_association[cell->active_fe_index()][cell->line_index(l)]
- = true;
-
- // first check which of the
- // lines is used at all,
- // i.e. is associated with a
- // finite element. we do this
- // since not all lines may
- // actually be used, in which
- // case we do not have to
- // allocate any memory at
- // all
- std::vector<bool> line_is_used (dof_handler.tria->n_raw_lines(), false);
- for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
- for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
- if (line_fe_association[fe][line] == true)
- {
- line_is_used[line] = true;
- break;
- }
-
- // next count how much memory
- // we actually need. for each
- // line, we need one slot per
- // fe to store the fe_index,
- // plus dofs_per_line for
- // this fe. in addition, we
- // need one slot as the end
- // marker for the
- // fe_indices. at the same
- // time already fill the
- // line_dofs_offsets field
- dof_handler.faces->lines.dof_offsets
- .resize (dof_handler.tria->n_raw_lines(),
- numbers::invalid_unsigned_int);
-
- unsigned int line_slots_needed = 0;
- for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
- if (line_is_used[line] == true)
- {
- dof_handler.faces->lines.dof_offsets[line] = line_slots_needed;
-
- for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
- if (line_fe_association[fe][line] == true)
- line_slots_needed += (*dof_handler.finite_elements)[fe].dofs_per_line + 1;
- ++line_slots_needed;
- }
-
- // now allocate the space we
- // have determined we need, and
- // set up the linked lists for
- // each of the lines
- dof_handler.faces->lines.dofs.resize (line_slots_needed,
- DoFHandler<dim,spacedim>::invalid_dof_index);
- for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
- if (line_is_used[line] == true)
- {
- unsigned int pointer = dof_handler.faces->lines.dof_offsets[line];
- for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
- if (line_fe_association[fe][line] == true)
- {
- // if this line
- // uses this fe,
- // then set the
- // fe_index and
- // move the
- // pointer ahead
- dof_handler.faces->lines.dofs[pointer] = fe;
- pointer += (*dof_handler.finite_elements)[fe].dofs_per_line + 1;
- }
- // finally place the end
- // marker
- dof_handler.faces->lines.dofs[pointer] = numbers::invalid_unsigned_int;
- }
- }
-
-
-
- // VERTEX DOFS
- reserve_space_vertices (dof_handler);
- }
-
-
- /**
- * Implement the function of same name
- * in the mother class.
- */
- template <int spacedim>
- static
- unsigned int
- max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler)
- {
- return std::min(3*dof_handler.finite_elements->max_dofs_per_vertex() +
- 2*dof_handler.finite_elements->max_dofs_per_line(), dof_handler.n_dofs());
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- max_couplings_between_dofs (const DoFHandler<2,spacedim> &dof_handler)
- {
- // get these numbers by drawing pictures
- // and counting...
- // example:
- //   |     |     |
- // --x-----x--x--X--
- //   |     |  |  |
- //   |     x--x--x
- //   |     |  |  |
- // --x--x--*--x--x--
- //   |  |  |     |
- //   x--x--x     |
- //   |  |  |     |
- // --X--x--x-----x--
- //   |     |     |
- // x = vertices connected with center vertex *;
- // = total of 19
- // (the X vertices are connected with * if
- // the vertices adjacent to X are hanging
- // nodes)
- // count lines -> 28 (don't forget to count
- // mother and children separately!)
- unsigned int max_couplings;
- switch (dof_handler.tria->max_adjacent_cells())
- {
- case 4:
- max_couplings=19*dof_handler.finite_elements->max_dofs_per_vertex() +
- 28*dof_handler.finite_elements->max_dofs_per_line() +
- 8*dof_handler.finite_elements->max_dofs_per_quad();
- break;
- case 5:
- max_couplings=21*dof_handler.finite_elements->max_dofs_per_vertex() +
- 31*dof_handler.finite_elements->max_dofs_per_line() +
- 9*dof_handler.finite_elements->max_dofs_per_quad();
- break;
- case 6:
- max_couplings=28*dof_handler.finite_elements->max_dofs_per_vertex() +
- 42*dof_handler.finite_elements->max_dofs_per_line() +
- 12*dof_handler.finite_elements->max_dofs_per_quad();
- break;
- case 7:
- max_couplings=30*dof_handler.finite_elements->max_dofs_per_vertex() +
- 45*dof_handler.finite_elements->max_dofs_per_line() +
- 13*dof_handler.finite_elements->max_dofs_per_quad();
- break;
- case 8:
- max_couplings=37*dof_handler.finite_elements->max_dofs_per_vertex() +
- 56*dof_handler.finite_elements->max_dofs_per_line() +
- 16*dof_handler.finite_elements->max_dofs_per_quad();
- break;
- default:
- Assert (false, ExcNotImplemented());
- max_couplings=0;
- };
- return std::min(max_couplings,dof_handler.n_dofs());
- }
-
-
- template <int spacedim>
- static
- unsigned int
- max_couplings_between_dofs (const DoFHandler<3,spacedim> &dof_handler)
- {
+ // QUAD DOFS
+ //
+ // same here: count quad dofs,
+ // then allocate as much space as
+ // we need and prime the linked
+ // list for quads (see the
+ // description in hp::DoFLevels)
+ // with the indices we will
+ // need. note that our task is
+ // more complicated since two
+ // adjacent cells may have
+ // different active_fe_indices,
+ // in which case we need to
+ // allocate *two* sets of quad
+ // dofs for the same quad
+ //
+ // the way we do things is that
+ // we loop over all active cells
+ // (these are the only ones that
+ // have DoFs anyway) and all
+ // their faces. We note in the
+ // user flags whether we have
+ // previously visited a face and
+ // if so skip it (consequently,
+ // we have to save and later
+ // restore the quad flags)
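+ //
+ // (illustrative note only: the
+ // slot layout is the same as
+ // sketched for the line dofs in
+ // the 2d case above, with
+ // dofs_per_quad taking the
+ // place of dofs_per_line)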
+ {
+ std::vector<bool> saved_quad_user_flags;
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .save_user_flags_quad (saved_quad_user_flags);
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .clear_user_flags_quad ();
+
+ // examine how many
+ // slots (see the hp::DoFLevel
+ // class) we will have to store
+ unsigned int n_quad_slots = 0;
+
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (! cell->face(face)->user_flag_set())
+ {
+ // ok, face has not been
+ // visited. so we need to
+ // allocate space for
+ // it. let's see how much
+ // we need: we need one
+ // set if a) there is no
+ // neighbor behind this
+ // face, or b) the
+ // neighbor is not on the
+ // same level or further
+ // refined, or c) the
+ // neighbor is on the
+ // same level, but
+ // happens to have the
+ // same active_fe_index:
+ if (cell->at_boundary(face)
+ ||
+ cell->face(face)->has_children()
+ ||
+ cell->neighbor_is_coarser(face)
+ ||
+ (!cell->at_boundary(face)
+ &&
+ (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
+ // ok, one set of
+ // dofs. that makes
+ // one index, 1 times
+ // dofs_per_quad
+ // dofs, and one stop
+ // index
+ n_quad_slots
+ += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad + 2;
+
+ // otherwise we do
+ // indeed need two
+ // sets, i.e. two
+ // indices, two sets of
+ // dofs, and one stop
+ // index:
+ else
+ n_quad_slots
+ += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad
+ +
+ (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
+ .dofs_per_quad
+ +
+ 3);
+
+ // mark this face as
+ // visited
+ cell->face(face)->set_user_flag ();
+ }
+
+ // now that we know how many
+ // quad dofs we will have to
+ // have, allocate
+ // the memory. note that we
+ // allocate offsets for all
+ // quads, though only the
+ // active ones will have a
+ // non-invalid value later on
+ if (true)
+ {
+ dof_handler.faces->quads.dof_offsets
+ = std::vector<unsigned int> (dof_handler.tria->n_raw_quads(),
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+ dof_handler.faces->quads.dofs
- = std::vector<unsigned int> (n_quad_slots,
- DoFHandler<dim,spacedim>::invalid_dof_index);
++ = std::vector<types::global_dof_index> (n_quad_slots,
++ DoFHandler<dim,spacedim>::invalid_dof_index);
+ }
+
+ // with the memory now
+ // allocated, loop over the
+ // cells again and prime the
+ // _offset values as well as
+ // the fe_index fields
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .clear_user_flags_quad ();
+
+ unsigned int next_free_quad_slot = 0;
+
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (! cell->face(face)->user_flag_set())
+ {
+ // same decision tree
+ // as before
+ if (cell->at_boundary(face)
+ ||
+ cell->face(face)->has_children()
+ ||
+ cell->neighbor_is_coarser(face)
+ ||
+ (!cell->at_boundary(face)
+ &&
+ (cell->active_fe_index() == cell->neighbor(face)->active_fe_index())))
+ {
+ dof_handler.faces
+ ->quads.dof_offsets[cell->face(face)->index()]
+ = next_free_quad_slot;
+
+ // set first slot
+ // for this quad to
+ // active_fe_index
+ // of this face
+ dof_handler.faces
+ ->quads.dofs[next_free_quad_slot]
+ = cell->active_fe_index();
+
+ // the next
+ // dofs_per_quad
+ // indices remain
+ // unset for the
+ // moment (i.e. at
+ // invalid_dof_index).
+ // following this
+ // comes the stop
+ // index, which
+ // also is
+ // invalid_dof_index
+ // and therefore
+ // does not have to
+ // be explicitly
+ // set
+
+ // finally, mark
+ // those slots as
+ // used
+ next_free_quad_slot
+ += (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad + 2;
+ }
+ else
+ {
+ dof_handler.faces
+ ->quads.dof_offsets[cell->face(face)->index()]
+ = next_free_quad_slot;
+
+ // set first slot
+ // for this quad to
+ // active_fe_index
+ // of this face
+ dof_handler.faces
+ ->quads.dofs[next_free_quad_slot]
+ = cell->active_fe_index();
+
+ // the next
+ // dofs_per_quad
+ // indices remain
+ // unset for the
+ // moment (i.e. at
+ // invalid_dof_index).
+ //
+ // then comes the
+ // fe_index for the
+ // neighboring
+ // cell:
+ dof_handler.faces
+ ->quads.dofs[next_free_quad_slot
+ +
+ (*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad
+ +
+ 1]
+ = cell->neighbor(face)->active_fe_index();
+ // then again a set
+ // of dofs that we
+ // need not set
+ // right now
+ //
+ // following this
+ // comes the stop
+ // index, which
+ // also is
+ // invalid_dof_index
+ // and therefore
+ // does not have to
+ // be explicitly
+ // set
+
+ // finally, mark
+ // those slots as
+ // used
+ next_free_quad_slot
+ += ((*dof_handler.finite_elements)[cell->active_fe_index()].dofs_per_quad
+ +
+ (*dof_handler.finite_elements)[cell->neighbor(face)->active_fe_index()]
+ .dofs_per_quad
+ +
+ 3);
+ }
+
+ // mark this face as
+ // visited
+ cell->face(face)->set_user_flag ();
+ }
+
+ // we should have moved the
+ // cursor to the total number
+ // of dofs. check that
+ Assert (next_free_quad_slot == n_quad_slots,
+ ExcInternalError());
+
+ // at the end, restore the user
+ // flags for the quads
+ const_cast<dealii::Triangulation<dim,spacedim>&>(*dof_handler.tria)
+ .load_user_flags_quad (saved_quad_user_flags);
+ }
+
+
+ // LINE DOFS
+
+ // the situation here is pretty
+ // much like with vertices: there
+ // can be an arbitrary number of
+ // finite elements associated
+ // with each line.
+ //
+ // the algorithm we use is
+ // somewhat similar to what we do
+ // in reserve_space_vertices()
+ if (true)
+ {
+ // what we do first is to set up
+ // an array in which we record
+ // whether a line is associated
+ // with any of the given fe's, by
+ // setting a bit. in a later
+ // step, we then actually
+ // allocate memory for the
+ // required dofs
+ std::vector<std::vector<bool> >
+ line_fe_association (dof_handler.finite_elements->size(),
+ std::vector<bool> (dof_handler.tria->n_raw_lines(),
+ false));
+
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell=dof_handler.begin_active();
+ cell!=dof_handler.end(); ++cell)
+ for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
+ line_fe_association[cell->active_fe_index()][cell->line_index(l)]
+ = true;
+
+ // first check which of the
+ // lines is used at all,
+ // i.e. is associated with a
+ // finite element. we do this
+ // since not all lines may
+ // actually be used, in which
+ // case we do not have to
+ // allocate any memory at
+ // all
+ std::vector<bool> line_is_used (dof_handler.tria->n_raw_lines(), false);
+ for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
+ for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
+ if (line_fe_association[fe][line] == true)
+ {
+ line_is_used[line] = true;
+ break;
+ }
+
+ // next count how much memory
+ // we actually need. for each
+ // line, we need one slot per
+ // fe to store the fe_index,
+ // plus dofs_per_line for
+ // this fe. in addition, we
+ // need one slot as the end
+ // marker for the
+ // fe_indices. at the same
+ // time already fill the
+ // line_dofs_offsets field
+ dof_handler.faces->lines.dof_offsets
+ .resize (dof_handler.tria->n_raw_lines(),
+ numbers::invalid_unsigned_int);
+
+ unsigned int line_slots_needed = 0;
+ for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
+ if (line_is_used[line] == true)
+ {
+ dof_handler.faces->lines.dof_offsets[line] = line_slots_needed;
+
+ for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
+ if (line_fe_association[fe][line] == true)
+ line_slots_needed += (*dof_handler.finite_elements)[fe].dofs_per_line + 1;
+ ++line_slots_needed;
+ }
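+
+ // (worked example with made-up
+ // numbers, for illustration
+ // only: a line used by fe 0
+ // with dofs_per_line==1 and by
+ // fe 2 with dofs_per_line==2
+ // needs (1+1)+(2+1)+1 = 6
+ // slots, later primed as
+ //   [0, dof, 2, dof, dof, end-marker]
+ // by the loop below)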
+
+ // now allocate the space we
+ // have determined we need, and
+ // set up the linked lists for
+ // each of the lines
+ dof_handler.faces->lines.dofs.resize (line_slots_needed,
+ DoFHandler<dim,spacedim>::invalid_dof_index);
+ for (unsigned int line=0; line<dof_handler.tria->n_raw_lines(); ++line)
+ if (line_is_used[line] == true)
+ {
+ unsigned int pointer = dof_handler.faces->lines.dof_offsets[line];
+ for (unsigned int fe=0; fe<dof_handler.finite_elements->size(); ++fe)
+ if (line_fe_association[fe][line] == true)
+ {
+ // if this line
+ // uses this fe,
+ // then set the
+ // fe_index and
+ // move the
+ // pointer ahead
+ dof_handler.faces->lines.dofs[pointer] = fe;
+ pointer += (*dof_handler.finite_elements)[fe].dofs_per_line + 1;
+ }
+ // finally place the end
+ // marker
+ dof_handler.faces->lines.dofs[pointer] = numbers::invalid_unsigned_int;
+ }
+ }
+
+
+
+ // VERTEX DOFS
+ reserve_space_vertices (dof_handler);
+ }
+
+
+ /**
+ * Implement the function of same name
+ * in the mother class.
+ */
+ template <int spacedim>
+ static
+ unsigned int
+ max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler)
+ {
+ return std::min(3*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 2*dof_handler.finite_elements->max_dofs_per_line(), dof_handler.n_dofs());
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ max_couplings_between_dofs (const DoFHandler<2,spacedim> &dof_handler)
+ {
+ // get these numbers by drawing pictures
+ // and counting...
+ // example:
+ //   |     |     |
+ // --x-----x--x--X--
+ //   |     |  |  |
+ //   |     x--x--x
+ //   |     |  |  |
+ // --x--x--*--x--x--
+ //   |  |  |     |
+ //   x--x--x     |
+ //   |  |  |     |
+ // --X--x--x-----x--
+ //   |     |     |
+ // x = vertices connected with center vertex *;
+ // = total of 19
+ // (the X vertices are connected with * if
+ // the vertices adjacent to X are hanging
+ // nodes)
+ // count lines -> 28 (don't forget to count
+ // mother and children separately!)
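+ // (illustrative arithmetic with
+ // a concrete element, not from
+ // the original sources: for a
+ // collection containing only Q1
+ // elements, max_dofs_per_vertex()
+ // is 1 and the line and quad
+ // terms vanish, so the regular
+ // case 4 below gives
+ // 19*1 + 28*0 + 8*0 = 19,
+ // which is finally capped by
+ // n_dofs())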
+ unsigned int max_couplings;
+ switch (dof_handler.tria->max_adjacent_cells())
+ {
+ case 4:
+ max_couplings=19*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 28*dof_handler.finite_elements->max_dofs_per_line() +
+ 8*dof_handler.finite_elements->max_dofs_per_quad();
+ break;
+ case 5:
+ max_couplings=21*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 31*dof_handler.finite_elements->max_dofs_per_line() +
+ 9*dof_handler.finite_elements->max_dofs_per_quad();
+ break;
+ case 6:
+ max_couplings=28*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 42*dof_handler.finite_elements->max_dofs_per_line() +
+ 12*dof_handler.finite_elements->max_dofs_per_quad();
+ break;
+ case 7:
+ max_couplings=30*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 45*dof_handler.finite_elements->max_dofs_per_line() +
+ 13*dof_handler.finite_elements->max_dofs_per_quad();
+ break;
+ case 8:
+ max_couplings=37*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 56*dof_handler.finite_elements->max_dofs_per_line() +
+ 16*dof_handler.finite_elements->max_dofs_per_quad();
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ max_couplings=0;
+ };
+ return std::min(max_couplings,dof_handler.n_dofs());
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ max_couplings_between_dofs (const DoFHandler<3,spacedim> &dof_handler)
+ {
//TODO:[?] Invent significantly better estimates than the ones in this function
- // doing the same thing here is a rather
- // complicated thing, compared to the 2d
- // case, since it is hard to draw pictures
- // with several refined hexahedra :-) so I
- // presently only give a coarse estimate
- // for the case that at most 8 hexes meet
- // at each vertex
- //
- // can anyone give better estimate here?
- const unsigned int max_adjacent_cells = dof_handler.tria->max_adjacent_cells();
-
- unsigned int max_couplings;
- if (max_adjacent_cells <= 8)
- max_couplings=7*7*7*dof_handler.finite_elements->max_dofs_per_vertex() +
- 7*6*7*3*dof_handler.finite_elements->max_dofs_per_line() +
- 9*4*7*3*dof_handler.finite_elements->max_dofs_per_quad() +
- 27*dof_handler.finite_elements->max_dofs_per_hex();
- else
- {
- Assert (false, ExcNotImplemented());
- max_couplings=0;
- }
-
- return std::min(max_couplings,dof_handler.n_dofs());
- }
+ // doing the same thing here is a rather
+ // complicated thing, compared to the 2d
+ // case, since it is hard to draw pictures
+ // with several refined hexahedra :-) so I
+ // presently only give a coarse estimate
+ // for the case that at most 8 hexes meet
+ // at each vertex
+ //
+ // can anyone give better estimate here?
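+ //
+ // (again only as an
+ // illustration: for a pure Q1
+ // collection only the vertex
+ // term survives, i.e.
+ // 7*7*7*1 = 343 couplings,
+ // capped by n_dofs() at the
+ // end)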
+ const unsigned int max_adjacent_cells = dof_handler.tria->max_adjacent_cells();
+
+ unsigned int max_couplings;
+ if (max_adjacent_cells <= 8)
+ max_couplings=7*7*7*dof_handler.finite_elements->max_dofs_per_vertex() +
+ 7*6*7*3*dof_handler.finite_elements->max_dofs_per_line() +
+ 9*4*7*3*dof_handler.finite_elements->max_dofs_per_quad() +
+ 27*dof_handler.finite_elements->max_dofs_per_hex();
+ else
+ {
+ Assert (false, ExcNotImplemented());
+ max_couplings=0;
+ }
+
+ return std::min(max_couplings,dof_handler.n_dofs());
+ }
};
}
}
Assert (finite_elements != 0, ExcNoFESelected());
DoFHandler<1,1>::cell_iterator cell;
- unsigned int n = 0;
+ types::global_dof_index n = 0;
- // search left-most cell
+ // search left-most cell
cell = this->begin_active();
while (!cell->at_boundary(0))
cell = cell->neighbor(0);
ExcInvalidBoundaryIndicator());
DoFHandler<1,1>::active_cell_iterator cell;
- unsigned int n = 0;
+ types::global_dof_index n = 0;
- // search left-most cell
+ // search left-most cell
if (boundary_indicators.find (0) != boundary_indicators.end())
{
cell = this->begin_active();
ExcInvalidBoundaryIndicator());
DoFHandler<1,1>::active_cell_iterator cell;
- unsigned int n = 0;
+ types::global_dof_index n = 0;
- // search left-most cell
+ // search left-most cell
if (boundary_indicators.find (0) != boundary_indicators.end())
{
cell = this->begin_active();
}
- template <>
- unsigned int DoFHandler<1,3>::n_boundary_dofs () const
+
+ template <>
++ types::global_dof_index DoFHandler<1,3>::n_boundary_dofs () const
{
Assert(false,ExcNotImplemented());
return 0;
{
Assert (finite_elements != 0, ExcNoFESelected());
- std::set<int> boundary_dofs;
- std::vector<unsigned int> dofs_on_face;
+ std::set<types::global_dof_index> boundary_dofs;
+ std::vector<types::global_dof_index> dofs_on_face;
dofs_on_face.reserve (this->get_fe ().max_dofs_per_face());
- // loop over all faces to check
- // whether they are at a
- // boundary. note that we need not
- // take special care of single
- // lines in 3d (using
- // @p{cell->has_boundary_lines}),
- // since we do not support
- // boundaries of dimension dim-2,
- // and so every boundary line is
- // also part of a boundary face.
+ // loop over all faces to check
+ // whether they are at a
+ // boundary. note that we need not
+ // take special care of single
+ // lines in 3d (using
+ // @p{cell->has_boundary_lines}),
+ // since we do not support
+ // boundaries of dimension dim-2,
+ // and so every boundary line is
+ // also part of a boundary face.
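+ //
+ // (explanatory note, added for
+ // clarity: a dof sitting on a
+ // boundary vertex or edge is
+ // reached from more than one
+ // boundary face, which is why
+ // the indices are collected in
+ // the std::set above and each
+ // one is counted only once)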
typename DoFHandler<dim,spacedim>::active_cell_iterator cell = this->begin_active (),
- endc = this->end();
+ endc = this->end();
for (; cell!=endc; ++cell)
for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
if (cell->at_boundary(f))
Assert (boundary_indicators.find(numbers::internal_face_boundary_id) == boundary_indicators.end(),
ExcInvalidBoundaryIndicator());
- // same as above, but with
- // additional checks for set of
- // boundary indicators
+ // same as above, but with
+ // additional checks for set of
+ // boundary indicators
- std::set<int> boundary_dofs;
- std::vector<unsigned int> dofs_on_face;
+ std::set<types::global_dof_index> boundary_dofs;
+ std::vector<types::global_dof_index> dofs_on_face;
dofs_on_face.reserve (this->get_fe ().max_dofs_per_face());
typename DoFHandler<dim,spacedim>::active_cell_iterator cell = this->begin_active (),
Assert (boundary_indicators.find (numbers::internal_face_boundary_id) == boundary_indicators.end(),
ExcInvalidBoundaryIndicator());
- // same as above, but with
- // additional checks for set of
- // boundary indicators
+ // same as above, but with
+ // additional checks for set of
+ // boundary indicators
- std::set<int> boundary_dofs;
- std::vector<unsigned int> dofs_on_face;
+ std::set<types::global_dof_index> boundary_dofs;
+ std::vector<types::global_dof_index> dofs_on_face;
dofs_on_face.reserve (this->get_fe ().max_dofs_per_face());
typename DoFHandler<dim,spacedim>::active_cell_iterator cell = this->begin_active (),
template<int dim, int spacedim>
void
DoFHandler<dim,spacedim>::
- compute_vertex_dof_identities (std::vector<unsigned int> &new_dof_indices) const
+ compute_vertex_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const
{
- // Note: we may wish to have
- // something here similar to what
- // we do for lines and quads,
- // namely that we only identify
- // dofs for any fe towards the
- // most dominating one. however,
- // it is not clear whether this
- // is actually necessary for
- // vertices at all, I can't think
- // of a finite element that would
- // make that necessary...
+ // Note: we may wish to have
+ // something here similar to what
+ // we do for lines and quads,
+ // namely that we only identify
+ // dofs for any fe towards the
+ // most dominating one. however,
+ // it is not clear whether this
+ // is actually necessary for
+ // vertices at all, I can't think
+ // of a finite element that would
+ // make that necessary...
Table<2,std_cxx1x::shared_ptr<internal::hp::DoFIdentities> >
- vertex_dof_identities (get_fe().size(),
- get_fe().size());
+ vertex_dof_identities (get_fe().size(),
+ get_fe().size());
- // loop over all vertices and
- // see which one we need to
- // work on
+ // loop over all vertices and
+ // see which one we need to
+ // work on
for (unsigned int vertex_index=0; vertex_index<get_tria().n_vertices();
- ++vertex_index)
+ ++vertex_index)
{
- const unsigned int n_active_fe_indices
- = internal::DoFAccessor::Implementation::
- n_active_vertex_fe_indices (*this, vertex_index);
- if (n_active_fe_indices > 1)
- {
- const unsigned int
- first_fe_index
- = internal::DoFAccessor::Implementation::
- nth_active_vertex_fe_index (*this, vertex_index, 0);
-
- // loop over all the
- // other FEs with which
- // we want to identify
- // the DoF indices of
- // the first FE of
- for (unsigned int f=1; f<n_active_fe_indices; ++f)
- {
- const unsigned int
- other_fe_index
- = internal::DoFAccessor::Implementation::
- nth_active_vertex_fe_index (*this, vertex_index, f);
-
- // make sure the
- // entry in the
- // equivalence
- // table exists
- internal::hp::ensure_existence_of_dof_identities<0>
- (get_fe()[first_fe_index],
- get_fe()[other_fe_index],
- vertex_dof_identities[first_fe_index][other_fe_index]);
-
- // then loop
- // through the
- // identities we
- // have. first get
- // the global
- // numbers of the
- // dofs we want to
- // identify and
- // make sure they
- // are not yet
- // constrained to
- // anything else,
- // except for to
- // each other. use
- // the rule that we
- // will always
- // constrain the
- // dof with the
- // higher fe
- // index to the
- // one with the
- // lower, to avoid
- // circular
- // reasoning.
- internal::hp::DoFIdentities &identities
- = *vertex_dof_identities[first_fe_index][other_fe_index];
- for (unsigned int i=0; i<identities.size(); ++i)
- {
- const types::global_dof_index lower_dof_index
- = internal::DoFAccessor::Implementation::
- get_vertex_dof_index (*this,
- vertex_index,
- first_fe_index,
- identities[i].first);
- const types::global_dof_index higher_dof_index
- = internal::DoFAccessor::Implementation::
- get_vertex_dof_index (*this,
- vertex_index,
- other_fe_index,
- identities[i].second);
-
- Assert ((new_dof_indices[higher_dof_index] ==
- numbers::invalid_unsigned_int)
- ||
- (new_dof_indices[higher_dof_index] ==
- lower_dof_index),
- ExcInternalError());
-
- new_dof_indices[higher_dof_index] = lower_dof_index;
- }
- }
- }
+ const unsigned int n_active_fe_indices
+ = internal::DoFAccessor::Implementation::
+ n_active_vertex_fe_indices (*this, vertex_index);
+ if (n_active_fe_indices > 1)
+ {
+ const unsigned int
+ first_fe_index
+ = internal::DoFAccessor::Implementation::
+ nth_active_vertex_fe_index (*this, vertex_index, 0);
+
+ // loop over all the
+ // other FEs with which
+ // we want to identify
+ // the DoF indices of
+ // the first FE of
+ for (unsigned int f=1; f<n_active_fe_indices; ++f)
+ {
+ const unsigned int
+ other_fe_index
+ = internal::DoFAccessor::Implementation::
+ nth_active_vertex_fe_index (*this, vertex_index, f);
+
+ // make sure the
+ // entry in the
+ // equivalence
+ // table exists
+ internal::hp::ensure_existence_of_dof_identities<0>
+ (get_fe()[first_fe_index],
+ get_fe()[other_fe_index],
+ vertex_dof_identities[first_fe_index][other_fe_index]);
+
+ // then loop
+ // through the
+ // identities we
+ // have. first get
+ // the global
+ // numbers of the
+ // dofs we want to
+ // identify and
+ // make sure they
+ // are not yet
+ // constrained to
+ // anything else,
+ // except for to
+ // each other. use
+ // the rule that we
+ // will always
+ // constrain the
+ // dof with the
+ // higher fe
+ // index to the
+ // one with the
+ // lower, to avoid
+ // circular
+ // reasoning.
+ internal::hp::DoFIdentities &identities
+ = *vertex_dof_identities[first_fe_index][other_fe_index];
+ for (unsigned int i=0; i<identities.size(); ++i)
+ {
- const unsigned int lower_dof_index
+ const types::global_dof_index lower_dof_index
+ = internal::DoFAccessor::Implementation::
+ get_vertex_dof_index (*this,
+ vertex_index,
+ first_fe_index,
+ identities[i].first);
- const unsigned int higher_dof_index
+ const types::global_dof_index higher_dof_index
+ = internal::DoFAccessor::Implementation::
+ get_vertex_dof_index (*this,
+ vertex_index,
+ other_fe_index,
+ identities[i].second);
+
+ Assert ((new_dof_indices[higher_dof_index] ==
+ numbers::invalid_unsigned_int)
+ ||
+ (new_dof_indices[higher_dof_index] ==
+ lower_dof_index),
+ ExcInternalError());
+
+ new_dof_indices[higher_dof_index] = lower_dof_index;
+ }
+ }
+ }
}
}
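
As an aside, the bookkeeping in compute_vertex_dof_identities above boils down to recording, for every pair of identified degrees of freedom, that the dof of the higher fe index is an alias of the dof of the lower one. A minimal standalone sketch of that rule, using plain std::size_t in place of types::global_dof_index and a max() sentinel in place of numbers::invalid_unsigned_int (no deal.II types involved):

#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

// alias_of[i] == invalid means dof i is not (yet) identified with anything;
// otherwise it stores the index of the dof it has been constrained to.
void record_identity (std::vector<std::size_t> &alias_of,
                      const std::size_t        lower_dof_index,
                      const std::size_t        higher_dof_index)
{
  const std::size_t invalid = std::numeric_limits<std::size_t>::max ();

  // we may see the same identity from several cells touching the vertex,
  // but never a conflicting one
  assert ((alias_of[higher_dof_index] == invalid) ||
          (alias_of[higher_dof_index] == lower_dof_index));

  alias_of[higher_dof_index] = lower_dof_index;
}
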
template<int dim, int spacedim>
void
DoFHandler<dim,spacedim>::
- compute_line_dof_identities (std::vector<unsigned int> &new_dof_indices) const
+ compute_line_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const
{
- // we will mark lines that we
- // have already treated, so first
- // save and clear the user flags
- // on lines and later restore
- // them
+ // we will mark lines that we
+ // have already treated, so first
+ // save and clear the user flags
+ // on lines and later restore
+ // them
std::vector<bool> user_flags;
this->get_tria().save_user_flags_line(user_flags);
const_cast<Triangulation<dim,spacedim> &>(this->get_tria()).clear_user_flags_line ();
for (active_cell_iterator cell=begin_active(); cell!=end(); ++cell)
for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
- if (cell->line(l)->user_flag_set() == false)
- {
- const line_iterator line = cell->line(l);
- line->set_user_flag ();
-
- unsigned int unique_sets_of_dofs
- = line->n_active_fe_indices();
-
- // do a first loop over all sets of
- // dofs and do identity
- // uniquification
- for (unsigned int f=0; f<line->n_active_fe_indices(); ++f)
- for (unsigned int g=f+1; g<line->n_active_fe_indices(); ++g)
- {
- const unsigned int fe_index_1 = line->nth_active_fe_index (f),
- fe_index_2 = line->nth_active_fe_index (g);
-
- if (((*finite_elements)[fe_index_1].dofs_per_line
- ==
- (*finite_elements)[fe_index_2].dofs_per_line)
- &&
- ((*finite_elements)[fe_index_1].dofs_per_line > 0))
- {
- internal::hp::ensure_existence_of_dof_identities<1>
- ((*finite_elements)[fe_index_1],
- (*finite_elements)[fe_index_2],
- line_dof_identities[fe_index_1][fe_index_2]);
- // see if these sets of dofs
- // are identical. the first
- // condition for this is that
- // indeed there are n
- // identities
- if (line_dof_identities[fe_index_1][fe_index_2]->size()
- ==
- (*finite_elements)[fe_index_1].dofs_per_line)
- {
- unsigned int i=0;
- for (; i<(*finite_elements)[fe_index_1].dofs_per_line; ++i)
- if (((*(line_dof_identities[fe_index_1][fe_index_2]))[i].first != i)
- &&
- ((*(line_dof_identities[fe_index_1][fe_index_2]))[i].second != i))
- // not an identity
- break;
-
- if (i == (*finite_elements)[fe_index_1].dofs_per_line)
- {
- // the dofs of these
- // two finite
- // elements are
- // identical. as a
- // safety check,
- // ensure that none
- // of the two FEs is
- // trying to dominate
- // the other, which
- // wouldn't make any
- // sense in this case
- Assert ((*finite_elements)[fe_index_1].compare_for_face_domination
- ((*finite_elements)[fe_index_2])
- ==
- FiniteElementDomination::either_element_can_dominate,
- ExcInternalError());
-
- --unique_sets_of_dofs;
-
- for (unsigned int j=0; j<(*finite_elements)[fe_index_1].dofs_per_line; ++j)
- {
- const types::global_dof_index master_dof_index
- = line->dof_index (j, fe_index_1);
- const types::global_dof_index slave_dof_index
- = line->dof_index (j, fe_index_2);
-
- // if master dof
- // was already
- // constrained,
- // constrain to
- // that one,
- // otherwise
- // constrain
- // slave to
- // master
- if (new_dof_indices[master_dof_index] !=
- numbers::invalid_unsigned_int)
- {
- Assert (new_dof_indices[new_dof_indices[master_dof_index]] ==
- numbers::invalid_unsigned_int,
- ExcInternalError());
-
- new_dof_indices[slave_dof_index]
- = new_dof_indices[master_dof_index];
- }
- else
- {
- Assert ((new_dof_indices[master_dof_index] ==
- numbers::invalid_unsigned_int)
- ||
- (new_dof_indices[slave_dof_index] ==
- master_dof_index),
- ExcInternalError());
-
- new_dof_indices[slave_dof_index] = master_dof_index;
- }
- }
- }
- }
- }
- }
-
- // if at this point, there is only
- // one unique set of dofs left, then
- // we have taken care of everything
- // above. if there are two, then we
- // need to deal with them here. if
- // there are more, then we punt, as
- // described in the paper (and
- // mentioned above)
+ if (cell->line(l)->user_flag_set() == false)
+ {
+ const line_iterator line = cell->line(l);
+ line->set_user_flag ();
+
+ unsigned int unique_sets_of_dofs
+ = line->n_active_fe_indices();
+
+ // do a first loop over all sets of
+ // dofs and do identity
+ // uniquification
+ for (unsigned int f=0; f<line->n_active_fe_indices(); ++f)
+ for (unsigned int g=f+1; g<line->n_active_fe_indices(); ++g)
+ {
+ const unsigned int fe_index_1 = line->nth_active_fe_index (f),
+ fe_index_2 = line->nth_active_fe_index (g);
+
+ if (((*finite_elements)[fe_index_1].dofs_per_line
+ ==
+ (*finite_elements)[fe_index_2].dofs_per_line)
+ &&
+ ((*finite_elements)[fe_index_1].dofs_per_line > 0))
+ {
+ internal::hp::ensure_existence_of_dof_identities<1>
+ ((*finite_elements)[fe_index_1],
+ (*finite_elements)[fe_index_2],
+ line_dof_identities[fe_index_1][fe_index_2]);
+ // see if these sets of dofs
+ // are identical. the first
+ // condition for this is that
+ // indeed there are n
+ // identities
+ if (line_dof_identities[fe_index_1][fe_index_2]->size()
+ ==
+ (*finite_elements)[fe_index_1].dofs_per_line)
+ {
+ unsigned int i=0;
+ for (; i<(*finite_elements)[fe_index_1].dofs_per_line; ++i)
+ if (((*(line_dof_identities[fe_index_1][fe_index_2]))[i].first != i)
+ &&
+ ((*(line_dof_identities[fe_index_1][fe_index_2]))[i].second != i))
+ // not an identity
+ break;
+
+ if (i == (*finite_elements)[fe_index_1].dofs_per_line)
+ {
+ // the dofs of these
+ // two finite
+ // elements are
+ // identical. as a
+ // safety check,
+ // ensure that none
+ // of the two FEs is
+ // trying to dominate
+ // the other, which
+ // wouldn't make any
+ // sense in this case
+ Assert ((*finite_elements)[fe_index_1].compare_for_face_domination
+ ((*finite_elements)[fe_index_2])
+ ==
+ FiniteElementDomination::either_element_can_dominate,
+ ExcInternalError());
+
+ --unique_sets_of_dofs;
+
+ for (unsigned int j=0; j<(*finite_elements)[fe_index_1].dofs_per_line; ++j)
+ {
- const unsigned int master_dof_index
+ const types::global_dof_index master_dof_index
+ = line->dof_index (j, fe_index_1);
- const unsigned int slave_dof_index
+ const types::global_dof_index slave_dof_index
+ = line->dof_index (j, fe_index_2);
+
+ // if master dof
+ // was already
+ // constrained,
+ // constrain to
+ // that one,
+ // otherwise
+ // constrain
+ // slave to
+ // master
+ if (new_dof_indices[master_dof_index] !=
+ numbers::invalid_unsigned_int)
+ {
+ Assert (new_dof_indices[new_dof_indices[master_dof_index]] ==
+ numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ new_dof_indices[slave_dof_index]
+ = new_dof_indices[master_dof_index];
+ }
+ else
+ {
+ Assert ((new_dof_indices[master_dof_index] ==
+ numbers::invalid_unsigned_int)
+ ||
+ (new_dof_indices[slave_dof_index] ==
+ master_dof_index),
+ ExcInternalError());
+
+ new_dof_indices[slave_dof_index] = master_dof_index;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // if at this point, there is only
+ // one unique set of dofs left, then
+ // we have taken care of everything
+ // above. if there are two, then we
+ // need to deal with them here. if
+ // there are more, then we punt, as
+ // described in the paper (and
+ // mentioned above)
//TODO: The check for 'dim==2' was inserted by intuition. It fixes
// the previous problems with step-27 in 3D. But an explanation
// for this is still required, and what we do here is not what we
// describe in the paper!
- if ((unique_sets_of_dofs == 2) && (dim == 2))
- {
- // find out which is the
- // most dominating finite
- // element of the ones that
- // are used on this line
- const unsigned int most_dominating_fe_index
- = internal::hp::get_most_dominating_fe_index<dim,spacedim> (line);
-
- const unsigned int n_active_fe_indices
- = line->n_active_fe_indices ();
-
- // loop over the indices of
- // all the finite elements
- // that are not dominating,
- // and identify their dofs
- // to the most dominating
- // one
- for (unsigned int f=0; f<n_active_fe_indices; ++f)
- if (line->nth_active_fe_index (f) !=
- most_dominating_fe_index)
- {
- const unsigned int
- other_fe_index = line->nth_active_fe_index (f);
-
- internal::hp::ensure_existence_of_dof_identities<1>
- ((*finite_elements)[most_dominating_fe_index],
- (*finite_elements)[other_fe_index],
- line_dof_identities[most_dominating_fe_index][other_fe_index]);
-
- internal::hp::DoFIdentities &identities
- = *line_dof_identities[most_dominating_fe_index][other_fe_index];
- for (unsigned int i=0; i<identities.size(); ++i)
- {
- const types::global_dof_index master_dof_index
- = line->dof_index (identities[i].first, most_dominating_fe_index);
- const types::global_dof_index slave_dof_index
- = line->dof_index (identities[i].second, other_fe_index);
-
- Assert ((new_dof_indices[master_dof_index] ==
- numbers::invalid_unsigned_int)
- ||
- (new_dof_indices[slave_dof_index] ==
- master_dof_index),
- ExcInternalError());
-
- new_dof_indices[slave_dof_index] = master_dof_index;
- }
- }
- }
- }
-
- // finally restore the user flags
+ if ((unique_sets_of_dofs == 2) && (dim == 2))
+ {
+ // find out which is the
+ // most dominating finite
+ // element of the ones that
+ // are used on this line
+ const unsigned int most_dominating_fe_index
+ = internal::hp::get_most_dominating_fe_index<dim,spacedim> (line);
+
+ const unsigned int n_active_fe_indices
+ = line->n_active_fe_indices ();
+
+ // loop over the indices of
+ // all the finite elements
+ // that are not dominating,
+ // and identify their dofs
+ // to the most dominating
+ // one
+ for (unsigned int f=0; f<n_active_fe_indices; ++f)
+ if (line->nth_active_fe_index (f) !=
+ most_dominating_fe_index)
+ {
+ const unsigned int
+ other_fe_index = line->nth_active_fe_index (f);
+
+ internal::hp::ensure_existence_of_dof_identities<1>
+ ((*finite_elements)[most_dominating_fe_index],
+ (*finite_elements)[other_fe_index],
+ line_dof_identities[most_dominating_fe_index][other_fe_index]);
+
+ internal::hp::DoFIdentities &identities
+ = *line_dof_identities[most_dominating_fe_index][other_fe_index];
+ for (unsigned int i=0; i<identities.size(); ++i)
+ {
- const unsigned int master_dof_index
+ const types::global_dof_index master_dof_index
+ = line->dof_index (identities[i].first, most_dominating_fe_index);
- const unsigned int slave_dof_index
+ const types::global_dof_index slave_dof_index
+ = line->dof_index (identities[i].second, other_fe_index);
+
+ Assert ((new_dof_indices[master_dof_index] ==
+ numbers::invalid_unsigned_int)
+ ||
+ (new_dof_indices[slave_dof_index] ==
+ master_dof_index),
+ ExcInternalError());
+
+ new_dof_indices[slave_dof_index] = master_dof_index;
+ }
+ }
+ }
+ }
+
+ // finally restore the user flags
const_cast<Triangulation<dim,spacedim> &>(this->get_tria())
- .load_user_flags_line(user_flags);
+ .load_user_flags_line(user_flags);
}
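
The "are these two sets of dofs identical" test in compute_line_dof_identities above can also be read in isolation. The following is only an illustrative restatement; identities stands in for the pairs stored in internal::hp::DoFIdentities and dofs_per_line for the common FiniteElement::dofs_per_line value:

#include <utility>
#include <vector>

// true if the identity list has exactly dofs_per_line entries and each
// entry relates dof i of one element to dof i of the other (mirroring
// the break condition in the loop above)
bool is_trivial_identity
  (const std::vector<std::pair<unsigned int,unsigned int> > &identities,
   const unsigned int                                        dofs_per_line)
{
  if (identities.size () != dofs_per_line)
    return false;

  for (unsigned int i = 0; i < dofs_per_line; ++i)
    if ((identities[i].first != i) && (identities[i].second != i))
      return false;                 // not an identity

  return true;
}
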
template<int dim, int spacedim>
void
DoFHandler<dim,spacedim>::
- compute_quad_dof_identities (std::vector<unsigned int> &new_dof_indices) const
+ compute_quad_dof_identities (std::vector<types::global_dof_index> &new_dof_indices) const
{
- // we will mark quads that we
- // have already treated, so first
- // save and clear the user flags
- // on quads and later restore
- // them
+ // we will mark quads that we
+ // have already treated, so first
+ // save and clear the user flags
+ // on quads and later restore
+ // them
std::vector<bool> user_flags;
this->get_tria().save_user_flags_quad(user_flags);
const_cast<Triangulation<dim,spacedim> &>(this->get_tria()).clear_user_flags_quad ();
for (active_cell_iterator cell=begin_active(); cell!=end(); ++cell)
for (unsigned int q=0; q<GeometryInfo<dim>::quads_per_cell; ++q)
- if ((cell->quad(q)->user_flag_set() == false)
- &&
- (cell->quad(q)->n_active_fe_indices() == 2))
- {
- const quad_iterator quad = cell->quad(q);
- quad->set_user_flag ();
-
- // find out which is the
- // most dominating finite
- // element of the ones that
- // are used on this quad
- const unsigned int most_dominating_fe_index
- = internal::hp::get_most_dominating_fe_index<dim,spacedim> (quad);
-
- const unsigned int n_active_fe_indices
- = quad->n_active_fe_indices ();
-
- // loop over the indices of
- // all the finite elements
- // that are not dominating,
- // and identify their dofs
- // to the most dominating
- // one
- for (unsigned int f=0; f<n_active_fe_indices; ++f)
- if (quad->nth_active_fe_index (f) !=
- most_dominating_fe_index)
- {
- const unsigned int
- other_fe_index = quad->nth_active_fe_index (f);
-
- internal::hp::ensure_existence_of_dof_identities<2>
- ((*finite_elements)[most_dominating_fe_index],
- (*finite_elements)[other_fe_index],
- quad_dof_identities[most_dominating_fe_index][other_fe_index]);
-
- internal::hp::DoFIdentities &identities
- = *quad_dof_identities[most_dominating_fe_index][other_fe_index];
- for (unsigned int i=0; i<identities.size(); ++i)
- {
- const types::global_dof_index master_dof_index
- = quad->dof_index (identities[i].first, most_dominating_fe_index);
- const types::global_dof_index slave_dof_index
- = quad->dof_index (identities[i].second, other_fe_index);
-
- Assert ((new_dof_indices[master_dof_index] ==
- numbers::invalid_unsigned_int)
- ||
- (new_dof_indices[slave_dof_index] ==
- master_dof_index),
- ExcInternalError());
-
- new_dof_indices[slave_dof_index] = master_dof_index;
- }
- }
- }
-
- // finally restore the user flags
+ if ((cell->quad(q)->user_flag_set() == false)
+ &&
+ (cell->quad(q)->n_active_fe_indices() == 2))
+ {
+ const quad_iterator quad = cell->quad(q);
+ quad->set_user_flag ();
+
+ // find out which is the
+ // most dominating finite
+ // element of the ones that
+ // are used on this quad
+ const unsigned int most_dominating_fe_index
+ = internal::hp::get_most_dominating_fe_index<dim,spacedim> (quad);
+
+ const unsigned int n_active_fe_indices
+ = quad->n_active_fe_indices ();
+
+ // loop over the indices of
+ // all the finite elements
+ // that are not dominating,
+ // and identify their dofs
+ // to the most dominating
+ // one
+ for (unsigned int f=0; f<n_active_fe_indices; ++f)
+ if (quad->nth_active_fe_index (f) !=
+ most_dominating_fe_index)
+ {
+ const unsigned int
+ other_fe_index = quad->nth_active_fe_index (f);
+
+ internal::hp::ensure_existence_of_dof_identities<2>
+ ((*finite_elements)[most_dominating_fe_index],
+ (*finite_elements)[other_fe_index],
+ quad_dof_identities[most_dominating_fe_index][other_fe_index]);
+
+ internal::hp::DoFIdentities &identities
+ = *quad_dof_identities[most_dominating_fe_index][other_fe_index];
+ for (unsigned int i=0; i<identities.size(); ++i)
+ {
- const unsigned int master_dof_index
+ const types::global_dof_index master_dof_index
+ = quad->dof_index (identities[i].first, most_dominating_fe_index);
- const unsigned int slave_dof_index
+ const types::global_dof_index slave_dof_index
+ = quad->dof_index (identities[i].second, other_fe_index);
+
+ Assert ((new_dof_indices[master_dof_index] ==
+ numbers::invalid_unsigned_int)
+ ||
+ (new_dof_indices[slave_dof_index] ==
+ master_dof_index),
+ ExcInternalError());
+
+ new_dof_indices[slave_dof_index] = master_dof_index;
+ }
+ }
+ }
+
+ // finally restore the user flags
const_cast<Triangulation<dim,spacedim> &>(this->get_tria())
- .load_user_flags_quad(user_flags);
+ .load_user_flags_quad(user_flags);
}
const_cast<Triangulation<dim,spacedim> &>(*tria).clear_user_flags ();
- /////////////////////////////////
+ /////////////////////////////////
- // Step 1: distribute DoFs on all
- // active entities
+ // Step 1: distribute DoFs on all
+ // active entities
{
- unsigned int next_free_dof = 0;
+ types::global_dof_index next_free_dof = 0;
active_cell_iterator cell = begin_active(),
- endc = end();
+ endc = end();
for (; cell != endc; ++cell)
- next_free_dof
- = internal::hp::DoFHandler::Implementation::template distribute_dofs_on_cell<spacedim> (cell,
- next_free_dof);
+ next_free_dof
+ = internal::hp::DoFHandler::Implementation::template distribute_dofs_on_cell<spacedim> (cell,
+ next_free_dof);
number_cache.n_global_dofs = next_free_dof;
}
- /////////////////////////////////
+ /////////////////////////////////
- // Step 2: identify certain dofs
- // if the finite element tells us
- // that they should have the same
- // value. only pertinent for
- // faces and other
- // lower-dimensional objects
- // where elements come together
+ // Step 2: identify certain dofs
+ // if the finite element tells us
+ // that they should have the same
+ // value. only pertinent for
+ // faces and other
+ // lower-dimensional objects
+ // where elements come together
- std::vector<unsigned int>
+ std::vector<types::global_dof_index>
- constrained_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
+ constrained_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
compute_vertex_dof_identities (constrained_indices);
compute_line_dof_identities (constrained_indices);
compute_quad_dof_identities (constrained_indices);
- // loop over all dofs and assign
- // new numbers to those which are
- // not constrained
+ // loop over all dofs and assign
+ // new numbers to those which are
+ // not constrained
- std::vector<unsigned int>
+ std::vector<types::global_dof_index>
- new_dof_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
+ new_dof_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
- unsigned int next_free_dof = 0;
- for (unsigned int i=0; i<number_cache.n_global_dofs; ++i)
+ types::global_dof_index next_free_dof = 0;
+ for (types::global_dof_index i=0; i<number_cache.n_global_dofs; ++i)
if (constrained_indices[i] == numbers::invalid_unsigned_int)
- {
- new_dof_indices[i] = next_free_dof;
- ++next_free_dof;
- }
-
- // then loop over all those that
- // are constrained and record the
- // new dof number for those:
+ {
+ new_dof_indices[i] = next_free_dof;
+ ++next_free_dof;
+ }
+
+ // then loop over all those that
+ // are constrained and record the
+ // new dof number for those:
- for (unsigned int i=0; i<number_cache.n_global_dofs; ++i)
+ for (types::global_dof_index i=0; i<number_cache.n_global_dofs; ++i)
if (constrained_indices[i] != numbers::invalid_unsigned_int)
- {
- Assert (new_dof_indices[constrained_indices[i]] !=
- numbers::invalid_unsigned_int,
- ExcInternalError());
+ {
+ Assert (new_dof_indices[constrained_indices[i]] !=
+ numbers::invalid_unsigned_int,
+ ExcInternalError());
- new_dof_indices[i] = new_dof_indices[constrained_indices[i]];
- }
+ new_dof_indices[i] = new_dof_indices[constrained_indices[i]];
+ }
- for (unsigned int i=0; i<number_cache.n_global_dofs; ++i)
+ for (types::global_dof_index i=0; i<number_cache.n_global_dofs; ++i)
{
- Assert (new_dof_indices[i] != numbers::invalid_unsigned_int,
- ExcInternalError());
- Assert (new_dof_indices[i] < next_free_dof,
- ExcInternalError());
+ Assert (new_dof_indices[i] != numbers::invalid_unsigned_int,
+ ExcInternalError());
+ Assert (new_dof_indices[i] < next_free_dof,
+ ExcInternalError());
}
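
The two loops above implement a simple two-pass renumbering: unconstrained indices receive consecutive new numbers, and every constrained index then inherits the new number of the index it was identified with. A self-contained sketch of that idea, with std::size_t standing in for types::global_dof_index and max() for the invalid marker:

#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

std::vector<std::size_t>
renumber_with_identities (const std::vector<std::size_t> &constrained_to)
{
  const std::size_t invalid = std::numeric_limits<std::size_t>::max ();
  std::vector<std::size_t> new_index (constrained_to.size (), invalid);

  // pass 1: hand out consecutive numbers to unconstrained entries
  std::size_t next_free = 0;
  for (std::size_t i = 0; i < constrained_to.size (); ++i)
    if (constrained_to[i] == invalid)
      new_index[i] = next_free++;

  // pass 2: constrained entries copy the number of their representative,
  // which by construction is itself unconstrained
  for (std::size_t i = 0; i < constrained_to.size (); ++i)
    if (constrained_to[i] != invalid)
      {
        assert (new_index[constrained_to[i]] != invalid);
        new_index[i] = new_index[constrained_to[i]];
      }

  return new_index;
}
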
- // finally, do the renumbering
- // and set the number of actually
- // used dof indices
+ // finally, do the renumbering
+ // and set the number of actually
+ // used dof indices
renumber_dofs_internal (new_dof_indices, internal::int2type<dim>());
- // now set the elements of the
- // number cache appropriately
+ // now set the elements of the
+ // number cache appropriately
number_cache.n_global_dofs = next_free_dof;
number_cache.n_locally_owned_dofs = number_cache.n_global_dofs;
number_cache.locally_owned_dofs
= IndexSet (number_cache.n_global_dofs);
number_cache.locally_owned_dofs.add_range (0,
- number_cache.n_global_dofs);
+ number_cache.n_global_dofs);
-
+ Assert (number_cache.n_global_dofs < std::numeric_limits<unsigned int>::max (),
+ ExcMessage ("Global number of degrees of freedom is too large."));
number_cache.n_locally_owned_dofs_per_processor
= std::vector<unsigned int> (1,
- number_cache.n_global_dofs);
+ (unsigned int) number_cache.n_global_dofs);
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
{
Assert (new_numbers.size() == n_dofs(), ExcRenumberingIncomplete());
#ifdef DEBUG
- // assert that the new indices are
- // consecutively numbered
+ // assert that the new indices are
+ // consecutively numbered
if (true)
{
- std::vector<unsigned int> tmp(new_numbers);
+ std::vector<types::global_dof_index> tmp(new_numbers);
std::sort (tmp.begin(), tmp.end());
- std::vector<unsigned int>::const_iterator p = tmp.begin();
- unsigned int i = 0;
+ std::vector<types::global_dof_index>::const_iterator p = tmp.begin();
+ types::global_dof_index i = 0;
for (; p!=tmp.end(); ++p, ++i)
Assert (*p == i, ExcNewNumbersNotConsecutive(i));
}
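
The debug block above checks that the requested renumbering is a permutation of 0,...,n_dofs()-1 by sorting a copy and comparing against consecutive integers. The same check as a free function (a sketch only, not library code):

#include <algorithm>
#include <cstddef>
#include <vector>

// true if, after sorting, the numbers are exactly 0, 1, ..., n-1
bool is_consecutive_renumbering (std::vector<std::size_t> numbers)  // taken by value: we sort a copy
{
  std::sort (numbers.begin (), numbers.end ());
  for (std::size_t i = 0; i < numbers.size (); ++i)
    if (numbers[i] != i)
      return false;
  return true;
}
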
template<int dim, int spacedim>
void
DoFHandler<dim,spacedim>::
- renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
+ renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- internal::int2type<0>)
+ internal::int2type<0>)
{
Assert (new_numbers.size() == n_dofs(), ExcRenumberingIncomplete());
for (unsigned int vertex_index=0; vertex_index<get_tria().n_vertices();
- ++vertex_index)
+ ++vertex_index)
{
- const unsigned int n_active_fe_indices
- = internal::DoFAccessor::Implementation::
- n_active_vertex_fe_indices (*this, vertex_index);
-
- for (unsigned int f=0; f<n_active_fe_indices; ++f)
- {
- const unsigned int fe_index
- = internal::DoFAccessor::Implementation::
- nth_active_vertex_fe_index (*this, vertex_index, f);
-
- for (unsigned int d=0; d<(*finite_elements)[fe_index].dofs_per_vertex; ++d)
- {
- const types::global_dof_index vertex_dof_index
- = internal::DoFAccessor::Implementation::
- get_vertex_dof_index(*this,
- vertex_index,
- fe_index,
- d);
- internal::DoFAccessor::Implementation::
- set_vertex_dof_index (*this,
- vertex_index,
- fe_index,
- d,
- new_numbers[vertex_dof_index]);
- }
- }
+ const unsigned int n_active_fe_indices
+ = internal::DoFAccessor::Implementation::
+ n_active_vertex_fe_indices (*this, vertex_index);
+
+ for (unsigned int f=0; f<n_active_fe_indices; ++f)
+ {
+ const unsigned int fe_index
+ = internal::DoFAccessor::Implementation::
+ nth_active_vertex_fe_index (*this, vertex_index, f);
+
+ for (unsigned int d=0; d<(*finite_elements)[fe_index].dofs_per_vertex; ++d)
+ {
- const unsigned int vertex_dof_index
+ const types::global_dof_index vertex_dof_index
+ = internal::DoFAccessor::Implementation::
+ get_vertex_dof_index(*this,
+ vertex_index,
+ fe_index,
+ d);
+ internal::DoFAccessor::Implementation::
+ set_vertex_dof_index (*this,
+ vertex_index,
+ fe_index,
+ d,
+ new_numbers[vertex_dof_index]);
+ }
+ }
}
}
template<int dim, int spacedim>
void
DoFHandler<dim,spacedim>::
- renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
+ renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- internal::int2type<1>)
+ internal::int2type<1>)
{
Assert (new_numbers.size() == n_dofs(), ExcRenumberingIncomplete());
template<>
void
DoFHandler<2,2>::
- renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
+ renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- internal::int2type<2>)
+ internal::int2type<2>)
{
const unsigned int dim = 2;
const unsigned int spacedim = 2;
template<>
void
DoFHandler<2,3>::
- renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
+ renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- internal::int2type<2>)
+ internal::int2type<2>)
{
const unsigned int dim = 2;
const unsigned int spacedim = 3;
template<>
void
DoFHandler<3,3>::
- renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
+ renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- internal::int2type<2>)
+ internal::int2type<2>)
{
const unsigned int dim = 3;
const unsigned int spacedim = 3;
template<>
void
DoFHandler<3,3>::
- renumber_dofs_internal (const std::vector<unsigned int> &new_numbers,
+ renumber_dofs_internal (const std::vector<types::global_dof_index> &new_numbers,
- internal::int2type<3>)
+ internal::int2type<3>)
{
const unsigned int dim = 3;
const unsigned int spacedim = 3;
}
- template <>
+
+ template <>
void DoFHandler<1,3>::pre_refinement_action ()
{
create_active_fe_table ();
template <int dim, int spacedim>
FEFaceValues<dim,spacedim>::FEFaceValues (const hp::MappingCollection<dim,spacedim> &mapping,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (mapping,
- fe_collection,
- q_collection,
- update_flags)
+ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (mapping,
+ fe_collection,
+ q_collection,
+ update_flags)
{}
template <int dim, int spacedim>
- FEFaceValues<dim,spacedim>::FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ FEFaceValues<dim,spacedim>::FEFaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (fe_collection,
- q_collection,
- update_flags)
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FEFaceValues<dim,spacedim> > (fe_collection,
+ q_collection,
+ update_flags)
{}
template <int dim, int spacedim>
FESubfaceValues<dim,spacedim>::FESubfaceValues (const hp::MappingCollection<dim,spacedim> &mapping,
- const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (mapping,
- fe_collection,
- q_collection,
- update_flags)
+ const hp::FECollection<dim,spacedim> &fe_collection,
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (mapping,
+ fe_collection,
+ q_collection,
+ update_flags)
{}
template <int dim, int spacedim>
- FESubfaceValues<dim,spacedim>::FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
+ FESubfaceValues<dim,spacedim>::FESubfaceValues (const hp::FECollection<dim,spacedim> &fe_collection,
- const hp::QCollection<dim-1> &q_collection,
- const UpdateFlags update_flags)
- :
- internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (fe_collection,
- q_collection,
- update_flags)
+ const hp::QCollection<dim-1> &q_collection,
+ const UpdateFlags update_flags)
+ :
+ internal::hp::FEValuesBase<dim,dim-1,dealii::FESubfaceValues<dim,spacedim> > (fe_collection,
+ q_collection,
+ update_flags)
{}
template <class SparsityPatternBase>
-unsigned int
+types::global_dof_index
BlockSparsityPatternBase<SparsityPatternBase>::n_rows () const
{
- // only count in first column, since
- // all rows should be equivalent
+ // only count in first column, since
+ // all rows should be equivalent
- unsigned int count = 0;
+ types::global_dof_index count = 0;
for (unsigned int r=0; r<rows; ++r)
count += sub_objects[r][0]->n_rows();
return count;
template <class SparsityPatternBase>
-unsigned int
+types::global_dof_index
BlockSparsityPatternBase<SparsityPatternBase>::n_cols () const
{
- // only count in first row, since
- // all rows should be equivalent
+ // only count in first row, since
+ // all rows should be equivalent
- unsigned int count = 0;
+ types::global_dof_index count = 0;
for (unsigned int c=0; c<columns; ++c)
count += sub_objects[0][c]->n_cols();
return count;
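
n_rows() and n_cols() above rely on the fact that all blocks in one block row share the same row partition (and likewise for columns), so it suffices to sum along the first block column or row. A small sketch with a hypothetical table block_n_rows[r][c] holding the row count of block (r,c):

#include <cstddef>
#include <vector>

std::size_t
total_rows (const std::vector<std::vector<std::size_t> > &block_n_rows)
{
  std::size_t count = 0;
  for (std::size_t r = 0; r < block_n_rows.size (); ++r)
    count += block_n_rows[r][0];    // only the first block column is needed
  return count;
}
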
void
BlockSparsityPattern::reinit(
- const BlockIndices& rows,
- const BlockIndices& cols,
- const std::vector<std::vector<types::global_dof_index> >& row_lengths)
+ const BlockIndices &rows,
+ const BlockIndices &cols,
- const std::vector<std::vector<unsigned int> > &row_lengths)
+ const std::vector<std::vector<types::global_dof_index> > &row_lengths)
{
AssertDimension (row_lengths.size(), cols.size());
this->reinit(rows.size(), cols.size());
- for (unsigned int j=0;j<cols.size();++j)
- for (unsigned int i=0;i<rows.size();++i)
+ for (unsigned int j=0; j<cols.size(); ++j)
+ for (unsigned int i=0; i<rows.size(); ++i)
{
- const unsigned int start = rows.local_to_global(i, 0);
- const unsigned int length = rows.block_size(i);
+ const types::global_dof_index start = rows.local_to_global(i, 0);
+ const types::global_dof_index length = rows.block_size(i);
if (row_lengths[j].size()==1)
block(i,j).reinit(rows.block_size(i),
cols.block_size(j), row_lengths[j][0], i==j);
else
{
- VectorSlice<const std::vector<unsigned int> >
+ VectorSlice<const std::vector<types::global_dof_index> >
- block_rows(row_lengths[j], start, length);
+ block_rows(row_lengths[j], start, length);
block(i,j).reinit(rows.block_size(i),
cols.block_size(j),
block_rows);
BlockCompressedSparsityPattern::
BlockCompressedSparsityPattern (
- const std::vector<types::global_dof_index>& row_indices,
- const std::vector<types::global_dof_index>& col_indices)
- const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices)
+ const std::vector<types::global_dof_index> &row_indices,
+ const std::vector<types::global_dof_index> &col_indices)
{
reinit(row_indices, col_indices);
}
void
BlockCompressedSparsityPattern::reinit (
- const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes)
+ const std::vector< types::global_dof_index > &row_block_sizes,
+ const std::vector< types::global_dof_index > &col_block_sizes)
{
BlockSparsityPatternBase<CompressedSparsityPattern>::reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0;i<row_block_sizes.size();++i)
- for (unsigned int j=0;j<col_block_sizes.size();++j)
+ for (unsigned int i=0; i<row_block_sizes.size(); ++i)
+ for (unsigned int j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
BlockCompressedSetSparsityPattern::
BlockCompressedSetSparsityPattern (
- const std::vector<types::global_dof_index>& row_indices,
- const std::vector<types::global_dof_index>& col_indices)
- const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices)
+ const std::vector<types::global_dof_index> &row_indices,
+ const std::vector<types::global_dof_index> &col_indices)
{
reinit(row_indices, col_indices);
}
void
BlockCompressedSetSparsityPattern::reinit (
- const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes)
+ const std::vector< types::global_dof_index > &row_block_sizes,
+ const std::vector< types::global_dof_index > &col_block_sizes)
{
BlockSparsityPatternBase<CompressedSetSparsityPattern>::reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0;i<row_block_sizes.size();++i)
- for (unsigned int j=0;j<col_block_sizes.size();++j)
+ for (unsigned int i=0; i<row_block_sizes.size(); ++i)
+ for (unsigned int j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
BlockCompressedSimpleSparsityPattern::
- BlockCompressedSimpleSparsityPattern (const std::vector<types::global_dof_index>& row_indices,
- const std::vector<types::global_dof_index>& col_indices)
- :
- BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(row_indices.size(),
- col_indices.size())
- {
- for (unsigned int i=0;i<row_indices.size();++i)
- for (unsigned int j=0;j<col_indices.size();++j)
-BlockCompressedSimpleSparsityPattern (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices)
+BlockCompressedSimpleSparsityPattern (const std::vector<types::global_dof_index> &row_indices,
+ const std::vector<types::global_dof_index> &col_indices)
+ :
+ BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(row_indices.size(),
+ col_indices.size())
+ {
+ for (unsigned int i=0; i<row_indices.size(); ++i)
+ for (unsigned int j=0; j<col_indices.size(); ++j)
this->block(i,j).reinit(row_indices[i],col_indices[j]);
this->collect_sizes();
}
void
BlockCompressedSimpleSparsityPattern::reinit (
- const std::vector< unsigned int > &row_block_sizes,
- const std::vector< unsigned int > &col_block_sizes)
+ const std::vector< types::global_dof_index > &row_block_sizes,
+ const std::vector< types::global_dof_index > &col_block_sizes)
{
BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::
- reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0;i<row_block_sizes.size();++i)
- for (unsigned int j=0;j<col_block_sizes.size();++j)
+ reinit(row_block_sizes.size(), col_block_sizes.size());
+ for (unsigned int i=0; i<row_block_sizes.size(); ++i)
+ for (unsigned int j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
BlockSparsityPattern::
- BlockSparsityPattern (const std::vector<types::global_dof_index>& row_indices,
- const std::vector<types::global_dof_index>& col_indices)
- :
- BlockSparsityPatternBase<SparsityPattern>(row_indices.size(),
- col_indices.size())
- BlockSparsityPattern (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices)
+ BlockSparsityPattern (const std::vector<types::global_dof_index> &row_indices,
+ const std::vector<types::global_dof_index> &col_indices)
+ :
+ BlockSparsityPatternBase<SparsityPattern>(row_indices.size(),
+ col_indices.size())
{
- for (unsigned int i=0;i<row_indices.size();++i)
- for (unsigned int j=0;j<col_indices.size();++j)
+ for (unsigned int i=0; i<row_indices.size(); ++i)
+ for (unsigned int j=0; j<col_indices.size(); ++j)
this->block(i,j).reinit(row_indices[i],col_indices[j]);
this->collect_sizes();
}
void
- BlockSparsityPattern::reinit (const std::vector<unsigned int> &row_block_sizes,
- const std::vector<unsigned int> &col_block_sizes)
+ BlockSparsityPattern::reinit (const std::vector<types::global_dof_index> &row_block_sizes,
+ const std::vector<types::global_dof_index> &col_block_sizes)
{
dealii::BlockSparsityPatternBase<SparsityPattern>::
- reinit(row_block_sizes.size(), col_block_sizes.size());
- for (unsigned int i=0;i<row_block_sizes.size();++i)
- for (unsigned int j=0;j<col_block_sizes.size();++j)
+ reinit(row_block_sizes.size(), col_block_sizes.size());
+ for (unsigned int i=0; i<row_block_sizes.size(); ++i)
+ for (unsigned int j=0; j<col_block_sizes.size(); ++j)
this->block(i,j).reinit(row_block_sizes[i],col_block_sizes[j]);
this->collect_sizes();
}
ChunkSparsityPattern::ChunkSparsityPattern (
- const unsigned int m,
- const unsigned int n,
+ const types::global_dof_index m,
+ const types::global_dof_index n,
- const std::vector<unsigned int>& row_lengths,
+ const std::vector<unsigned int> &row_lengths,
const unsigned int chunk_size,
const bool optimize_diag)
{
ChunkSparsityPattern::ChunkSparsityPattern (
- const unsigned int m,
+ const types::global_dof_index m,
- const std::vector<unsigned int>& row_lengths,
+ const std::vector<unsigned int> &row_lengths,
const unsigned int chunk_size,
const bool optimize_diag)
{
void
ChunkSparsityPattern::reinit (
- const unsigned int m,
- const unsigned int n,
+ const types::global_dof_index m,
+ const types::global_dof_index n,
- const VectorSlice<const std::vector<unsigned int> >&row_lengths,
+ const VectorSlice<const std::vector<unsigned int> > &row_lengths,
const unsigned int chunk_size,
const bool optimize_diag)
{
void
ChunkSparsityPattern::reinit (
- const unsigned int m,
- const unsigned int n,
+ const types::global_dof_index m,
+ const types::global_dof_index n,
- const std::vector<unsigned int>& row_lengths,
+ const std::vector<unsigned int> &row_lengths,
const unsigned int chunk_size,
const bool optimize_diag)
{
-CompressedSetSparsityPattern::CompressedSetSparsityPattern (const unsigned int m,
- const unsigned int n)
+CompressedSetSparsityPattern::CompressedSetSparsityPattern (const types::global_dof_index m,
+ const types::global_dof_index n)
- :
- rows(0),
- cols(0)
+ :
+ rows(0),
+ cols(0)
{
reinit (m,n);
}
-CompressedSetSparsityPattern::CompressedSetSparsityPattern (const unsigned int n)
+CompressedSetSparsityPattern::CompressedSetSparsityPattern (const types::global_dof_index n)
- :
- rows(0),
- cols(0)
+ :
+ rows(0),
+ cols(0)
{
reinit (n,n);
}
-CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const unsigned int m,
- const unsigned int n,
+CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const types::global_dof_index m,
- const types::global_dof_index n,
- const IndexSet & rowset_
- )
- :
- rows(0),
- cols(0),
- rowset(0)
+ const types::global_dof_index n,
+ const IndexSet &rowset_
+ )
+ :
+ rows(0),
+ cols(0),
+ rowset(0)
{
reinit (m,n, rowset_);
}
-CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const unsigned int n)
+CompressedSimpleSparsityPattern::CompressedSimpleSparsityPattern (const types::global_dof_index n)
- :
- rows(0),
- cols(0),
- rowset(0)
+ :
+ rows(0),
+ cols(0),
+ rowset(0)
{
reinit (n,n);
}
void
-CompressedSimpleSparsityPattern::reinit (const unsigned int m,
- const unsigned int n,
+CompressedSimpleSparsityPattern::reinit (const types::global_dof_index m,
+ const types::global_dof_index n,
- const IndexSet & rowset_)
+ const IndexSet &rowset_)
{
rows = m;
cols = n;
// explicit instantiations
-template void CompressedSimpleSparsityPattern::Line::add_entries(unsigned int *,
- unsigned int *,
+template void CompressedSimpleSparsityPattern::Line::add_entries(types::global_dof_index *,
- types::global_dof_index *,
- const bool);
+ types::global_dof_index *,
+ const bool);
-template void CompressedSimpleSparsityPattern::Line::add_entries(const unsigned int *,
- const unsigned int *,
+template void CompressedSimpleSparsityPattern::Line::add_entries(const types::global_dof_index *,
- const types::global_dof_index *,
- const bool);
+ const types::global_dof_index *,
+ const bool);
#ifndef DEAL_II_VECTOR_ITERATOR_IS_POINTER
template void CompressedSimpleSparsityPattern::Line::
add_entries(std::vector<unsigned int>::iterator,
-CompressedSparsityPattern::CompressedSparsityPattern (const unsigned int m,
- const unsigned int n)
+CompressedSparsityPattern::CompressedSparsityPattern (const types::global_dof_index m,
+ const types::global_dof_index n)
- :
- rows(0),
- cols(0)
+ :
+ rows(0),
+ cols(0)
{
reinit (m,n);
}
-CompressedSparsityPattern::CompressedSparsityPattern (const unsigned int n)
+CompressedSparsityPattern::CompressedSparsityPattern (const types::global_dof_index n)
- :
- rows(0),
- cols(0)
+ :
+ rows(0),
+ cols(0)
{
reinit (n,n);
}
Vector::Vector (const MPI_Comm &communicator,
- const VectorBase &v,
+ const VectorBase &v,
const unsigned int local_size)
- :
- communicator (communicator)
+ :
+ communicator (communicator)
{
Vector::create_vector (v.size(), local_size);
- SolverBase::SolverBase (SolverControl &cn,
+ SolverBase::SolverBase (SolverControl &cn,
const MPI_Comm &mpi_communicator)
- :
- solver_control (cn),
- mpi_communicator (mpi_communicator)
+ :
+ solver_control (cn),
+ mpi_communicator (mpi_communicator)
{}
template
void SparseDirectMA27::solve (const SparseMatrix<double> &matrix,
- Vector<double> &rhs_and_solution);
+ Vector<double> &rhs_and_solution);
template
-void SparseDirectMA27::solve (const SparseMatrix<float> &matrix,
+void SparseDirectMA27::solve (const SparseMatrix<float> &matrix,
Vector<double> &rhs_and_solution);
- SolverBase::SolverBase (SolverControl &cn)
+ SolverBase::SolverBase (SolverControl &cn)
- :
- solver_name (gmres),
- solver_control (cn)
+ :
+ solver_name (gmres),
+ solver_control (cn)
{}
- SolverDirect::SolverDirect (SolverControl &cn,
+ SolverDirect::SolverDirect (SolverControl &cn,
const AdditionalData &data)
- :
- solver_control (cn),
- additional_data (data.output_solver_details)
+ :
+ solver_control (cn),
+ additional_data (data.output_solver_details)
{}
- SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
+ SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
const unsigned int n_max_entries_per_row)
- :
- column_space_map (new Epetra_Map (input_map)),
- matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
- int(n_max_entries_per_row), false)),
- last_action (Zero),
- compressed (false)
+ :
+ column_space_map (new Epetra_Map (input_map)),
+ matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
+ int(n_max_entries_per_row), false)),
+ last_action (Zero),
+ compressed (false)
{}
- SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
+ SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
const unsigned int n_max_entries_per_row)
- :
- column_space_map (new Epetra_Map (input_col_map)),
- matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
- int(n_max_entries_per_row), false)),
- last_action (Zero),
- compressed (false)
+ :
+ column_space_map (new Epetra_Map (input_col_map)),
+ matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
+ int(n_max_entries_per_row), false)),
+ last_action (Zero),
+ compressed (false)
{}
void
SparseMatrix::reinit (const Epetra_Map &input_row_map,
const Epetra_Map &input_col_map,
- const SparsityType &sparsity_pattern,
+ const SparsityType &sparsity_pattern,
const bool exchange_data)
{
- // release memory before reallocation
+ // release memory before reallocation
temp_vector.clear();
matrix.reset();
template <>
void MGDoFHandler<2>::renumber_dofs (const unsigned int level,
- const std::vector<unsigned int> &new_numbers) {
+ const std::vector<unsigned int> &new_numbers)
+ {
Assert (new_numbers.size() == n_dofs(level),
DoFHandler<2>::ExcRenumberingIncomplete());
template <>
void MGDoFHandler<3>::renumber_dofs (const unsigned int level,
- const std::vector<unsigned int> &new_numbers) {
+ const std::vector<unsigned int> &new_numbers)
+ {
Assert (new_numbers.size() == n_dofs(level),
DoFHandler<3>::ExcRenumberingIncomplete());
const unsigned int dofs_per_cell = dof.get_fe().dofs_per_cell;
std::vector<unsigned int> dofs_on_this_cell(dofs_per_cell);
- typename MGDoFHandler<dim,spacedim>::cell_iterator cell = dof.begin(level),
- endc = dof.end(level);
+ typename DH::cell_iterator cell = dof.begin(level),
+ endc = dof.end(level);
for (; cell!=endc; ++cell)
{
cell->get_mg_dof_indices (dofs_on_this_cell);
template <int dim, int spacedim>
void
-
count_dofs_per_component (const MGDoFHandler<dim,spacedim> &dof_handler,
- std::vector<std::vector<unsigned int> > &result,
- bool only_once,
- std::vector<unsigned int> target_component)
+ std::vector<std::vector<unsigned int> > &result,
+ bool only_once,
+ std::vector<unsigned int> target_component)
{
- const FiniteElement<dim>& fe = dof_handler.get_fe();
+ const FiniteElement<dim> &fe = dof_handler.get_fe();
const unsigned int n_components = fe.n_components();
const unsigned int nlevels = dof_handler.get_tria().n_levels();
dofs_in_component (n_components,
std::vector<bool>(dof_handler.n_dofs(l),
false));
- std::vector<ComponentMask> component_select (n_components);
- Threads::TaskGroup<> tasks;
- for (unsigned int i=0; i<n_components; ++i)
- {
- void (*fun_ptr) (const unsigned int level,
- const MGDoFHandler<dim,spacedim> &,
- const ComponentMask &,
- std::vector<bool> &)
- = &DoFTools::template extract_level_dofs<MGDoFHandler<dim,spacedim> >;
-
- std::vector<bool> tmp(n_components, false);
- tmp[i] = true;
- component_select[i] = ComponentMask(tmp);
-
- tasks += Threads::new_task (fun_ptr,
- l, dof_handler,
- component_select[i],
- dofs_in_component[i]);
- }
- tasks.join_all();
-
- // next count what we got
- unsigned int component = 0;
- for (unsigned int b=0;b<fe.n_base_elements();++b)
- {
- const FiniteElement<dim>& base = fe.base_element(b);
- // Dimension of base element
- unsigned int d = base.n_components();
-
- for (unsigned int m=0;m<fe.element_multiplicity(b);++m)
- {
- for (unsigned int dd=0;dd<d;++dd)
- {
- if (base.is_primitive() || (!only_once || dd==0))
- result[l][target_component[component]]
- += std::count(dofs_in_component[component].begin(),
- dofs_in_component[component].end(),
- true);
- ++component;
- }
- }
- }
- // finally sanity check
- Assert (!dof_handler.get_fe().is_primitive()
- ||
- std::accumulate (result[l].begin(),
- result[l].end(), 0U)
- ==
- dof_handler.n_dofs(l),
- ExcInternalError());
- }
+ std::vector<ComponentMask> component_select (n_components);
+ Threads::TaskGroup<> tasks;
+ for (unsigned int i=0; i<n_components; ++i)
+ {
+ void (*fun_ptr) (const unsigned int level,
+ const MGDoFHandler<dim,spacedim> &,
+ const ComponentMask &,
+ std::vector<bool> &)
- = &DoFTools::template extract_level_dofs<dim>;
+ = &DoFTools::template extract_level_dofs<MGDoFHandler<dim,spacedim> >;
+
+ std::vector<bool> tmp(n_components, false);
+ tmp[i] = true;
+ component_select[i] = ComponentMask(tmp);
+
+ tasks += Threads::new_task (fun_ptr,
+ l, dof_handler,
+ component_select[i],
+ dofs_in_component[i]);
+ }
+ tasks.join_all();
+
+ // next count what we got
+ unsigned int component = 0;
+ for (unsigned int b=0; b<fe.n_base_elements(); ++b)
+ {
+ const FiniteElement<dim> &base = fe.base_element(b);
+ // Dimension of base element
+ unsigned int d = base.n_components();
+
+ for (unsigned int m=0; m<fe.element_multiplicity(b); ++m)
+ {
+ for (unsigned int dd=0; dd<d; ++dd)
+ {
+ if (base.is_primitive() || (!only_once || dd==0))
+ result[l][target_component[component]]
+ += std::count(dofs_in_component[component].begin(),
+ dofs_in_component[component].end(),
+ true);
+ ++component;
+ }
+ }
+ }
+ // finally sanity check
+ Assert (!dof_handler.get_fe().is_primitive()
+ ||
+ std::accumulate (result[l].begin(),
+ result[l].end(), 0U)
+ ==
+ dof_handler.n_dofs(l),
+ ExcInternalError());
+ }
}
}
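
The counting step in count_dofs_per_component above reduces, per level and per component, to counting the true entries of the selection vector produced by extract_level_dofs. Stated in isolation (dofs_in_component being the std::vector<bool> filled for one component):

#include <algorithm>
#include <vector>

unsigned int count_selected (const std::vector<bool> &dofs_in_component)
{
  return static_cast<unsigned int>
         (std::count (dofs_in_component.begin (),
                      dofs_in_component.end (),
                      true));
}
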
- template <int dim, int spacedim>
+ template <class DH>
void
count_dofs_per_block (
- const DH& dof_handler,
- std::vector<std::vector<unsigned int> >& dofs_per_block,
- const MGDoFHandler<dim,spacedim> &dof_handler,
+ const DH &dof_handler,
+ std::vector<std::vector<unsigned int> > &dofs_per_block,
std::vector<unsigned int> target_block)
{
- const FiniteElement<DH::dimension,DH::space_dimension>& fe = dof_handler.get_fe();
- const FiniteElement<dim,spacedim> &fe = dof_handler.get_fe();
+ const FiniteElement<DH::dimension,DH::space_dimension> &fe = dof_handler.get_fe();
const unsigned int n_blocks = fe.n_blocks();
const unsigned int n_levels = dof_handler.get_tria().n_levels();
for (unsigned int i=0; i<n_blocks; ++i)
{
void (*fun_ptr) (const unsigned int level,
- const DH&,
- const MGDoFHandler<dim,spacedim> &,
+ const DH &,
const BlockMask &,
- std::vector<bool>&)
+ std::vector<bool> &)
- = &DoFTools::template extract_level_dofs<dim>;
+ = &DoFTools::template extract_level_dofs<DH>;
std::vector<bool> tmp(n_blocks, false);
- tmp[i] = true;
- block_select[i] = tmp;
+ tmp[i] = true;
+ block_select[i] = tmp;
tasks += Threads::new_task (fun_ptr,
l, dof_handler, block_select[i],
"elements"));
}
- typename MGDoFHandler<dim,spacedim>::face_iterator face = cell->face(face_no);
- const types::boundary_id boundary_component = face->boundary_indicator();
- if (function_map.find(boundary_component) != function_map.end())
- // face is of the right component
- {
- // get indices, physical location and
- // boundary values of dofs on this
- // face
- local_dofs.resize (fe.dofs_per_face);
- face->get_mg_dof_indices (level, local_dofs);
- if (fe_is_system)
- {
- // enter those dofs
- // into the list that
- // match the
- // component
- // signature. avoid
- // the usual
- // complication that
- // we can't just use
- // *_system_to_component_index
- // for non-primitive
- // FEs
- for (unsigned int i=0; i<local_dofs.size(); ++i)
- {
- unsigned int component;
- if (fe.is_primitive())
- component = fe.face_system_to_component_index(i).first;
- else
- {
- // non-primitive
- // case. make
- // sure that
- // this
- // particular
- // shape
- // function
- // _is_
- // primitive,
- // and get at
- // it's
- // component. use
- // usual
- // trick to
- // transfer
- // face dof
- // index to
- // cell dof
- // index
- const unsigned int cell_i
- = (dim == 1 ?
- i
- :
- (dim == 2 ?
- (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
- :
- (dim == 3 ?
- (i<4*fe.dofs_per_vertex ?
- i
- :
- (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
- i+4*fe.dofs_per_vertex
- :
- i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
- :
- numbers::invalid_unsigned_int)));
- Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
-
- // make sure
- // that if
- // this is
- // not a
- // primitive
- // shape function,
- // then all
- // the
- // corresponding
- // components
- // in the
- // mask are
- // not set
+ typename MGDoFHandler<dim,spacedim>::face_iterator face = cell->face(face_no);
+ const types::boundary_id boundary_component = face->boundary_indicator();
+ if (function_map.find(boundary_component) != function_map.end())
+ // face is of the right component
+ {
+ // get indices, physical location and
+ // boundary values of dofs on this
+ // face
+ local_dofs.resize (fe.dofs_per_face);
+ face->get_mg_dof_indices (level, local_dofs);
+ if (fe_is_system)
+ {
+ // enter those dofs
+ // into the list that
+ // match the
+ // component
+ // signature. avoid
+ // the usual
+ // complication that
+ // we can't just use
+ // *_system_to_component_index
+ // for non-primitive
+ // FEs
+ for (unsigned int i=0; i<local_dofs.size(); ++i)
+ {
+ unsigned int component;
+ if (fe.is_primitive())
+ component = fe.face_system_to_component_index(i).first;
+ else
+ {
+ // non-primitive
+ // case. make
+ // sure that
+ // this
+ // particular
+ // shape
+ // function
+ // _is_
+ // primitive,
+ // and get at
+ // its
+ // component. use
+ // usual
+ // trick to
+ // transfer
+ // face dof
+ // index to
+ // cell dof
-
+ // index
+ const unsigned int cell_i
+ = (dim == 1 ?
+ i
+ :
+ (dim == 2 ?
+ (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
+ :
+ (dim == 3 ?
+ (i<4*fe.dofs_per_vertex ?
+ i
+ :
+ (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
+ i+4*fe.dofs_per_vertex
+ :
+ i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
+ :
+ numbers::invalid_unsigned_int)));
+ Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
+
+ // make sure
+ // that if
+ // this is
+ // not a
+ // primitive
+ // shape function,
+ // then all
+ // the
+ // corresponding
+ // components
+ // in the
+ // mask are
+ // not set
// if (!fe.is_primitive(cell_i))
// for (unsigned int c=0; c<n_components; ++c)
// if (fe.get_nonzero_components(cell_i)[c])
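
The nested conditional above that turns a face dof index into a cell dof index can be written out as a plain function of the per-object dof counts. This merely restates the expression for illustration; it is not a deal.II interface:

// maps a dof index i on a face to the corresponding index on the cell,
// reproducing the ternary expression above (dim must be 1, 2 or 3)
unsigned int face_to_cell_dof_index (const unsigned int i,               // face dof index
                                     const unsigned int dim,
                                     const unsigned int dofs_per_vertex,
                                     const unsigned int dofs_per_line)
{
  if (dim == 1)
    return i;

  if (dim == 2)
    // a quad has two more vertices than one of its faces (lines)
    return (i < 2 * dofs_per_vertex) ? i : i + 2 * dofs_per_vertex;

  // dim == 3: a hex has four more vertices and eight more lines than a face
  if (i < 4 * dofs_per_vertex)
    return i;
  if (i < 4 * dofs_per_vertex + 4 * dofs_per_line)
    return i + 4 * dofs_per_vertex;
  return i + 4 * dofs_per_vertex + 8 * dofs_per_line;
}
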
template <int dim, int spacedim>
void
- const DoFHandler<dim,spacedim>& dof,
- const typename FunctionMap<dim>::type& function_map,
- std::vector<std::set<unsigned int> >& boundary_indices,
- const std::vector<bool>& component_mask)
- {
- // if for whatever reason we were
- // passed an empty map, return
- // immediately
+ make_boundary_list(
+ const DoFHandler<dim,spacedim> &dof,
+ const typename FunctionMap<dim>::type &function_map,
+ std::vector<std::set<unsigned int> > &boundary_indices,
+ const std::vector<bool> &component_mask)
+{
+ // if for whatever reason we were
+ // passed an empty map, return
+ // immediately
+ if (function_map.size() == 0)
+ return;
- DoFHandler<dim,spacedim>::invalid_dof_index);
+ const unsigned int n_levels = dof.get_tria().n_levels();
+
+
+
+ const unsigned int n_components = DoFTools::n_components(dof);
+ const bool fe_is_system = (n_components != 1);
+
+ AssertDimension (boundary_indices.size(), n_levels);
+
+ std::vector<unsigned int> local_dofs;
+ local_dofs.reserve (DoFTools::max_dofs_per_face(dof));
+ std::fill (local_dofs.begin (), local_dofs.end (),
- // First, deal with the simpler
- // case when we have to identify
- // all boundary dofs
++ DoFHandler<dim,spacedim>::invalid_dof_index);
+
- cell = dof.begin(),
- endc = dof.end();
++ // First, deal with the simpler
++ // case when we have to identify
++ // all boundary dofs
+ if (component_mask.size() == 0)
+ {
+ typename DoFHandler<dim,spacedim>::cell_iterator
- {
- const FiniteElement<dim> &fe = cell->get_fe();
- const unsigned int level = cell->level();
- local_dofs.resize(fe.dofs_per_face);
-
- for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
- ++face_no)
- if (cell->at_boundary(face_no))
- {
- const typename DoFHandler<dim,spacedim>::face_iterator
- face = cell->face(face_no);
- const unsigned char bi = face->boundary_indicator();
- // Face is listed in
- // boundary map
- if (function_map.find(bi) != function_map.end())
- {
- face->get_mg_dof_indices(level, local_dofs);
- for (unsigned int i=0;i<fe.dofs_per_face;++i)
- boundary_indices[level].insert(local_dofs[i]);
- }
- }
- }
++ cell = dof.begin(),
++ endc = dof.end();
+ for (; cell!=endc; ++cell)
- ExcMessage("It's probably worthwhile to select at least one component."));
++ {
++ const FiniteElement<dim> &fe = cell->get_fe();
++ const unsigned int level = cell->level();
++ local_dofs.resize(fe.dofs_per_face);
++
++ for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
++ ++face_no)
++ if (cell->at_boundary(face_no))
++ {
++ const typename DoFHandler<dim,spacedim>::face_iterator
++ face = cell->face(face_no);
++ const unsigned char bi = face->boundary_indicator();
++ // Face is listed in
++ // boundary map
++ if (function_map.find(bi) != function_map.end())
++ {
++ face->get_mg_dof_indices(level, local_dofs);
++ for (unsigned int i=0; i<fe.dofs_per_face; ++i)
++ boundary_indices[level].insert(local_dofs[i]);
++ }
++ }
++ }
+ }
+ else
+ {
+ Assert (std::count(component_mask.begin(), component_mask.end(), true) > 0,
- cell = dof.begin(),
- endc = dof.end();
++ ExcMessage("It's probably worthwhile to select at least one component."));
+
+ typename DoFHandler<dim,spacedim>::cell_iterator
- for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
- ++face_no)
- {
- if (!(cell->at_boundary(face_no)))
- continue;
-
- const FiniteElement<dim> &fe = cell->get_fe();
- const unsigned int level = cell->level();
-
- // we can presently deal only with
- // primitive elements for boundary
- // values. this does not preclude
- // us using non-primitive elements
- // in components that we aren't
- // interested in, however. make
- // sure that all shape functions
- // that are non-zero for the
- // components we are interested in,
- // are in fact primitive
- for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
- {
++ cell = dof.begin(),
++ endc = dof.end();
+ for (; cell!=endc; ++cell)
- for (unsigned int c=0; c<n_components; ++c)
- if ((nonzero_component_array[c] == true)
- &&
- (component_mask[c] == true))
- Assert (cell->get_fe().is_primitive (i),
- ExcMessage ("This function can only deal with requested boundary "
- "values that correspond to primitive (scalar) base "
- "elements"));
- }
-
- typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_no);
- const unsigned char boundary_component = face->boundary_indicator();
- if (function_map.find(boundary_component) != function_map.end())
- // face is of the right component
- {
- // get indices, physical location and
- // boundary values of dofs on this
- // face
- local_dofs.resize (fe.dofs_per_face);
- face->get_mg_dof_indices (level, local_dofs);
- if (fe_is_system)
- {
- // enter those dofs
- // into the list that
- // match the
- // component
- // signature. avoid
- // the usual
- // complication that
- // we can't just use
- // *_system_to_component_index
- // for non-primitive
- // FEs
- for (unsigned int i=0; i<local_dofs.size(); ++i)
- {
- unsigned int component;
- if (fe.is_primitive())
- component = fe.face_system_to_component_index(i).first;
- else
- {
- // non-primitive
- // case. make
- // sure that
- // this
- // particular
- // shape
- // function
- // _is_
- // primitive,
- // and get at
- // it's
- // component. use
- // usual
- // trick to
- // transfer
- // face dof
- // index to
- // cell dof
-
- // index
- const unsigned int cell_i
- = (dim == 1 ?
- i
- :
- (dim == 2 ?
- (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
- :
- (dim == 3 ?
- (i<4*fe.dofs_per_vertex ?
- i
- :
- (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
- i+4*fe.dofs_per_vertex
- :
- i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
- :
- numbers::invalid_unsigned_int)));
- Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
-
- // make sure
- // that if
- // this is
- // not a
- // primitive
- // shape function,
- // then all
- // the
- // corresponding
- // components
- // in the
- // mask are
- // not set
++ for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
++ ++face_no)
++ {
++ if (!(cell->at_boundary(face_no)))
++ continue;
++
++ const FiniteElement<dim> &fe = cell->get_fe();
++ const unsigned int level = cell->level();
++
++ // we can presently deal only with
++ // primitive elements for boundary
++ // values. this does not preclude
++ // us using non-primitive elements
++ // in components that we aren't
++ // interested in, however. make
++ // sure that all shape functions
++ // that are non-zero for the
++ // components we are interested in,
++ // are in fact primitive
++ for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
++ {
+ const ComponentMask &nonzero_component_array
+ = cell->get_fe().get_nonzero_components (i);
- }
-
- if (component_mask[component] == true)
- boundary_indices[level].insert(local_dofs[i]);
- }
- }
- else
- for (unsigned int i=0; i<local_dofs.size(); ++i)
- boundary_indices[level].insert(local_dofs[i]);
- }
- }
++ for (unsigned int c=0; c<n_components; ++c)
++ if ((nonzero_component_array[c] == true)
++ &&
++ (component_mask[c] == true))
++ Assert (cell->get_fe().is_primitive (i),
++ ExcMessage ("This function can only deal with requested boundary "
++ "values that correspond to primitive (scalar) base "
++ "elements"));
++ }
++
++ typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_no);
++ const unsigned char boundary_component = face->boundary_indicator();
++ if (function_map.find(boundary_component) != function_map.end())
++ // face is of the right component
++ {
++ // get indices, physical location and
++ // boundary values of dofs on this
++ // face
++ local_dofs.resize (fe.dofs_per_face);
++ face->get_mg_dof_indices (level, local_dofs);
++ if (fe_is_system)
++ {
++ // enter those dofs
++ // into the list that
++ // match the
++ // component
++ // signature. avoid
++ // the usual
++ // complication that
++ // we can't just use
++ // *_system_to_component_index
++ // for non-primitive
++ // FEs
++ for (unsigned int i=0; i<local_dofs.size(); ++i)
++ {
++ unsigned int component;
++ if (fe.is_primitive())
++ component = fe.face_system_to_component_index(i).first;
++ else
++ {
++ // non-primitive
++ // case. make
++ // sure that
++ // this
++ // particular
++ // shape
++ // function
++ // _is_
++ // primitive,
++ // and get at
++                                                // its
++ // component. use
++ // usual
++ // trick to
++ // transfer
++ // face dof
++ // index to
++ // cell dof
++
++ // index
++ const unsigned int cell_i
++ = (dim == 1 ?
++ i
++ :
++ (dim == 2 ?
++ (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
++ :
++ (dim == 3 ?
++ (i<4*fe.dofs_per_vertex ?
++ i
++ :
++ (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
++ i+4*fe.dofs_per_vertex
++ :
++ i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
++ :
++ numbers::invalid_unsigned_int)));
++ Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
++
++ // make sure
++ // that if
++ // this is
++ // not a
++ // primitive
++ // shape function,
++ // then all
++ // the
++ // corresponding
++ // components
++ // in the
++ // mask are
++ // not set
+// if (!fe.is_primitive(cell_i))
+// for (unsigned int c=0; c<n_components; ++c)
+// if (fe.get_nonzero_components(cell_i)[c])
+// Assert (component_mask[c] == false,
+// ExcFENotPrimitive());
+
+// let's pick the first of possibly more than one non-zero
+// components. if shape function is non-primitive, then we will ignore
+// the result in the following anyway, otherwise there's only one
+// non-zero component which we will use
+ component = fe.get_nonzero_components(cell_i).first_selected_component();
- make_boundary_list(const MGDoFHandler<dim,spacedim>& dof,
- const typename FunctionMap<dim>::type& function_map,
- std::vector<IndexSet>& boundary_indices,
- const ComponentMask & component_mask)
- {
++ }
++
++ if (component_mask[component] == true)
++ boundary_indices[level].insert(local_dofs[i]);
++ }
++ }
++ else
++ for (unsigned int i=0; i<local_dofs.size(); ++i)
++ boundary_indices[level].insert(local_dofs[i]);
++ }
++ }
+ }
+ }
+
+
+
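The dim-dependent conditional above implements the face-to-cell dof index shift by hand: the component of a face dof can be read off from a cell dof of the same kind (vertex, line, quad), so it is enough to skip over the cell dofs that have no counterpart on a face. A minimal sketch of the 2d case with a hypothetical helper name; inside the library the same mapping is available as FiniteElement::face_to_cell_index(), which the interface-dof extraction further down already uses.

// Illustration only -- not part of the patch. In 2d a face is a line whose
// dofs are ordered as 2*dofs_per_vertex vertex dofs followed by the line
// dofs; on the cell, all 4*dofs_per_vertex vertex dofs come first, so face
// line dofs are shifted by 2*dofs_per_vertex to land on a cell dof of the
// same kind.
inline unsigned int face_dof_to_cell_dof_2d (const unsigned int face_dof,
                                             const unsigned int dofs_per_vertex)
{
  return (face_dof < 2*dofs_per_vertex
          ? face_dof
          : face_dof + 2*dofs_per_vertex);
}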
+ template <int dim, int spacedim>
+ void
+ make_boundary_list(const MGDoFHandler<dim,spacedim> &dof,
+ const typename FunctionMap<dim>::type &function_map,
+ std::vector<IndexSet> &boundary_indices,
+ const ComponentMask &component_mask)
+ {
Assert (boundary_indices.size() == dof.get_tria().n_levels(),
- ExcDimensionMismatch (boundary_indices.size(),
- dof.get_tria().n_levels()));
+ ExcDimensionMismatch (boundary_indices.size(),
+ dof.get_tria().n_levels()));
std::vector<std::set<unsigned int> >
- my_boundary_indices (dof.get_tria().n_levels());
+ my_boundary_indices (dof.get_tria().n_levels());
make_boundary_list (dof, function_map, my_boundary_indices, component_mask);
for (unsigned int i=0; i<dof.get_tria().n_levels(); ++i)
{
template <int dim, int spacedim>
void
extract_inner_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::vector<bool> > &interface_dofs)
- std::vector<std::vector<bool> > &interface_dofs)
++ std::vector<std::vector<bool> > &interface_dofs)
{
Assert (interface_dofs.size() == mg_dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (interface_dofs.size(),
- mg_dof_handler.get_tria().n_levels()));
+ ExcDimensionMismatch (interface_dofs.size(),
+ mg_dof_handler.get_tria().n_levels()));
for (unsigned int l=0; l<mg_dof_handler.get_tria().n_levels(); ++l)
{
template <int dim, int spacedim>
void
extract_non_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::set<unsigned int> > &non_interface_dofs)
- std::vector<std::set<unsigned int> > &non_interface_dofs)
++ std::vector<std::set<unsigned int> > &non_interface_dofs)
{
Assert (non_interface_dofs.size() == mg_dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (non_interface_dofs.size(),
- mg_dof_handler.get_tria().n_levels()));
+ ExcDimensionMismatch (non_interface_dofs.size(),
+ mg_dof_handler.get_tria().n_levels()));
const FiniteElement<dim,spacedim> &fe = mg_dof_handler.get_fe();
template <int dim, int spacedim>
void
extract_inner_interface_dofs (const MGDoFHandler<dim,spacedim> &mg_dof_handler,
- std::vector<std::vector<bool> > &interface_dofs,
- std::vector<std::vector<bool> > &boundary_interface_dofs)
- std::vector<std::vector<bool> > &interface_dofs,
- std::vector<std::vector<bool> > &boundary_interface_dofs)
++ std::vector<std::vector<bool> > &interface_dofs,
++ std::vector<std::vector<bool> > &boundary_interface_dofs)
{
Assert (interface_dofs.size() == mg_dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (interface_dofs.size(),
- mg_dof_handler.get_tria().n_levels()));
+ ExcDimensionMismatch (interface_dofs.size(),
+ mg_dof_handler.get_tria().n_levels()));
Assert (boundary_interface_dofs.size() == mg_dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (boundary_interface_dofs.size(),
- mg_dof_handler.get_tria().n_levels()));
+ ExcDimensionMismatch (boundary_interface_dofs.size(),
+ mg_dof_handler.get_tria().n_levels()));
for (unsigned int l=0; l<mg_dof_handler.get_tria().n_levels(); ++l)
{
std::fill (boundary_cell_dofs.begin(), boundary_cell_dofs.end(), false);
for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
- {
- const typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_nr);
- if (!face->at_boundary())
- {
- //interior face
- const typename MGDoFHandler<dim>::cell_iterator
- neighbor = cell->neighbor(face_nr);
-
- // Do refinement face
- // from the coarse side
- if (neighbor->level() < cell->level())
- {
- for (unsigned int j=0; j<dofs_per_face; ++j)
+ {
+ const typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_nr);
+ if (!face->at_boundary())
+ {
+ //interior face
+ const typename MGDoFHandler<dim>::cell_iterator
+ neighbor = cell->neighbor(face_nr);
+
+ // Do refinement face
+ // from the coarse side
+ if (neighbor->level() < cell->level())
+ {
+ for (unsigned int j=0; j<dofs_per_face; ++j)
cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
- has_coarser_neighbor = true;
- }
- }
- }
+ has_coarser_neighbor = true;
+ }
+ }
+ }
if (has_coarser_neighbor == true)
- for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
- if(cell->at_boundary(face_nr))
- for(unsigned int j=0; j<dofs_per_face; ++j)
- // if (cell_dofs[fe.face_to_cell_index(j,face_nr)] == true) //is this necessary?
- boundary_cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
+ for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
+ if (cell->at_boundary(face_nr))
+ for (unsigned int j=0; j<dofs_per_face; ++j)
+ // if (cell_dofs[fe.face_to_cell_index(j,face_nr)] == true) //is this necessary?
++ boundary_cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
+
+
+ const unsigned int level = cell->level();
+ cell->get_mg_dof_indices (local_dof_indices);
+
- for(unsigned int i=0; i<dofs_per_cell; ++i)
- {
- if (cell_dofs[i])
- interface_dofs[level][local_dof_indices[i]] = true;
++ for (unsigned int i=0; i<dofs_per_cell; ++i)
++ {
++ if (cell_dofs[i])
++ interface_dofs[level][local_dof_indices[i]] = true;
+
- if (boundary_cell_dofs[i])
- boundary_interface_dofs[level][local_dof_indices[i]] = true;
- }
++ if (boundary_cell_dofs[i])
++ boundary_interface_dofs[level][local_dof_indices[i]] = true;
++ }
+ }
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ extract_inner_interface_dofs (const DoFHandler<dim,spacedim> &dof_handler,
- std::vector<std::vector<bool> > &interface_dofs,
- std::vector<std::vector<bool> > &boundary_interface_dofs)
++ std::vector<std::vector<bool> > &interface_dofs,
++ std::vector<std::vector<bool> > &boundary_interface_dofs)
+ {
+ Assert (interface_dofs.size() == dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (interface_dofs.size(),
- dof_handler.get_tria().n_levels()));
++ ExcDimensionMismatch (interface_dofs.size(),
++ dof_handler.get_tria().n_levels()));
+ Assert (boundary_interface_dofs.size() == dof_handler.get_tria().n_levels(),
- ExcDimensionMismatch (boundary_interface_dofs.size(),
- dof_handler.get_tria().n_levels()));
++ ExcDimensionMismatch (boundary_interface_dofs.size(),
++ dof_handler.get_tria().n_levels()));
+
+ for (unsigned int l=0; l<dof_handler.get_tria().n_levels(); ++l)
+ {
+ Assert (interface_dofs[l].size() == dof_handler.n_dofs(l),
- ExcDimensionMismatch (interface_dofs[l].size(),
- dof_handler.n_dofs(l)));
++ ExcDimensionMismatch (interface_dofs[l].size(),
++ dof_handler.n_dofs(l)));
+ Assert (boundary_interface_dofs[l].size() == dof_handler.n_dofs(l),
- ExcDimensionMismatch (boundary_interface_dofs[l].size(),
- dof_handler.n_dofs(l)));
++ ExcDimensionMismatch (boundary_interface_dofs[l].size(),
++ dof_handler.n_dofs(l)));
+
+ std::fill (interface_dofs[l].begin(),
- interface_dofs[l].end(),
- false);
++ interface_dofs[l].end(),
++ false);
+ std::fill (boundary_interface_dofs[l].begin(),
- boundary_interface_dofs[l].end(),
- false);
++ boundary_interface_dofs[l].end(),
++ false);
+ }
+
+ const FiniteElement<dim,spacedim> &fe = dof_handler.get_fe();
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int dofs_per_face = fe.dofs_per_face;
+
+ std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+ std::vector<unsigned int> face_dof_indices (dofs_per_face);
+
+ std::vector<bool> cell_dofs(dofs_per_cell, false);
+ std::vector<bool> boundary_cell_dofs(dofs_per_cell, false);
+
+ typename DoFHandler<dim>::cell_iterator cell = dof_handler.begin(),
- endc = dof_handler.end();
++ endc = dof_handler.end();
+
+ for (; cell!=endc; ++cell)
+ {
+ bool has_coarser_neighbor = false;
+
+ std::fill (cell_dofs.begin(), cell_dofs.end(), false);
+ std::fill (boundary_cell_dofs.begin(), boundary_cell_dofs.end(), false);
+
+ for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
- {
- const typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_nr);
- if (!face->at_boundary())
- {
- //interior face
- const typename DoFHandler<dim>::cell_iterator
- neighbor = cell->neighbor(face_nr);
-
- // Do refinement face
- // from the coarse side
- if (neighbor->level() < cell->level())
- {
- for (unsigned int j=0; j<dofs_per_face; ++j)
- cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
-
- has_coarser_neighbor = true;
- }
- }
- }
++ {
++ const typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_nr);
++ if (!face->at_boundary())
++ {
++ //interior face
++ const typename DoFHandler<dim>::cell_iterator
++ neighbor = cell->neighbor(face_nr);
++
++ // Do refinement face
++ // from the coarse side
++ if (neighbor->level() < cell->level())
++ {
++ for (unsigned int j=0; j<dofs_per_face; ++j)
++ cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
++
++ has_coarser_neighbor = true;
++ }
++ }
++ }
+
+ if (has_coarser_neighbor == true)
- for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
- if(cell->at_boundary(face_nr))
- for(unsigned int j=0; j<dofs_per_face; ++j)
- // if (cell_dofs[fe.face_to_cell_index(j,face_nr)] == true) //is this necessary?
- boundary_cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
++ for (unsigned int face_nr=0; face_nr<GeometryInfo<dim>::faces_per_cell; ++face_nr)
++ if (cell->at_boundary(face_nr))
++ for (unsigned int j=0; j<dofs_per_face; ++j)
++// if (cell_dofs[fe.face_to_cell_index(j,face_nr)] == true) //is this necessary?
+ boundary_cell_dofs[fe.face_to_cell_index(j,face_nr)] = true;
const unsigned int level = cell->level();
temp_copy_indices.resize (0);
temp_copy_indices.resize (mg_dof.n_dofs(level), numbers::invalid_unsigned_int);
- // Compute coarse level right hand side
- // by restricting from fine level.
+                                      // Record, for each level dof of an
+                                      // active cell, the corresponding
+                                      // global dof index.
for (; level_cell!=level_end; ++level_cell)
- {
- DoFAccessor<dim, DoFHandler<dim,spacedim> >& global_cell = *level_cell;
- // get the dof numbers of
- // this cell for the global
- // and the level-wise
- // numbering
- global_cell.get_dof_indices(global_dof_indices);
- level_cell->get_mg_dof_indices (level_dof_indices);
-
- for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ DoFAccessor<dim, DoFHandler<dim,spacedim> > &global_cell = *level_cell;
+ // get the dof numbers of
+ // this cell for the global
+ // and the level-wise
+ // numbering
+ global_cell.get_dof_indices(global_dof_indices);
+ level_cell->get_mg_dof_indices (level_dof_indices);
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
{
- if(mg_constrained_dofs != 0)
+ if (mg_constrained_dofs != 0)
{
- if(!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i]))
- temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
+ if (!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i]))
+ temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
}
else
- temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
+ temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
}
- }
-
- // now all the active dofs got a valid entry,
- // the other ones have an invalid entry. Count
- // the invalid entries and then resize the
- // copy_indices object. Then, insert the pairs
- // of global index and level index into
- // copy_indices.
+ }
+
+                                  // now all the active dofs have a valid entry,
++ // the other ones have an invalid entry. Count
++ // the invalid entries and then resize the
++ // copy_indices object. Then, insert the pairs
++ // of global index and level index into
++ // copy_indices.
+ const unsigned int n_active_dofs =
- std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
- std::bind2nd(std::not_equal_to<unsigned int>(),
- numbers::invalid_unsigned_int));
++ std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
++ std::bind2nd(std::not_equal_to<unsigned int>(),
++ numbers::invalid_unsigned_int));
+ copy_indices[level].resize (n_active_dofs);
+ unsigned int counter = 0;
+ for (unsigned int i=0; i<temp_copy_indices.size(); ++i)
- if (temp_copy_indices[i] != numbers::invalid_unsigned_int)
- copy_indices[level][counter++] =
- std::pair<unsigned int, unsigned int> (temp_copy_indices[i], i);
++ if (temp_copy_indices[i] != numbers::invalid_unsigned_int)
++ copy_indices[level][counter++] =
++ std::pair<unsigned int, unsigned int> (temp_copy_indices[i], i);
+ Assert (counter == n_active_dofs, ExcInternalError());
+ }
+}
+
+
+
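The copy_indices construction just above uses a count-then-fill idiom based on std::bind2nd, which is deprecated since C++11. A self-contained sketch of the same idiom with a lambda (the function name and the plain unsigned types are placeholders for illustration, not part of the patch):

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

// Count the valid entries, reserve, then store (global index, level index)
// pairs; 'invalid' plays the role of numbers::invalid_unsigned_int.
std::vector<std::pair<unsigned int,unsigned int> >
compress_copy_indices (const std::vector<unsigned int> &temp_copy_indices)
{
  const unsigned int invalid = std::numeric_limits<unsigned int>::max();

  std::vector<std::pair<unsigned int,unsigned int> > copy_indices;
  copy_indices.reserve (std::count_if (temp_copy_indices.begin(),
                                       temp_copy_indices.end(),
                                       [invalid](const unsigned int j)
                                       { return j != invalid; }));

  for (unsigned int i=0; i<temp_copy_indices.size(); ++i)
    if (temp_copy_indices[i] != invalid)
      copy_indices.push_back (std::make_pair (temp_copy_indices[i], i));

  return copy_indices;
}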
+template <typename VECTOR>
+template <int dim, int spacedim>
+void MGTransferPrebuilt<VECTOR>::build_matrices (
+ const DoFHandler<dim,spacedim> &dof_handler)
+{
+ const unsigned int n_levels = dof_handler.get_tria().n_levels();
+ const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
+
+ sizes.resize(n_levels);
- for (unsigned int l=0;l<n_levels;++l)
++ for (unsigned int l=0; l<n_levels; ++l)
+ sizes[l] = dof_handler.n_dofs(l);
+
- // reset the size of the array of
- // matrices. call resize(0) first,
- // in order to delete all elements
- // and clear their memory. then
- // repopulate these arrays
- //
- // note that on resize(0), the
- // shared_ptr class takes care of
- // deleting the object it points to
- // by itself
++ // reset the size of the array of
++ // matrices. call resize(0) first,
++ // in order to delete all elements
++ // and clear their memory. then
++ // repopulate these arrays
++ //
++ // note that on resize(0), the
++ // shared_ptr class takes care of
++ // deleting the object it points to
++ // by itself
+ prolongation_matrices.resize (0);
+ prolongation_sparsities.resize (0);
+
+ for (unsigned int i=0; i<n_levels-1; ++i)
+ {
+ prolongation_sparsities.push_back
- (std_cxx1x::shared_ptr<SparsityPattern> (new SparsityPattern));
++ (std_cxx1x::shared_ptr<SparsityPattern> (new SparsityPattern));
+ prolongation_matrices.push_back
- (std_cxx1x::shared_ptr<SparseMatrix<double> > (new SparseMatrix<double>));
++ (std_cxx1x::shared_ptr<SparseMatrix<double> > (new SparseMatrix<double>));
+ }
+
- // two fields which will store the
- // indices of the multigrid dofs
- // for a cell and one of its children
++ // two fields which will store the
++ // indices of the multigrid dofs
++ // for a cell and one of its children
+ std::vector<unsigned int> dof_indices_parent (dofs_per_cell);
+ std::vector<unsigned int> dof_indices_child (dofs_per_cell);
+
- // for each level: first build the sparsity
- // pattern of the matrices and then build the
- // matrices themselves. note that we only
- // need to take care of cells on the coarser
- // level which have children
++ // for each level: first build the sparsity
++ // pattern of the matrices and then build the
++ // matrices themselves. note that we only
++ // need to take care of cells on the coarser
++ // level which have children
+ for (unsigned int level=0; level<n_levels-1; ++level)
+ {
+
- // reset the dimension of the structure.
- // note that for the number of entries
- // per row, the number of parent dofs
- // coupling to a child dof is
- // necessary. this, of course, is the
- // number of degrees of freedom per
- // cell
- // increment dofs_per_cell
- // since a useless diagonal
- // element will be stored
++ // reset the dimension of the structure.
++ // note that for the number of entries
++ // per row, the number of parent dofs
++ // coupling to a child dof is
++ // necessary. this, of course, is the
++ // number of degrees of freedom per
++ // cell
++ // increment dofs_per_cell
++ // since a useless diagonal
++ // element will be stored
+ prolongation_sparsities[level]->reinit (sizes[level+1],
- sizes[level],
- dofs_per_cell+1);
++ sizes[level],
++ dofs_per_cell+1);
+
+ for (typename DoFHandler<dim,spacedim>::cell_iterator cell = dof_handler.begin(level);
- cell != dof_handler.end(level); ++cell)
- if (cell->has_children())
- {
- cell->get_mg_dof_indices (dof_indices_parent);
-
- Assert(cell->n_children()==GeometryInfo<dim>::max_children_per_cell,
- ExcNotImplemented());
- for (unsigned int child=0; child<cell->n_children(); ++child)
- {
- // set an alias to the
- // prolongation matrix for
- // this child
- const FullMatrix<double> &prolongation
- = dof_handler.get_fe().get_prolongation_matrix (child,
- cell->refinement_case());
-
- Assert (prolongation.n() != 0, ExcNoProlongation());
-
- cell->child(child)->get_mg_dof_indices (dof_indices_child);
-
- // now tag the entries in the
- // matrix which will be used
- // for this pair of parent/child
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- for (unsigned int j=0; j<dofs_per_cell; ++j)
- if (prolongation(i,j) != 0)
- prolongation_sparsities[level]->add (dof_indices_child[i],
- dof_indices_parent[j]);
- }
- }
++ cell != dof_handler.end(level); ++cell)
++ if (cell->has_children())
++ {
++ cell->get_mg_dof_indices (dof_indices_parent);
++
++ Assert(cell->n_children()==GeometryInfo<dim>::max_children_per_cell,
++ ExcNotImplemented());
++ for (unsigned int child=0; child<cell->n_children(); ++child)
++ {
++ // set an alias to the
++ // prolongation matrix for
++ // this child
++ const FullMatrix<double> &prolongation
++ = dof_handler.get_fe().get_prolongation_matrix (child,
++ cell->refinement_case());
++
++ Assert (prolongation.n() != 0, ExcNoProlongation());
++
++ cell->child(child)->get_mg_dof_indices (dof_indices_child);
++
++ // now tag the entries in the
++ // matrix which will be used
++ // for this pair of parent/child
++ for (unsigned int i=0; i<dofs_per_cell; ++i)
++ for (unsigned int j=0; j<dofs_per_cell; ++j)
++ if (prolongation(i,j) != 0)
++ prolongation_sparsities[level]->add (dof_indices_child[i],
++ dof_indices_parent[j]);
++ }
++ }
+
+ prolongation_sparsities[level]->compress ();
+
+ prolongation_matrices[level]->reinit (*prolongation_sparsities[level]);
+
- // now actually build the matrices
++ // now actually build the matrices
+ for (typename DoFHandler<dim,spacedim>::cell_iterator cell = dof_handler.begin(level);
- cell != dof_handler.end(level); ++cell)
- if (cell->has_children())
- {
- cell->get_mg_dof_indices (dof_indices_parent);
-
- Assert(cell->n_children()==GeometryInfo<dim>::max_children_per_cell,
- ExcNotImplemented());
- for (unsigned int child=0; child<cell->n_children(); ++child)
- {
- // set an alias to the
- // prolongation matrix for
- // this child
- const FullMatrix<double> &prolongation
- = dof_handler.get_fe().get_prolongation_matrix (child,
- cell->refinement_case());
-
- cell->child(child)->get_mg_dof_indices (dof_indices_child);
-
- // now set the entries in the
- // matrix
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- prolongation_matrices[level]->set (dof_indices_child[i],
- dofs_per_cell,
- &dof_indices_parent[0],
- &prolongation(i,0),
- true);
- }
- }
++ cell != dof_handler.end(level); ++cell)
++ if (cell->has_children())
++ {
++ cell->get_mg_dof_indices (dof_indices_parent);
++
++ Assert(cell->n_children()==GeometryInfo<dim>::max_children_per_cell,
++ ExcNotImplemented());
++ for (unsigned int child=0; child<cell->n_children(); ++child)
++ {
++ // set an alias to the
++ // prolongation matrix for
++ // this child
++ const FullMatrix<double> &prolongation
++ = dof_handler.get_fe().get_prolongation_matrix (child,
++ cell->refinement_case());
++
++ cell->child(child)->get_mg_dof_indices (dof_indices_child);
++
++ // now set the entries in the
++ // matrix
++ for (unsigned int i=0; i<dofs_per_cell; ++i)
++ prolongation_matrices[level]->set (dof_indices_child[i],
++ dofs_per_cell,
++ &dof_indices_parent[0],
++ &prolongation(i,0),
++ true);
++ }
++ }
+ }
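      // For orientation: once assembled, these level matrices are applied as
      // plain sparse matrix-vector products, roughly along the lines of
      //
      //   prolongation_matrices[to_level-1]->vmult        (dst, src);  // coarse -> fine
      //   prolongation_matrices[from_level-1]->Tvmult_add (dst, src);  // fine -> coarse, accumulated
      //
      // so all level-transfer information is carried by the sparsity patterns
      // and matrix entries built in the loop above.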
+
+
- // impose boundary conditions
- // but only in the column of
- // the prolongation matrix
++ // impose boundary conditions
++ // but only in the column of
++ // the prolongation matrix
+ if (mg_constrained_dofs != 0)
- if (mg_constrained_dofs->set_boundary_values())
- {
- std::vector<unsigned int> constrain_indices;
- for (int level=n_levels-2; level>=0; --level)
- {
- if (mg_constrained_dofs->get_boundary_indices()[level].size() == 0)
- continue;
-
- // need to delete all the columns in the
- // matrix that are on the boundary. to achive
- // this, create an array as long as there are
- // matrix columns, and find which columns we
- // need to filter away.
- constrain_indices.resize (0);
- constrain_indices.resize (prolongation_matrices[level]->n(), 0);
- std::set<unsigned int>::const_iterator dof
++ if (mg_constrained_dofs->set_boundary_values())
++ {
++ std::vector<unsigned int> constrain_indices;
++ for (int level=n_levels-2; level>=0; --level)
++ {
++ if (mg_constrained_dofs->get_boundary_indices()[level].size() == 0)
++ continue;
++
++ // need to delete all the columns in the
++           // matrix that are on the boundary. to achieve
++ // this, create an array as long as there are
++ // matrix columns, and find which columns we
++ // need to filter away.
++ constrain_indices.resize (0);
++ constrain_indices.resize (prolongation_matrices[level]->n(), 0);
++ std::set<unsigned int>::const_iterator dof
+ = mg_constrained_dofs->get_boundary_indices()[level].begin(),
- endd = mg_constrained_dofs->get_boundary_indices()[level].end();
- for (; dof != endd; ++dof)
- constrain_indices[*dof] = 1;
-
- const unsigned int n_dofs = prolongation_matrices[level]->m();
- for (unsigned int i=0; i<n_dofs; ++i)
- {
- SparseMatrix<double>::iterator
- start_row = prolongation_matrices[level]->begin(i),
- end_row = prolongation_matrices[level]->end(i);
- for(; start_row != end_row; ++start_row)
- {
- if (constrain_indices[start_row->column()] == 1)
- start_row->value() = 0;
- }
- }
- }
- }
-
- // to find the indices that describe the
- // relation between global dofs and local
- // numbering on the individual level, first
- // create a temp vector where the ith level
- // entry contains the respective global
- // entry. this gives a neat way to find those
- // indices. in a second step, actually build
- // the std::vector<std::pair<uint,uint> > that
- // only contains the active dofs on the
- // levels.
++ endd = mg_constrained_dofs->get_boundary_indices()[level].end();
++ for (; dof != endd; ++dof)
++ constrain_indices[*dof] = 1;
++
++ const unsigned int n_dofs = prolongation_matrices[level]->m();
++ for (unsigned int i=0; i<n_dofs; ++i)
++ {
++ SparseMatrix<double>::iterator
++ start_row = prolongation_matrices[level]->begin(i),
++ end_row = prolongation_matrices[level]->end(i);
++ for (; start_row != end_row; ++start_row)
++ {
++ if (constrain_indices[start_row->column()] == 1)
++ start_row->value() = 0;
++ }
++ }
++ }
++ }
++
++ // to find the indices that describe the
++ // relation between global dofs and local
++ // numbering on the individual level, first
++ // create a temp vector where the ith level
++ // entry contains the respective global
++ // entry. this gives a neat way to find those
++ // indices. in a second step, actually build
++ // the std::vector<std::pair<uint,uint> > that
++ // only contains the active dofs on the
++ // levels.
+
+ copy_indices.resize(n_levels);
+ std::vector<unsigned int> temp_copy_indices;
+ std::vector<unsigned int> global_dof_indices (dofs_per_cell);
+ std::vector<unsigned int> level_dof_indices (dofs_per_cell);
+ for (int level=dof_handler.get_tria().n_levels()-1; level>=0; --level)
+ {
+ copy_indices[level].clear();
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
- level_cell = dof_handler.begin_active(level);
++ level_cell = dof_handler.begin_active(level);
+ const typename DoFHandler<dim,spacedim>::active_cell_iterator
- level_end = dof_handler.end_active(level);
++ level_end = dof_handler.end_active(level);
+
+ temp_copy_indices.resize (0);
+ temp_copy_indices.resize (dof_handler.n_dofs(level), numbers::invalid_unsigned_int);
+
- // Compute coarse level right hand side
- // by restricting from fine level.
++                                      // Record, for each level dof of an
++                                      // active cell, the corresponding
++                                      // global dof index.
+ for (; level_cell!=level_end; ++level_cell)
- {
- DoFAccessor<dim, DoFHandler<dim,spacedim> >& global_cell = *level_cell;
- // get the dof numbers of
- // this cell for the global
- // and the level-wise
- // numbering
- global_cell.get_dof_indices(global_dof_indices);
- level_cell->get_mg_dof_indices (level_dof_indices);
-
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- {
- if(mg_constrained_dofs != 0)
++ {
++ DoFAccessor<dim, DoFHandler<dim,spacedim> > &global_cell = *level_cell;
++ // get the dof numbers of
++ // this cell for the global
++ // and the level-wise
++ // numbering
++ global_cell.get_dof_indices(global_dof_indices);
++ level_cell->get_mg_dof_indices (level_dof_indices);
++
++ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
- if(!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i]))
- temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
++ if (mg_constrained_dofs != 0)
++ {
++ if (!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i]))
++ temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
++ }
++ else
++ temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
+ }
- else
- temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i];
- }
- }
-
- // now all the active dofs got a valid entry,
- // the other ones have an invalid entry. Count
- // the invalid entries and then resize the
- // copy_indices object. Then, insert the pairs
- // of global index and level index into
- // copy_indices.
++ }
++
++                              // now all the active dofs have a valid entry,
+ // the other ones have an invalid entry. Count
+ // the invalid entries and then resize the
+ // copy_indices object. Then, insert the pairs
+ // of global index and level index into
+ // copy_indices.
const unsigned int n_active_dofs =
- std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
- std::bind2nd(std::not_equal_to<unsigned int>(),
- numbers::invalid_unsigned_int));
+ std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
+ std::bind2nd(std::not_equal_to<unsigned int>(),
+ numbers::invalid_unsigned_int));
copy_indices[level].resize (n_active_dofs);
unsigned int counter = 0;
for (unsigned int i=0; i<temp_copy_indices.size(); ++i)
template <int spacedim>
template <typename InputVector, class DH>
void KellyErrorEstimator<1,spacedim>::
-estimate (const Mapping<1,spacedim> & /*mapping*/,
- const DH & /*dof_handler*/,
+estimate (const Mapping<1,spacedim> &/*mapping*/,
+ const DH &/*dof_handler*/,
- const hp::QCollection<0> &,
+ const hp::QCollection<0> &,
- const typename FunctionMap<spacedim>::type & /*neumann_bc*/,
- const std::vector<const InputVector *> & /*solutions*/,
- std::vector<Vector<float>*> & /*errors*/,
- const ComponentMask & /*component_mask_*/,
- const Function<spacedim> * /*coefficient*/,
+ const typename FunctionMap<spacedim>::type &/*neumann_bc*/,
+ const std::vector<const InputVector *> &/*solutions*/,
+ std::vector<Vector<float>*> &/*errors*/,
+ const ComponentMask &/*component_mask_*/,
+          const Function<spacedim> * /*coefficient*/,
const unsigned int,
const types::subdomain_id /*subdomain_id*/,
const types::material_id /*material_id*/)
struct CopyData
{
- std::vector<types::global_dof_index> dof_indices;
- FullMatrix<double> cell_matrix;
- dealii::Vector<double> cell_rhs;
- std::vector<unsigned int> dof_indices;
++ std::vector<types::global_dof_index> dof_indices;
+ FullMatrix<double> cell_matrix;
+ dealii::Vector<double> cell_rhs;
};
}
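A CopyData object of the kind defined above carries the per-cell result from the (potentially parallel) assembly stage to the serial stage that writes into the global objects, which is the role it would play in the WorkStream pattern mentioned in the TODO further down. A minimal sketch of that serial stage, assuming the usual deal.II headers and using namespace dealii; the function name is hypothetical:

// Scatter one cell's contribution, previously stored in a CopyData object,
// into the global matrix and right hand side. This part must run serially
// (or under a lock), which is exactly why the data is staged in CopyData.
void copy_local_to_global (const CopyData       &data,
                           SparseMatrix<double> &matrix,
                           Vector<double>       &rhs)
{
  const unsigned int n = data.dof_indices.size();
  for (unsigned int i=0; i<n; ++i)
    {
      for (unsigned int j=0; j<n; ++j)
        matrix.add (data.dof_indices[i],
                    data.dof_indices[j],
                    data.cell_matrix(i,j));
      rhs (data.dof_indices[i]) += data.cell_rhs(i);
    }
}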
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
- const std::vector<unsigned int>& component_mapping,
+ const Function<spacedim> *const coefficient,
+ const std::vector<unsigned int> &component_mapping,
const MatrixCreator::internal::IteratorRange<DoFHandler<dim,spacedim> > range,
Threads::ThreadMutex &mutex)
{
std::vector<Vector<double> > rhs_values_system (fe_values.n_quadrature_points,
Vector<double>(n_function_components));
- std::vector<unsigned int> dofs (dofs_per_cell);
- std::vector<unsigned int> dofs_on_face_vector (dofs_per_face);
+ std::vector<types::global_dof_index> dofs (dofs_per_cell);
+ std::vector<types::global_dof_index> dofs_on_face_vector (dofs_per_face);
- // for each dof on the cell, have a
- // flag whether it is on the face
+ // for each dof on the cell, have a
+ // flag whether it is on the face
std::vector<bool> dof_is_on_face(dofs_per_cell);
typename DoFHandler<dim,spacedim>::active_cell_iterator cell = range.first;
template <>
void
create_boundary_mass_matrix_1<2,3> (std_cxx1x::tuple<const Mapping<2,3> &,
- const DoFHandler<2,3> &,
- const Quadrature<1> & > ,
- SparseMatrix<double> &,
+ const DoFHandler<2,3> &,
+ const Quadrature<1> & > ,
+ SparseMatrix<double> &,
const FunctionMap<3>::type &,
Vector<double> &,
- std::vector<unsigned int> &,
+ std::vector<types::global_dof_index> &,
- const Function<3> * const ,
+ const Function<3> *const ,
const std::vector<unsigned int> &,
const MatrixCreator::internal::IteratorRange<DoFHandler<2,3> > ,
Threads::ThreadMutex &)
template <>
void
create_boundary_mass_matrix_1<1,3> (std_cxx1x::tuple<const Mapping<1,3> &,
- const DoFHandler<1,3> &,
- const Quadrature<0> & > ,
- SparseMatrix<double> &,
+ const DoFHandler<1,3> &,
+ const Quadrature<0> & > ,
+ SparseMatrix<double> &,
const FunctionMap<3>::type &,
Vector<double> &,
- std::vector<unsigned int> &,
+ std::vector<types::global_dof_index> &,
- const Function<3> * const ,
+ const Function<3> *const ,
const std::vector<unsigned int> &,
const MatrixCreator::internal::IteratorRange<DoFHandler<1,3> > ,
Threads::ThreadMutex &)
template <int dim, int spacedim>
void
- create_boundary_mass_matrix (const Mapping<dim, spacedim> &mapping,
+ create_boundary_mass_matrix (const Mapping<dim, spacedim> &mapping,
const DoFHandler<dim,spacedim> &dof,
const Quadrature<dim-1> &q,
- SparseMatrix<double> &matrix,
- const typename FunctionMap<spacedim>::type &boundary_functions,
+ SparseMatrix<double> &matrix,
+ const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
+ const Function<spacedim> *const coefficient,
std::vector<unsigned int> component_mapping)
{
- // what would that be in 1d? the
- // identity matrix on the boundary
- // dofs?
+ // what would that be in 1d? the
+ // identity matrix on the boundary
+ // dofs?
if (dim == 1)
{
Assert (false, ExcNotImplemented());
= Threads::split_range<active_cell_iterator> (dof.begin_active(),
dof.end(), n_threads);
- // mutex to synchronise access to
- // the matrix
+ // mutex to synchronise access to
+ // the matrix
Threads::ThreadMutex mutex;
- typedef std_cxx1x::tuple<const Mapping<dim,spacedim>&,
- const DoFHandler<dim,spacedim>&,
- const Quadrature<dim-1>&> Commons;
+ typedef std_cxx1x::tuple<const Mapping<dim,spacedim> &,
+ const DoFHandler<dim,spacedim> &,
+ const Quadrature<dim-1>&> Commons;
- // then assemble in parallel
+ // then assemble in parallel
typedef void (*create_boundary_mass_matrix_1_t)
- (Commons,
- SparseMatrix<double> &matrix,
- const typename FunctionMap<spacedim>::type &boundary_functions,
- Vector<double> &rhs_vector,
- std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
- const std::vector<unsigned int>& component_mapping,
- const MatrixCreator::internal::IteratorRange<DoFHandler<dim,spacedim> > range,
- Threads::ThreadMutex &mutex);
+ (Commons,
+ SparseMatrix<double> &matrix,
+ const typename FunctionMap<spacedim>::type &boundary_functions,
+ Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
++ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ const Function<spacedim> *const coefficient,
+ const std::vector<unsigned int> &component_mapping,
+ const MatrixCreator::internal::IteratorRange<DoFHandler<dim,spacedim> > range,
+ Threads::ThreadMutex &mutex);
create_boundary_mass_matrix_1_t p
= &create_boundary_mass_matrix_1<dim,spacedim>;
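Here the boundary mass matrix is assembled by splitting the active-cell range into n_threads subranges and running the worker above on each, with a single mutex serializing the writes into the shared matrix and right hand side. A generic sketch of that pattern using plain C++11 threads, purely for illustration (the library itself dispatches through its own Threads facilities, and the template names are placeholders):

#include <cstddef>
#include <functional>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

// Run 'worker' on each cell subrange in its own thread; 'mutex' guards the
// shared global matrix/vector inside the worker.
template <typename Iterator, typename Worker>
void assemble_in_parallel (const std::vector<std::pair<Iterator,Iterator> > &ranges,
                           Worker      worker,
                           std::mutex &mutex)
{
  std::vector<std::thread> threads;
  for (std::size_t t=0; t<ranges.size(); ++t)
    threads.push_back (std::thread (worker, ranges[t], std::ref (mutex)));
  for (std::size_t t=0; t<threads.size(); ++t)
    threads[t].join ();
}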
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
- const std::vector<unsigned int>& component_mapping,
+ const Function<spacedim> *const coefficient,
+ const std::vector<unsigned int> &component_mapping,
const MatrixCreator::internal::IteratorRange<hp::DoFHandler<dim,spacedim> > range,
Threads::ThreadMutex &mutex)
{
std::vector<double> rhs_values_scalar;
std::vector<Vector<double> > rhs_values_system;
- std::vector<unsigned int> dofs (max_dofs_per_cell);
- std::vector<unsigned int> dofs_on_face_vector (max_dofs_per_face);
+ std::vector<types::global_dof_index> dofs (max_dofs_per_cell);
+ std::vector<types::global_dof_index> dofs_on_face_vector (max_dofs_per_face);
- // for each dof on the cell, have a
- // flag whether it is on the face
+ // for each dof on the cell, have a
+ // flag whether it is on the face
std::vector<bool> dof_is_on_face(max_dofs_per_cell);
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &rhs,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const a,
+ const Function<spacedim> *const a,
std::vector<unsigned int> component_mapping)
{
create_boundary_mass_matrix(StaticMappingQ1<dim,spacedim>::mapping, dof, q,
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
+ const Function<spacedim> *const coefficient,
std::vector<unsigned int> component_mapping)
{
- // what would that be in 1d? the
- // identity matrix on the boundary
- // dofs?
+ // what would that be in 1d? the
+ // identity matrix on the boundary
+ // dofs?
if (dim == 1)
{
Assert (false, ExcNotImplemented());
= Threads::split_range<active_cell_iterator> (dof.begin_active(),
dof.end(), n_threads);
- typedef std_cxx1x::tuple<const hp::MappingCollection<dim,spacedim>&,
- const hp::DoFHandler<dim,spacedim>&,
- const hp::QCollection<dim-1>&> Commons;
+ typedef std_cxx1x::tuple<const hp::MappingCollection<dim,spacedim> &,
+ const hp::DoFHandler<dim,spacedim> &,
+ const hp::QCollection<dim-1>&> Commons;
- // mutex to synchronise access to
- // the matrix
+ // mutex to synchronise access to
+ // the matrix
Threads::ThreadMutex mutex;
- // then assemble in parallel
+ // then assemble in parallel
typedef void (*create_boundary_mass_matrix_1_t)
- (Commons,
- SparseMatrix<double> &matrix,
- const typename FunctionMap<spacedim>::type &boundary_functions,
- Vector<double> &rhs_vector,
- std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const coefficient,
- const std::vector<unsigned int>& component_mapping,
- const MatrixCreator::internal::IteratorRange<hp::DoFHandler<dim,spacedim> > range,
- Threads::ThreadMutex &mutex);
+ (Commons,
+ SparseMatrix<double> &matrix,
+ const typename FunctionMap<spacedim>::type &boundary_functions,
+ Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
++ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
+ const Function<spacedim> *const coefficient,
+ const std::vector<unsigned int> &component_mapping,
+ const MatrixCreator::internal::IteratorRange<hp::DoFHandler<dim,spacedim> > range,
+ Threads::ThreadMutex &mutex);
create_boundary_mass_matrix_1_t p = &create_boundary_mass_matrix_1<dim,spacedim>;
//TODO: Use WorkStream here
SparseMatrix<double> &matrix,
const typename FunctionMap<spacedim>::type &rhs,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
- const Function<spacedim> * const a,
+ const Function<spacedim> *const a,
std::vector<unsigned int> component_mapping)
{
create_boundary_mass_matrix(hp::StaticMappingQ1<dim,spacedim>::mapping_collection, dof, q,
}
- std::map<unsigned int,double>::const_iterator dof = boundary_values.begin(),
- endd = boundary_values.end();
+ std::map<types::global_dof_index,double>::const_iterator dof = boundary_values.begin(),
- endd = boundary_values.end();
++ endd = boundary_values.end();
const SparsityPattern &sparsity = matrix.get_sparsity_pattern();
const std::size_t *sparsity_rowstart = sparsity.get_rowstart_indices();
const unsigned int *sparsity_colnums = sparsity.get_column_numbers();
{
Assert (dof->first < n_dofs, ExcInternalError());
- const unsigned int dof_number = dof->first;
+ const types::global_dof_index dof_number = dof->first;
- // for each boundary dof:
-
- // set entries of this line
- // to zero except for the diagonal
- // entry. Note that the diagonal
- // entry is always the first one
- // for square matrices, i.e.
- // we shall not set
- // matrix.global_entry(
- // sparsity_rowstart[dof.first])
+ // for each boundary dof:
+
+ // set entries of this line
+ // to zero except for the diagonal
+ // entry. Note that the diagonal
+ // entry is always the first one
+ // for square matrices, i.e.
+ // we shall not set
+ // matrix.global_entry(
+ // sparsity_rowstart[dof.first])
- const unsigned int last = sparsity_rowstart[dof_number+1];
- for (unsigned int j=sparsity_rowstart[dof_number]+1; j<last; ++j)
+ const types::global_dof_index last = sparsity_rowstart[dof_number+1];
+ for (types::global_dof_index j=sparsity_rowstart[dof_number]+1; j<last; ++j)
matrix.global_entry(j) = 0.;
}
- // if the user wants to have
- // the symmetry of the matrix
- // preserved, and if the
- // sparsity pattern is
- // symmetric, then do a Gauss
- // elimination step with the
- // present row
+ // if the user wants to have
+ // the symmetry of the matrix
+ // preserved, and if the
+ // sparsity pattern is
+ // symmetric, then do a Gauss
+ // elimination step with the
+ // present row
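      // In formulas: if dof d is fixed to the boundary value x_d and some
      // other row r has an entry a_{rd} != 0, then eliminating the column
      // amounts to
      //
      //   rhs(r) -= a_{rd} * x_d;   // move the known contribution to the rhs
      //   a_{rd}  = 0;              // and zero the off-diagonal entry
      //
      // which, together with the row clearing above, keeps a symmetric matrix
      // symmetric; the symmetric sparsity pattern is only needed so that the
      // affected rows r can be found cheaply.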
if (eliminate_columns)
{
- // store the only nonzero entry
- // of this line for the Gauss
- // elimination step
+ // store the only nonzero entry
+ // of this line for the Gauss
+ // elimination step
const number diagonal_entry = matrix.diag_element(dof_number);
- // we have to loop over all
- // rows of the matrix which
- // have a nonzero entry in
- // the column which we work
- // in presently. if the
- // sparsity pattern is
- // symmetric, then we can
- // get the positions of
- // these rows cheaply by
- // looking at the nonzero
- // column numbers of the
- // present row. we need not
- // look at the first entry,
- // since that is the
- // diagonal element and
- // thus the present row
+ // we have to loop over all
+ // rows of the matrix which
+ // have a nonzero entry in
+ // the column which we work
+ // in presently. if the
+ // sparsity pattern is
+ // symmetric, then we can
+ // get the positions of
+ // these rows cheaply by
+ // looking at the nonzero
+ // column numbers of the
+ // present row. we need not
+ // look at the first entry,
+ // since that is the
+ // diagonal element and
+ // thus the present row
- for (unsigned int j=sparsity_rowstart[dof_number]+1; j<last; ++j)
+ for (types::global_dof_index j=sparsity_rowstart[dof_number]+1; j<last; ++j)
{
const unsigned int row = sparsity_colnums[j];
first_nonzero_diagonal_entry = 1;
- std::map<unsigned int,double>::const_iterator dof = boundary_values.begin(),
- endd = boundary_values.end();
+ std::map<types::global_dof_index,double>::const_iterator dof = boundary_values.begin(),
- endd = boundary_values.end();
++ endd = boundary_values.end();
const BlockSparsityPattern &
- sparsity_pattern = matrix.get_sparsity_pattern();
+ sparsity_pattern = matrix.get_sparsity_pattern();
- // pointer to the mapping between
- // global and block indices. since
- // the row and column mappings are
- // equal, store a pointer on only
- // one of them
+ // pointer to the mapping between
+ // global and block indices. since
+ // the row and column mappings are
+ // equal, store a pointer on only
+ // one of them
const BlockIndices &
- index_mapping = sparsity_pattern.get_column_indices();
+ index_mapping = sparsity_pattern.get_column_indices();
- // now loop over all boundary dofs
+ // now loop over all boundary dofs
for (; dof != endd; ++dof)
{
Assert (dof->first < n_dofs, ExcInternalError());
- // get global index and index
- // in the block in which this
- // dof is located
+ // get global index and index
+ // in the block in which this
+ // dof is located
- const unsigned int dof_number = dof->first;
- const std::pair<unsigned int,unsigned int>
+ const types::global_dof_index dof_number = dof->first;
+ const std::pair<types::global_dof_index,unsigned int>
- block_index = index_mapping.global_to_local (dof_number);
-
- // for each boundary dof:
-
- // set entries of this line
- // to zero except for the diagonal
- // entry. Note that the diagonal
- // entry is always the first one
- // for square matrices, i.e.
- // we shall not set
- // matrix.global_entry(
- // sparsity_rowstart[dof.first])
- // of the diagonal block
+ block_index = index_mapping.global_to_local (dof_number);
+
+ // for each boundary dof:
+
+ // set entries of this line
+ // to zero except for the diagonal
+ // entry. Note that the diagonal
+ // entry is always the first one
+ // for square matrices, i.e.
+ // we shall not set
+ // matrix.global_entry(
+ // sparsity_rowstart[dof.first])
+ // of the diagonal block
for (unsigned int block_col=0; block_col<blocks; ++block_col)
{
const SparsityPattern &
for (unsigned int j=first; j<last; ++j)
{
- // get the number
- // of the column in
- // this row in
- // which a nonzero
- // entry is. this
- // is also the row
- // of the transpose
- // block which has
- // an entry in the
- // interesting row
+ // get the number
+ // of the column in
+ // this row in
+ // which a nonzero
+ // entry is. this
+ // is also the row
+ // of the transpose
+ // block which has
+ // an entry in the
+ // interesting row
const unsigned int row = transpose_sparsity.get_column_numbers()[j];
- // find the
- // position of
- // element
- // (row,dof_number)
- // in this block
- // (not in the
- // transpose
- // one). note that
- // we have to take
- // care of special
- // cases with
- // square
- // sub-matrices
+ // find the
+ // position of
+ // element
+ // (row,dof_number)
+ // in this block
+ // (not in the
+ // transpose
+ // one). note that
+ // we have to take
+ // care of special
+ // cases with
+ // square
+ // sub-matrices
- const unsigned int *p = 0;
+ const types::global_dof_index *p = 0;
if (this_sparsity.n_rows() == this_sparsity.n_cols())
{
if (this_sparsity.get_column_numbers()
break;
}
- // figure out which rows of the matrix we
- // have to eliminate on this processor
+ // figure out which rows of the matrix we
+ // have to eliminate on this processor
std::vector<unsigned int> constrained_rows;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = boundary_values.begin();
+ dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
if ((dof->first >= local_range.first) &&
right_hand_side.compress ();
solution.compress ();
- std::vector<unsigned int> indices;
+ std::vector<types::global_dof_index> indices;
std::vector<PetscScalar> solution_values;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = boundary_values.begin();
+ dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
if ((dof->first >= local_range.first) &&
std::vector<std::map<unsigned int,double> > block_boundary_values(n_blocks);
{
int offset = 0, block=0;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = boundary_values.begin();
+ dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
{
right_hand_side.block(block),
eliminate_columns);
- // Finally, we need to do something
- // about the off-diagonal matrices. This
- // is luckily not difficult. Just clear
- // the whole row.
+ // Finally, we need to do something
+ // about the off-diagonal matrices. This
+ // is luckily not difficult. Just clear
+ // the whole row.
for (unsigned int block_m=0; block_m<n_blocks; ++block_m)
{
- const std::pair<unsigned int, unsigned int> local_range
+ const std::pair<types::global_dof_index, types::global_dof_index> local_range
= matrix.block(block_m,0).local_range();
std::vector<unsigned int> constrained_rows;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = block_boundary_values[block_m].begin();
+ dof = block_boundary_values[block_m].begin();
dof != block_boundary_values[block_m].end();
++dof)
if ((dof->first >= local_range.first) &&
Assert (local_range == solution.local_range(),
ExcInternalError());
- // we have to read and write from this
- // matrix (in this order). this will only
- // work if we compress the matrix first,
- // done here
+ // we have to read and write from this
+ // matrix (in this order). this will only
+ // work if we compress the matrix first,
+ // done here
matrix.compress ();
- // determine the first nonzero diagonal
- // entry from within the part of the
- // matrix that we can see. if we can't
- // find such an entry, take one
+ // determine the first nonzero diagonal
+ // entry from within the part of the
+ // matrix that we can see. if we can't
+ // find such an entry, take one
TrilinosScalar average_nonzero_diagonal_entry = 1;
- for (unsigned int i=local_range.first; i<local_range.second; ++i)
+ for (types::global_dof_index i=local_range.first; i<local_range.second; ++i)
if (matrix.diag_element(i) != 0)
{
average_nonzero_diagonal_entry = std::fabs(matrix.diag_element(i));
break;
}
- // figure out which rows of the matrix we
- // have to eliminate on this processor
+ // figure out which rows of the matrix we
+ // have to eliminate on this processor
std::vector<unsigned int> constrained_rows;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = boundary_values.begin();
+ dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
if ((dof->first >= local_range.first) &&
right_hand_side.compress ();
solution.compress ();
- std::vector<unsigned int> indices;
+ std::vector<types::global_dof_index> indices;
std::vector<TrilinosScalar> solution_values;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = boundary_values.begin();
+ dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
if ((dof->first >= local_range.first) &&
matrix.compress();
- // We need to find the subdivision
- // into blocks for the boundary values.
- // To this end, generate a vector of
- // maps with the respective indices.
+ // We need to find the subdivision
+ // into blocks for the boundary values.
+ // To this end, generate a vector of
+ // maps with the respective indices.
- std::vector<std::map<unsigned int,double> > block_boundary_values(n_blocks);
+ std::vector<std::map<types::global_dof_index,double> > block_boundary_values(n_blocks);
{
int offset = 0, block=0;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = boundary_values.begin();
+ dof = boundary_values.begin();
dof != boundary_values.end();
++dof)
{
right_hand_side.block(block),
eliminate_columns);
- // Finally, we need to do something
- // about the off-diagonal matrices. This
- // is luckily not difficult. Just clear
- // the whole row.
+ // Finally, we need to do something
+ // about the off-diagonal matrices. This
+ // is luckily not difficult. Just clear
+ // the whole row.
for (unsigned int block_m=0; block_m<n_blocks; ++block_m)
{
- const std::pair<unsigned int, unsigned int> local_range
+ const std::pair<types::global_dof_index, types::global_dof_index> local_range
= matrix.block(block_m,0).local_range();
std::vector<unsigned int> constrained_rows;
- for (std::map<unsigned int,double>::const_iterator
+ for (std::map<types::global_dof_index,double>::const_iterator
- dof = block_boundary_values[block_m].begin();
+ dof = block_boundary_values[block_m].begin();
dof != block_boundary_values[block_m].end();
++dof)
if ((dof->first >= local_range.first) &&
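
Editorial note: the block variant above first splits the global map of boundary values into one map per block (the "subdivision into blocks" mentioned in the comment), then applies the scalar elimination to each diagonal block and clears the constrained rows of the off-diagonal blocks as sketched earlier. A standalone sketch of the subdivision step, with a hypothetical helper name and the block sizes passed in explicitly; it relies on std::map iterating its keys in ascending order, just like the code above.

#include <deal.II/base/types.h>
#include <map>
#include <vector>

using namespace dealii;

// Sketch only: split a map of global (dof index -> boundary value) pairs
// into one map per block, with indices shifted to be block-local.
std::vector<std::map<types::global_dof_index,double> >
split_boundary_values_into_blocks
  (const std::map<types::global_dof_index,double> &boundary_values,
   const std::vector<types::global_dof_index>     &block_sizes)
{
  const unsigned int n_blocks = block_sizes.size ();
  std::vector<std::map<types::global_dof_index,double> >
    block_boundary_values (n_blocks);

  types::global_dof_index offset = 0;
  unsigned int            block  = 0;
  for (std::map<types::global_dof_index,double>::const_iterator
         dof = boundary_values.begin();
       dof != boundary_values.end(); ++dof)
    {
      // advance to the block that contains this (ascending) index
      while (dof->first >= offset + block_sizes[block])
        {
          offset += block_sizes[block];
          ++block;
        }
      block_boundary_values[block][dof->first - offset] = dof->second;
    }
  return block_boundary_values;
}
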
const unsigned int n_local_dofs = local_dof_indices.size();
for (unsigned int i=0; i<n_local_dofs; ++i)
{
- const std::map<unsigned int, double>::const_iterator
+ const std::map<types::global_dof_index, double>::const_iterator
- boundary_value = boundary_values.find (local_dof_indices[i]);
+ boundary_value = boundary_values.find (local_dof_indices[i]);
if (boundary_value != boundary_values.end())
{
- // remove this row, except for the
- // diagonal element
+ // remove this row, except for the
+ // diagonal element
for (unsigned int j=0; j<n_local_dofs; ++j)
if (i != j)
local_matrix(i,j) = 0;
}
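
Editorial note: the fragment above is the cell-local flavour of the same elimination: whenever a local degree of freedom carries a prescribed boundary value, its row in the cell matrix is cleared except for the diagonal entry. A minimal sketch of that technique; the function name is hypothetical, and the library version is more careful (it can also eliminate the corresponding columns and cope with zero diagonal entries).

#include <deal.II/base/types.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/vector.h>
#include <map>
#include <vector>

using namespace dealii;

// Sketch only: eliminate prescribed boundary values from a cell matrix
// and cell right hand side before assembly into the global system.
void local_eliminate_boundary_values
  (const std::vector<types::global_dof_index>      &local_dof_indices,
   const std::map<types::global_dof_index,double>  &boundary_values,
   FullMatrix<double>                              &local_matrix,
   Vector<double>                                  &local_rhs)
{
  const unsigned int n_local_dofs = local_dof_indices.size ();
  for (unsigned int i = 0; i < n_local_dofs; ++i)
    {
      const std::map<types::global_dof_index,double>::const_iterator
        boundary_value = boundary_values.find (local_dof_indices[i]);
      if (boundary_value != boundary_values.end())
        {
          // keep only the diagonal entry of this row ...
          for (unsigned int j = 0; j < n_local_dofs; ++j)
            if (i != j)
              local_matrix(i, j) = 0;
          // ... and make the row equation read a_ii * u_i = a_ii * g_i,
          // so that u_i ends up with the prescribed value g_i
          local_rhs(i) = local_matrix(i, i) * boundary_value->second;
        }
    }
}
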
- std::vector<unsigned int>
+ std::vector<types::global_dof_index>
- local_dof_indices (dof_handler->get_fe ().dofs_per_cell);
+ local_dof_indices (dof_handler->get_fe ().dofs_per_cell);
std::vector <int> new_solution_indices;
current_cell->get_dof_indices (local_dof_indices);
- // there is an implicit assumption here
- // that all the closest support point to
- // the requested point for all finite
- // element components lie in the same cell.
- // this could possibly be violated if
- // components use different fe orders,
- // requested points are on the edge or
- // vertex of a cell and we are unlucky with
- // floating point rounding. Worst case
- // scenario however is that the point
- // selected isn't the closest possible, it
- // will still lie within one cell distance.
- // calling
- // GridTools::find_active_cell_around_point
- // to obtain a cell to search is an
- // option for these methods, but currently
- // the GridTools method does not cater for
- // a vector of points, and does not seem to
- // be intrinsicly faster than this method.
+ // There is an implicit assumption here
+ // that the closest support points to the
+ // requested point for all finite element
+ // components lie in the same cell. This
+ // could be violated if the components use
+ // different fe orders, the requested point
+ // lies on the edge or vertex of a cell,
+ // and we are unlucky with floating point
+ // rounding. The worst-case scenario,
+ // however, is that the selected point is
+ // not the closest possible one; it will
+ // still lie within one cell's distance.
+ // Calling
+ // GridTools::find_active_cell_around_point
+ // to obtain a cell to search would be an
+ // option for these methods, but currently
+ // the GridTools method does not cater for
+ // a vector of points and does not seem to
+ // be intrinsically faster than this method.
for (unsigned int component = 0;
component < dof_handler->get_fe ().n_components (); component++)
{
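
Editorial note: the comment above mentions GridTools::find_active_cell_around_point as an alternative way of narrowing down the search. A minimal sketch of that alternative for a single requested point: locate the cell around the point, then pick the closest of that cell's vertices. The helper name is hypothetical; the actual code above searches per-component support points rather than vertices and has to handle a whole vector of points.

#include <deal.II/base/geometry_info.h>
#include <deal.II/base/point.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/grid/grid_tools.h>

using namespace dealii;

// Sketch only: return the global vertex index of the vertex closest to
// the requested point, searching only the cell around that point.
template <int dim>
unsigned int
closest_vertex_to_point (const DoFHandler<dim> &dof_handler,
                         const Point<dim>      &p)
{
  // locate the active cell around (or nearest to) the requested point ...
  const typename DoFHandler<dim>::active_cell_iterator
    cell = GridTools::find_active_cell_around_point (dof_handler, p);

  // ... then pick, among this cell's vertices, the one closest to p
  unsigned int best_vertex   = 0;
  double       best_distance = p.distance (cell->vertex(0));
  for (unsigned int v = 1; v < GeometryInfo<dim>::vertices_per_cell; ++v)
    if (p.distance (cell->vertex(v)) < best_distance)
      {
        best_distance = p.distance (cell->vertex(v));
        best_vertex   = v;
      }
  return cell->vertex_index (best_vertex);
}
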
" at the same time!"));
Vector<typename VECTOR::value_type> local_values;
- std::vector<unsigned int> dofs;
+ std::vector<types::global_dof_index> dofs;
typename std::map<std::pair<unsigned int, unsigned int>, Pointerstruct>::const_iterator
- pointerstruct,
- cell_map_end=cell_map.end();
+ pointerstruct,
+ cell_map_end=cell_map.end();
Table<2,FullMatrix<double> > interpolation_hp;
internal::extract_interpolation_matrices (*dof_handler, interpolation_hp);